chore(api/core): apply ruff reformatting (#7624)

Author: Bowen Liang
Date: 2024-09-10 17:00:20 +08:00
Committed by: GitHub
Parent: 178730266d
Commit: 2cf1187b32
724 changed files with 21180 additions and 21123 deletions


@@ -18,12 +18,21 @@ class Callback:
     Base class for callbacks.
     Only for LLM.
     """

     raise_error: bool = False

-    def on_before_invoke(self, llm_instance: AIModel, model: str, credentials: dict,
-                         prompt_messages: list[PromptMessage], model_parameters: dict,
-                         tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
-                         stream: bool = True, user: Optional[str] = None) -> None:
+    def on_before_invoke(
+        self,
+        llm_instance: AIModel,
+        model: str,
+        credentials: dict,
+        prompt_messages: list[PromptMessage],
+        model_parameters: dict,
+        tools: Optional[list[PromptMessageTool]] = None,
+        stop: Optional[list[str]] = None,
+        stream: bool = True,
+        user: Optional[str] = None,
+    ) -> None:
         """
         Before invoke callback
@@ -39,10 +48,19 @@ class Callback:
         """
         raise NotImplementedError()

-    def on_new_chunk(self, llm_instance: AIModel, chunk: LLMResultChunk, model: str, credentials: dict,
-                     prompt_messages: list[PromptMessage], model_parameters: dict,
-                     tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
-                     stream: bool = True, user: Optional[str] = None):
+    def on_new_chunk(
+        self,
+        llm_instance: AIModel,
+        chunk: LLMResultChunk,
+        model: str,
+        credentials: dict,
+        prompt_messages: list[PromptMessage],
+        model_parameters: dict,
+        tools: Optional[list[PromptMessageTool]] = None,
+        stop: Optional[list[str]] = None,
+        stream: bool = True,
+        user: Optional[str] = None,
+    ):
         """
         On new chunk callback
@@ -59,10 +77,19 @@ class Callback:
         """
         raise NotImplementedError()

-    def on_after_invoke(self, llm_instance: AIModel, result: LLMResult, model: str, credentials: dict,
-                        prompt_messages: list[PromptMessage], model_parameters: dict,
-                        tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
-                        stream: bool = True, user: Optional[str] = None) -> None:
+    def on_after_invoke(
+        self,
+        llm_instance: AIModel,
+        result: LLMResult,
+        model: str,
+        credentials: dict,
+        prompt_messages: list[PromptMessage],
+        model_parameters: dict,
+        tools: Optional[list[PromptMessageTool]] = None,
+        stop: Optional[list[str]] = None,
+        stream: bool = True,
+        user: Optional[str] = None,
+    ) -> None:
         """
         After invoke callback
@@ -79,10 +106,19 @@ class Callback:
         """
         raise NotImplementedError()

-    def on_invoke_error(self, llm_instance: AIModel, ex: Exception, model: str, credentials: dict,
-                        prompt_messages: list[PromptMessage], model_parameters: dict,
-                        tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
-                        stream: bool = True, user: Optional[str] = None) -> None:
+    def on_invoke_error(
+        self,
+        llm_instance: AIModel,
+        ex: Exception,
+        model: str,
+        credentials: dict,
+        prompt_messages: list[PromptMessage],
+        model_parameters: dict,
+        tools: Optional[list[PromptMessageTool]] = None,
+        stop: Optional[list[str]] = None,
+        stream: bool = True,
+        user: Optional[str] = None,
+    ) -> None:
         """
         Invoke error callback
@@ -99,9 +135,7 @@ class Callback:
         """
         raise NotImplementedError()

-    def print_text(
-        self, text: str, color: Optional[str] = None, end: str = ""
-    ) -> None:
+    def print_text(self, text: str, color: Optional[str] = None, end: str = "") -> None:
         """Print text with highlighting and no end characters."""
         text_to_print = self._get_colored_text(text, color) if color else text
         print(text_to_print, end=end)
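For orientation, here is a minimal sketch of how this base class is meant to be subclassed, assuming the import paths used in the second file of this commit. The TimingCallback class, its attributes, and the timing logic are illustrative and hypothetical, not part of this change; only the hook signatures, raise_error, and print_text come from the Callback class shown above.

import time
from typing import Optional

from core.model_runtime.callbacks.base_callback import Callback  # path assumed
from core.model_runtime.entities.llm_entities import LLMResult  # path assumed
from core.model_runtime.entities.message_entities import PromptMessage, PromptMessageTool  # path assumed
from core.model_runtime.model_providers.__base.ai_model import AIModel


class TimingCallback(Callback):
    """Hypothetical callback that reports wall-clock latency per LLM invocation."""

    # In practice on_new_chunk and on_invoke_error would be overridden as well,
    # since the base class raises NotImplementedError for every hook.
    raise_error = False  # callback failures should not abort the invocation
    _started_at: float = 0.0

    def on_before_invoke(
        self,
        llm_instance: AIModel,
        model: str,
        credentials: dict,
        prompt_messages: list[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[list[str]] = None,
        stream: bool = True,
        user: Optional[str] = None,
    ) -> None:
        # One callback instance per invocation is assumed, so a plain attribute suffices.
        self._started_at = time.monotonic()

    def on_after_invoke(
        self,
        llm_instance: AIModel,
        result: LLMResult,
        model: str,
        credentials: dict,
        prompt_messages: list[PromptMessage],
        model_parameters: dict,
        tools: Optional[list[PromptMessageTool]] = None,
        stop: Optional[list[str]] = None,
        stream: bool = True,
        user: Optional[str] = None,
    ) -> None:
        elapsed = time.monotonic() - self._started_at
        # print_text is inherited from the base class shown above.
        self.print_text(f"[timing] {model} finished in {elapsed:.2f}s\n")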


@@ -10,11 +10,20 @@ from core.model_runtime.model_providers.__base.ai_model import AIModel
 logger = logging.getLogger(__name__)


 class LoggingCallback(Callback):
-    def on_before_invoke(self, llm_instance: AIModel, model: str, credentials: dict,
-                         prompt_messages: list[PromptMessage], model_parameters: dict,
-                         tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
-                         stream: bool = True, user: Optional[str] = None) -> None:
+    def on_before_invoke(
+        self,
+        llm_instance: AIModel,
+        model: str,
+        credentials: dict,
+        prompt_messages: list[PromptMessage],
+        model_parameters: dict,
+        tools: Optional[list[PromptMessageTool]] = None,
+        stop: Optional[list[str]] = None,
+        stream: bool = True,
+        user: Optional[str] = None,
+    ) -> None:
         """
         Before invoke callback
@@ -28,40 +37,49 @@ class LoggingCallback(Callback):
         :param stream: is stream response
         :param user: unique user id
         """
-        self.print_text("\n[on_llm_before_invoke]\n", color='blue')
-        self.print_text(f"Model: {model}\n", color='blue')
-        self.print_text("Parameters:\n", color='blue')
+        self.print_text("\n[on_llm_before_invoke]\n", color="blue")
+        self.print_text(f"Model: {model}\n", color="blue")
+        self.print_text("Parameters:\n", color="blue")

         for key, value in model_parameters.items():
-            self.print_text(f"\t{key}: {value}\n", color='blue')
+            self.print_text(f"\t{key}: {value}\n", color="blue")

         if stop:
-            self.print_text(f"\tstop: {stop}\n", color='blue')
+            self.print_text(f"\tstop: {stop}\n", color="blue")

         if tools:
-            self.print_text("\tTools:\n", color='blue')
+            self.print_text("\tTools:\n", color="blue")
             for tool in tools:
-                self.print_text(f"\t\t{tool.name}\n", color='blue')
+                self.print_text(f"\t\t{tool.name}\n", color="blue")

-        self.print_text(f"Stream: {stream}\n", color='blue')
+        self.print_text(f"Stream: {stream}\n", color="blue")

         if user:
-            self.print_text(f"User: {user}\n", color='blue')
+            self.print_text(f"User: {user}\n", color="blue")

-        self.print_text("Prompt messages:\n", color='blue')
+        self.print_text("Prompt messages:\n", color="blue")
         for prompt_message in prompt_messages:
             if prompt_message.name:
-                self.print_text(f"\tname: {prompt_message.name}\n", color='blue')
+                self.print_text(f"\tname: {prompt_message.name}\n", color="blue")

-            self.print_text(f"\trole: {prompt_message.role.value}\n", color='blue')
-            self.print_text(f"\tcontent: {prompt_message.content}\n", color='blue')
+            self.print_text(f"\trole: {prompt_message.role.value}\n", color="blue")
+            self.print_text(f"\tcontent: {prompt_message.content}\n", color="blue")

         if stream:
             self.print_text("\n[on_llm_new_chunk]")

-    def on_new_chunk(self, llm_instance: AIModel, chunk: LLMResultChunk, model: str, credentials: dict,
-                     prompt_messages: list[PromptMessage], model_parameters: dict,
-                     tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
-                     stream: bool = True, user: Optional[str] = None):
+    def on_new_chunk(
+        self,
+        llm_instance: AIModel,
+        chunk: LLMResultChunk,
+        model: str,
+        credentials: dict,
+        prompt_messages: list[PromptMessage],
+        model_parameters: dict,
+        tools: Optional[list[PromptMessageTool]] = None,
+        stop: Optional[list[str]] = None,
+        stream: bool = True,
+        user: Optional[str] = None,
+    ):
         """
         On new chunk callback
@@ -79,10 +97,19 @@ class LoggingCallback(Callback):
             sys.stdout.write(chunk.delta.message.content)
             sys.stdout.flush()

-    def on_after_invoke(self, llm_instance: AIModel, result: LLMResult, model: str, credentials: dict,
-                        prompt_messages: list[PromptMessage], model_parameters: dict,
-                        tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
-                        stream: bool = True, user: Optional[str] = None) -> None:
+    def on_after_invoke(
+        self,
+        llm_instance: AIModel,
+        result: LLMResult,
+        model: str,
+        credentials: dict,
+        prompt_messages: list[PromptMessage],
+        model_parameters: dict,
+        tools: Optional[list[PromptMessageTool]] = None,
+        stop: Optional[list[str]] = None,
+        stream: bool = True,
+        user: Optional[str] = None,
+    ) -> None:
         """
         After invoke callback
@@ -97,24 +124,33 @@ class LoggingCallback(Callback):
         :param stream: is stream response
         :param user: unique user id
         """
-        self.print_text("\n[on_llm_after_invoke]\n", color='yellow')
-        self.print_text(f"Content: {result.message.content}\n", color='yellow')
+        self.print_text("\n[on_llm_after_invoke]\n", color="yellow")
+        self.print_text(f"Content: {result.message.content}\n", color="yellow")

         if result.message.tool_calls:
-            self.print_text("Tool calls:\n", color='yellow')
+            self.print_text("Tool calls:\n", color="yellow")
             for tool_call in result.message.tool_calls:
-                self.print_text(f"\t{tool_call.id}\n", color='yellow')
-                self.print_text(f"\t{tool_call.function.name}\n", color='yellow')
-                self.print_text(f"\t{json.dumps(tool_call.function.arguments)}\n", color='yellow')
+                self.print_text(f"\t{tool_call.id}\n", color="yellow")
+                self.print_text(f"\t{tool_call.function.name}\n", color="yellow")
+                self.print_text(f"\t{json.dumps(tool_call.function.arguments)}\n", color="yellow")

-        self.print_text(f"Model: {result.model}\n", color='yellow')
-        self.print_text(f"Usage: {result.usage}\n", color='yellow')
-        self.print_text(f"System Fingerprint: {result.system_fingerprint}\n", color='yellow')
+        self.print_text(f"Model: {result.model}\n", color="yellow")
+        self.print_text(f"Usage: {result.usage}\n", color="yellow")
+        self.print_text(f"System Fingerprint: {result.system_fingerprint}\n", color="yellow")

-    def on_invoke_error(self, llm_instance: AIModel, ex: Exception, model: str, credentials: dict,
-                        prompt_messages: list[PromptMessage], model_parameters: dict,
-                        tools: Optional[list[PromptMessageTool]] = None, stop: Optional[list[str]] = None,
-                        stream: bool = True, user: Optional[str] = None) -> None:
+    def on_invoke_error(
+        self,
+        llm_instance: AIModel,
+        ex: Exception,
+        model: str,
+        credentials: dict,
+        prompt_messages: list[PromptMessage],
+        model_parameters: dict,
+        tools: Optional[list[PromptMessageTool]] = None,
+        stop: Optional[list[str]] = None,
+        stream: bool = True,
+        user: Optional[str] = None,
+    ) -> None:
         """
         Invoke error callback
@@ -129,5 +165,5 @@ class LoggingCallback(Callback):
         :param stream: is stream response
         :param user: unique user id
         """
-        self.print_text("\n[on_llm_invoke_error]\n", color='red')
+        self.print_text("\n[on_llm_invoke_error]\n", color="red")
         logger.exception(ex)
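Finally, a hedged usage sketch for the LoggingCallback above. The commit itself does not touch the invoke path, so the callbacks= parameter, the shape of the invoke() call, and every argument value below are assumptions or placeholders rather than code from this change.

# Hypothetical wiring, for illustration only. llm_instance, credentials and
# prompt_messages are placeholders the caller is assumed to have already built;
# the callbacks= parameter is an assumption about the invoke path.
from core.model_runtime.callbacks.logging_callback import LoggingCallback  # path assumed

result = llm_instance.invoke(
    model="example-model",                    # placeholder model name
    credentials=credentials,                  # placeholder provider credentials dict
    prompt_messages=prompt_messages,          # placeholder list[PromptMessage]
    model_parameters={"temperature": 0.7},
    stream=False,
    user="example-user",
    callbacks=[LoggingCallback()],            # fires the hooks shown above around the call
)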