chore(api/core): apply ruff reformatting (#7624)

Bowen Liang
2024-09-10 17:00:20 +08:00
committed by GitHub
parent 178730266d
commit 2cf1187b32
724 changed files with 21180 additions and 21123 deletions
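
A tree-wide reformat like this one is produced mechanically rather than by hand. Presumably it was generated by running Ruff's formatter (ruff format, possibly together with ruff check --fix for auto-fixable lint rules) over the api/ tree under the repository's Ruff configuration; the exact invocation is not recorded in the commit, so treat that as an assumption. The changes below are all characteristic ruff format output: string literals normalized to double quotes, calls collapsed or re-wrapped to fit the configured line length, docstring indentation normalized, and spaces inserted around non-trivial slice bounds.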


@@ -1,4 +1,3 @@
 from core.model_runtime.entities.llm_entities import LLMResult
 from core.model_runtime.entities.message_entities import PromptMessage, SystemPromptMessage, UserPromptMessage
 from core.tools.entities.tool_entities import ToolProviderType
@@ -16,40 +15,38 @@ Please summarize the text you got.
 class BuiltinTool(Tool):
     """
-        Builtin tool
+    Builtin tool
 
-        :param meta: the meta data of a tool call processing
+    :param meta: the meta data of a tool call processing
     """
 
-    def invoke_model(
-        self, user_id: str, prompt_messages: list[PromptMessage], stop: list[str]
-    ) -> LLMResult:
+    def invoke_model(self, user_id: str, prompt_messages: list[PromptMessage], stop: list[str]) -> LLMResult:
         """
-            invoke model
+        invoke model
 
-            :param model_config: the model config
-            :param prompt_messages: the prompt messages
-            :param stop: the stop words
-            :return: the model result
+        :param model_config: the model config
+        :param prompt_messages: the prompt messages
+        :param stop: the stop words
+        :return: the model result
         """
         # invoke model
         return ModelInvocationUtils.invoke(
             user_id=user_id,
             tenant_id=self.runtime.tenant_id,
-            tool_type='builtin',
+            tool_type="builtin",
             tool_name=self.identity.name,
             prompt_messages=prompt_messages,
         )
 
     def tool_provider_type(self) -> ToolProviderType:
         return ToolProviderType.BUILT_IN
 
     def get_max_tokens(self) -> int:
         """
-            get max tokens
+        get max tokens
 
-            :param model_config: the model config
-            :return: the max tokens
+        :param model_config: the model config
+        :return: the max tokens
         """
         return ModelInvocationUtils.get_max_llm_context_tokens(
             tenant_id=self.runtime.tenant_id,
@@ -57,39 +54,34 @@ class BuiltinTool(Tool):
     def get_prompt_tokens(self, prompt_messages: list[PromptMessage]) -> int:
         """
-            get prompt tokens
+        get prompt tokens
 
-            :param prompt_messages: the prompt messages
-            :return: the tokens
+        :param prompt_messages: the prompt messages
+        :return: the tokens
         """
-        return ModelInvocationUtils.calculate_tokens(
-            tenant_id=self.runtime.tenant_id,
-            prompt_messages=prompt_messages
-        )
+        return ModelInvocationUtils.calculate_tokens(tenant_id=self.runtime.tenant_id, prompt_messages=prompt_messages)
 
     def summary(self, user_id: str, content: str) -> str:
         max_tokens = self.get_max_tokens()
 
-        if self.get_prompt_tokens(prompt_messages=[
-            UserPromptMessage(content=content)
-        ]) < max_tokens * 0.6:
+        if self.get_prompt_tokens(prompt_messages=[UserPromptMessage(content=content)]) < max_tokens * 0.6:
             return content
 
         def get_prompt_tokens(content: str) -> int:
-            return self.get_prompt_tokens(prompt_messages=[
-                SystemPromptMessage(content=_SUMMARY_PROMPT),
-                UserPromptMessage(content=content)
-            ])
+            return self.get_prompt_tokens(
+                prompt_messages=[SystemPromptMessage(content=_SUMMARY_PROMPT), UserPromptMessage(content=content)]
+            )
 
         def summarize(content: str) -> str:
-            summary = self.invoke_model(user_id=user_id, prompt_messages=[
-                SystemPromptMessage(content=_SUMMARY_PROMPT),
-                UserPromptMessage(content=content)
-            ], stop=[])
+            summary = self.invoke_model(
+                user_id=user_id,
+                prompt_messages=[SystemPromptMessage(content=_SUMMARY_PROMPT), UserPromptMessage(content=content)],
+                stop=[],
+            )
 
             return summary.message.content
 
-        lines = content.split('\n')
+        lines = content.split("\n")
         new_lines = []
         # split long line into multiple lines
         for i in range(len(lines)):
@@ -100,8 +92,8 @@ class BuiltinTool(Tool):
                 new_lines.append(line)
             elif get_prompt_tokens(line) > max_tokens * 0.7:
                 while get_prompt_tokens(line) > max_tokens * 0.7:
-                    new_lines.append(line[:int(max_tokens * 0.5)])
-                    line = line[int(max_tokens * 0.5):]
+                    new_lines.append(line[: int(max_tokens * 0.5)])
+                    line = line[int(max_tokens * 0.5) :]
                 new_lines.append(line)
             else:
                 new_lines.append(line)
@@ -125,17 +117,15 @@ class BuiltinTool(Tool):
             summary = summarize(message)
             summaries.append(summary)
 
-        result = '\n'.join(summaries)
+        result = "\n".join(summaries)
 
-        if self.get_prompt_tokens(prompt_messages=[
-            UserPromptMessage(content=result)
-        ]) > max_tokens * 0.7:
+        if self.get_prompt_tokens(prompt_messages=[UserPromptMessage(content=result)]) > max_tokens * 0.7:
             return self.summary(user_id=user_id, content=result)
 
         return result
 
     def get_url(self, url: str, user_agent: str = None) -> str:
         """
-            get url
+        get url
         """
         return get_url(url, user_agent=user_agent)
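
The summary logic in this file, only re-wrapped by this commit, implements a recursive chunk-and-summarize pattern: content that fits comfortably inside the model's context budget is returned as-is; otherwise it is split into line-based chunks under the budget (hard-splitting any single over-long line by characters), each chunk is summarized independently, and the joined summaries are summarized again if they are still too long. A minimal standalone sketch of that pattern follows; count_tokens and summarize_chunk are hypothetical stand-ins for ModelInvocationUtils.calculate_tokens and the tool's invoke_model call, and the 0.6/0.7 thresholds mirror the code above.

def count_tokens(text: str) -> int:
    # Hypothetical: a real implementation would ask the model's tokenizer.
    return max(1, len(text) // 4)

def summarize_chunk(text: str) -> str:
    # Hypothetical: a real implementation would invoke an LLM with a
    # summarization system prompt, as invoke_model does above.
    return text[: len(text) // 2]

def summarize_recursively(content: str, max_tokens: int) -> str:
    # Content that already fits comfortably is returned unchanged.
    if count_tokens(content) < max_tokens * 0.6:
        return content

    # Hard-split any single line that exceeds the budget on its own; the
    # original code cuts by characters rather than tokens, so this does too.
    lines: list[str] = []
    for line in content.split("\n"):
        while count_tokens(line) > max_tokens * 0.7:
            cut = max(1, int(max_tokens * 0.5))
            lines.append(line[:cut])
            line = line[cut:]
        lines.append(line)

    # Group lines into messages that stay under the budget, then
    # summarize each message independently.
    messages: list[str] = []
    current = ""
    for line in lines:
        candidate = f"{current}\n{line}" if current else line
        if count_tokens(candidate) > max_tokens * 0.7:
            messages.append(current)
            current = line
        else:
            current = candidate
    if current:
        messages.append(current)

    # Join the partial summaries; if the result is still too long,
    # recurse until it fits, exactly as BuiltinTool.summary does.
    result = "\n".join(summarize_chunk(m) for m in messages)
    if count_tokens(result) > max_tokens * 0.7:
        return summarize_recursively(result, max_tokens)
    return result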