chore(api/core): apply ruff reformatting (#7624)
@@ -43,21 +43,16 @@ class LLMGenerator:
         with measure_time() as timer:
             response = model_instance.invoke_llm(
-                prompt_messages=prompts,
-                model_parameters={
-                    "max_tokens": 100,
-                    "temperature": 1
-                },
-                stream=False
+                prompt_messages=prompts, model_parameters={"max_tokens": 100, "temperature": 1}, stream=False
             )
         answer = response.message.content
-        cleaned_answer = re.sub(r'^.*(\{.*\}).*$', r'\1', answer, flags=re.DOTALL)
+        cleaned_answer = re.sub(r"^.*(\{.*\}).*$", r"\1", answer, flags=re.DOTALL)
         result_dict = json.loads(cleaned_answer)
-        answer = result_dict['Your Output']
+        answer = result_dict["Your Output"]
         name = answer.strip()

         if len(name) > 75:
-            name = name[:75] + '...'
+            name = name[:75] + "..."

         # get tracing instance
         trace_manager = TraceQueueManager(app_id=app_id)
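Note: the hunk above only changes layout and quote style; the name-cleaning behavior is unchanged. A minimal standalone sketch of that logic, where the sample response string is invented for illustration but the regex, key lookup, and truncation mirror the diff:

import json
import re

# Hypothetical raw LLM response that wraps the JSON payload in extra prose.
answer = 'Sure! Here is the result: {"Your Output": "Troubleshooting a failing CI pipeline"} Hope that helps.'

# Keep only the outermost {...} block, then parse it, exactly as the reformatted code does.
cleaned_answer = re.sub(r"^.*(\{.*\}).*$", r"\1", answer, flags=re.DOTALL)
result_dict = json.loads(cleaned_answer)
name = result_dict["Your Output"].strip()

# Truncate overly long conversation names to 75 characters plus an ellipsis.
if len(name) > 75:
    name = name[:75] + "..."

print(name)  # -> Troubleshooting a failing CI pipeline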
@@ -79,14 +74,9 @@ class LLMGenerator:
         output_parser = SuggestedQuestionsAfterAnswerOutputParser()
         format_instructions = output_parser.get_format_instructions()

-        prompt_template = PromptTemplateParser(
-            template="{{histories}}\n{{format_instructions}}\nquestions:\n"
-        )
+        prompt_template = PromptTemplateParser(template="{{histories}}\n{{format_instructions}}\nquestions:\n")

-        prompt = prompt_template.format({
-            "histories": histories,
-            "format_instructions": format_instructions
-        })
+        prompt = prompt_template.format({"histories": histories, "format_instructions": format_instructions})

         try:
             model_manager = ModelManager()
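The collapsed one-liners above build the same suggested-questions prompt as before. A rough, self-contained sketch of what that template produces; PromptTemplateParser is Dify's own class, so the render() helper and the sample values below are assumptions used purely for illustration:

# Hypothetical stand-in for Dify's PromptTemplateParser: substitutes {{name}} placeholders from a dict.
def render(template: str, inputs: dict) -> str:
    out = template
    for key, value in inputs.items():
        out = out.replace("{{" + key + "}}", str(value))
    return out

histories = "Human: How do I reset my password?\nAssistant: Open Settings > Security."
format_instructions = 'Return a JSON array such as ["question1","question2","question3"].'

prompt = render(
    "{{histories}}\n{{format_instructions}}\nquestions:\n",
    {"histories": histories, "format_instructions": format_instructions},
)
print(prompt)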
@@ -101,12 +91,7 @@ class LLMGenerator:

         try:
             response = model_instance.invoke_llm(
-                prompt_messages=prompt_messages,
-                model_parameters={
-                    "max_tokens": 256,
-                    "temperature": 0
-                },
-                stream=False
+                prompt_messages=prompt_messages, model_parameters={"max_tokens": 256, "temperature": 0}, stream=False
             )

             questions = output_parser.parse(response.message.content)
@@ -119,32 +104,24 @@ class LLMGenerator:
         return questions

     @classmethod
-    def generate_rule_config(cls, tenant_id: str, instruction: str, model_config: dict, no_variable: bool, rule_config_max_tokens: int = 512) -> dict:
+    def generate_rule_config(
+        cls, tenant_id: str, instruction: str, model_config: dict, no_variable: bool, rule_config_max_tokens: int = 512
+    ) -> dict:
         output_parser = RuleConfigGeneratorOutputParser()

         error = ""
         error_step = ""
-        rule_config = {
-            "prompt": "",
-            "variables": [],
-            "opening_statement": "",
-            "error": ""
-        }
-        model_parameters = {
-            "max_tokens": rule_config_max_tokens,
-            "temperature": 0.01
-        }
+        rule_config = {"prompt": "", "variables": [], "opening_statement": "", "error": ""}
+        model_parameters = {"max_tokens": rule_config_max_tokens, "temperature": 0.01}

         if no_variable:
-            prompt_template = PromptTemplateParser(
-                WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE
-            )
+            prompt_template = PromptTemplateParser(WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE)

             prompt_generate = prompt_template.format(
                 inputs={
                     "TASK_DESCRIPTION": instruction,
                 },
-                remove_template_variables=False
+                remove_template_variables=False,
             )

             prompt_messages = [UserPromptMessage(content=prompt_generate)]
@@ -158,13 +135,11 @@ class LLMGenerator:

             try:
                 response = model_instance.invoke_llm(
-                    prompt_messages=prompt_messages,
-                    model_parameters=model_parameters,
-                    stream=False
+                    prompt_messages=prompt_messages, model_parameters=model_parameters, stream=False
                 )

                 rule_config["prompt"] = response.message.content

             except InvokeError as e:
                 error = str(e)
                 error_step = "generate rule config"
@@ -179,24 +154,18 @@ class LLMGenerator:
             # get rule config prompt, parameter and statement
             prompt_generate, parameter_generate, statement_generate = output_parser.get_format_instructions()

-            prompt_template = PromptTemplateParser(
-                prompt_generate
-            )
+            prompt_template = PromptTemplateParser(prompt_generate)

-            parameter_template = PromptTemplateParser(
-                parameter_generate
-            )
+            parameter_template = PromptTemplateParser(parameter_generate)

-            statement_template = PromptTemplateParser(
-                statement_generate
-            )
+            statement_template = PromptTemplateParser(statement_generate)

             # format the prompt_generate_prompt
             prompt_generate_prompt = prompt_template.format(
                 inputs={
                     "TASK_DESCRIPTION": instruction,
                 },
-                remove_template_variables=False
+                remove_template_variables=False,
             )
             prompt_messages = [UserPromptMessage(content=prompt_generate_prompt)]

@@ -213,9 +182,7 @@ class LLMGenerator:
             try:
                 # the first step to generate the task prompt
                 prompt_content = model_instance.invoke_llm(
-                    prompt_messages=prompt_messages,
-                    model_parameters=model_parameters,
-                    stream=False
+                    prompt_messages=prompt_messages, model_parameters=model_parameters, stream=False
                 )
             except InvokeError as e:
                 error = str(e)
@@ -230,7 +197,7 @@ class LLMGenerator:
                 inputs={
                     "INPUT_TEXT": prompt_content.message.content,
                 },
-                remove_template_variables=False
+                remove_template_variables=False,
             )
             parameter_messages = [UserPromptMessage(content=parameter_generate_prompt)]

@@ -240,15 +207,13 @@ class LLMGenerator:
                     "TASK_DESCRIPTION": instruction,
                     "INPUT_TEXT": prompt_content.message.content,
                 },
-                remove_template_variables=False
+                remove_template_variables=False,
             )
             statement_messages = [UserPromptMessage(content=statement_generate_prompt)]

             try:
                 parameter_content = model_instance.invoke_llm(
-                    prompt_messages=parameter_messages,
-                    model_parameters=model_parameters,
-                    stream=False
+                    prompt_messages=parameter_messages, model_parameters=model_parameters, stream=False
                 )
                 rule_config["variables"] = re.findall(r'"\s*([^"]+)\s*"', parameter_content.message.content)
             except InvokeError as e:
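The re.findall call above is what turns the model's parameter suggestions into rule_config["variables"], and it is untouched by the reformatting. A small standalone sketch of that extraction, using an invented sample of model output:

import re

# Hypothetical model output listing the template variables it decided on.
parameter_output = 'The variables are: ["task_description", "tone", "target_audience"]'

# Same pattern as the diff: pull out every double-quoted token.
variables = re.findall(r'"\s*([^"]+)\s*"', parameter_output)
print(variables)  # -> ['task_description', 'tone', 'target_audience']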
@@ -257,9 +222,7 @@ class LLMGenerator:

             try:
                 statement_content = model_instance.invoke_llm(
-                    prompt_messages=statement_messages,
-                    model_parameters=model_parameters,
-                    stream=False
+                    prompt_messages=statement_messages, model_parameters=model_parameters, stream=False
                 )
                 rule_config["opening_statement"] = statement_content.message.content
             except InvokeError as e:
@@ -284,18 +247,10 @@ class LLMGenerator:
             model_type=ModelType.LLM,
         )

-        prompt_messages = [
-            SystemPromptMessage(content=prompt),
-            UserPromptMessage(content=query)
-        ]
+        prompt_messages = [SystemPromptMessage(content=prompt), UserPromptMessage(content=query)]

         response = model_instance.invoke_llm(
-            prompt_messages=prompt_messages,
-            model_parameters={
-                'temperature': 0.01,
-                "max_tokens": 2000
-            },
-            stream=False
+            prompt_messages=prompt_messages, model_parameters={"temperature": 0.01, "max_tokens": 2000}, stream=False
         )

         answer = response.message.content
@@ -10,9 +10,12 @@ from libs.json_in_md_parser import parse_and_check_json_markdown


 class RuleConfigGeneratorOutputParser:
-
     def get_format_instructions(self) -> tuple[str, str, str]:
-        return RULE_CONFIG_PROMPT_GENERATE_TEMPLATE, RULE_CONFIG_PARAMETER_GENERATE_TEMPLATE, RULE_CONFIG_STATEMENT_GENERATE_TEMPLATE
+        return (
+            RULE_CONFIG_PROMPT_GENERATE_TEMPLATE,
+            RULE_CONFIG_PARAMETER_GENERATE_TEMPLATE,
+            RULE_CONFIG_STATEMENT_GENERATE_TEMPLATE,
+        )

     def parse(self, text: str) -> Any:
         try:
@@ -21,16 +24,9 @@ class RuleConfigGeneratorOutputParser:
             if not isinstance(parsed["prompt"], str):
                 raise ValueError("Expected 'prompt' to be a string.")
             if not isinstance(parsed["variables"], list):
-                raise ValueError(
-                    "Expected 'variables' to be a list."
-                )
+                raise ValueError("Expected 'variables' to be a list.")
             if not isinstance(parsed["opening_statement"], str):
-                raise ValueError(
-                    "Expected 'opening_statement' to be a str."
-                )
+                raise ValueError("Expected 'opening_statement' to be a str.")
             return parsed
         except Exception as e:
-            raise OutputParserException(
-                f"Parsing text\n{text}\n of rule config generator raised following error:\n{e}"
-            )
-
+            raise OutputParserException(f"Parsing text\n{text}\n of rule config generator raised following error:\n{e}")
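The validation in this hunk is unchanged apart from formatting. For illustration, the same shape checks applied to a hand-written dict standing in for what parse_and_check_json_markdown might return (the dict contents below are invented):

# Illustrative parsed result; a real run would come from the rule-config model output.
parsed = {
    "prompt": "You are a helpful writing assistant for {{topic}}.",
    "variables": ["topic"],
    "opening_statement": "Hi! What would you like to write about today?",
}

# The same type checks the reformatted parse() performs.
if not isinstance(parsed["prompt"], str):
    raise ValueError("Expected 'prompt' to be a string.")
if not isinstance(parsed["variables"], list):
    raise ValueError("Expected 'variables' to be a list.")
if not isinstance(parsed["opening_statement"], str):
    raise ValueError("Expected 'opening_statement' to be a str.")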
@@ -6,7 +6,6 @@ from core.llm_generator.prompts import SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCT


 class SuggestedQuestionsAfterAnswerOutputParser:
-
     def get_format_instructions(self) -> str:
         return SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT

@@ -15,7 +14,7 @@ class SuggestedQuestionsAfterAnswerOutputParser:
         if action_match is not None:
             json_obj = json.loads(action_match.group(0).strip())
         else:
-            json_obj= []
+            json_obj = []
             print(f"Could not parse LLM output: {text}")

         return json_obj
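For context, a minimal sketch of the parse-or-fallback behavior this hunk reformats. The bracket-matching regex is an assumption, since the actual action_match pattern is defined outside the hunk:

import json
import re

def parse_suggested_questions(text: str) -> list:
    # Assumed pattern: grab the first JSON-array-looking span in the model output.
    action_match = re.search(r"\[.*\]", text, re.DOTALL)
    if action_match is not None:
        json_obj = json.loads(action_match.group(0).strip())
    else:
        json_obj = []
        print(f"Could not parse LLM output: {text}")
    return json_obj

print(parse_suggested_questions('Here you go: ["q1","q2","q3"]'))  # -> ['q1', 'q2', 'q3']
print(parse_suggested_questions("no JSON at all"))  # warns, then returns []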
@@ -66,19 +66,19 @@ SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = (
     "and keeping each question under 20 characters.\n"
     "MAKE SURE your output is the SAME language as the Assistant's latest response(if the main response is written in Chinese, then the language of your output must be using Chinese.)!\n"
     "The output must be an array in JSON format following the specified schema:\n"
-    "[\"question1\",\"question2\",\"question3\"]\n"
+    '["question1","question2","question3"]\n'
 )

 GENERATOR_QA_PROMPT = (
-    '<Task> The user will send a long text. Generate a Question and Answer pairs only using the knowledge in the long text. Please think step by step.'
-    'Step 1: Understand and summarize the main content of this text.\n'
-    'Step 2: What key information or concepts are mentioned in this text?\n'
-    'Step 3: Decompose or combine multiple pieces of information and concepts.\n'
-    'Step 4: Generate questions and answers based on these key information and concepts.\n'
-    '<Constraints> The questions should be clear and detailed, and the answers should be detailed and complete. '
-    'You must answer in {language}, in a style that is clear and detailed in {language}. No language other than {language} should be used. \n'
-    '<Format> Use the following format: Q1:\nA1:\nQ2:\nA2:...\n'
-    '<QA Pairs>'
+    "<Task> The user will send a long text. Generate a Question and Answer pairs only using the knowledge in the long text. Please think step by step."
+    "Step 1: Understand and summarize the main content of this text.\n"
+    "Step 2: What key information or concepts are mentioned in this text?\n"
+    "Step 3: Decompose or combine multiple pieces of information and concepts.\n"
+    "Step 4: Generate questions and answers based on these key information and concepts.\n"
+    "<Constraints> The questions should be clear and detailed, and the answers should be detailed and complete. "
+    "You must answer in {language}, in a style that is clear and detailed in {language}. No language other than {language} should be used. \n"
+    "<Format> Use the following format: Q1:\nA1:\nQ2:\nA2:...\n"
+    "<QA Pairs>"
 )

 WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE = """
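The quote-style changes above are purely cosmetic: Python concatenates adjacent string literals inside parentheses, so the prompt constants keep exactly the same value. A tiny check illustrating that, using two lines taken from the hunk:

# Adjacent string literals are joined at compile time; only the quote style differs.
single_quoted = (
    '<Format> Use the following format: Q1:\nA1:\nQ2:\nA2:...\n'
    '<QA Pairs>'
)
double_quoted = (
    "<Format> Use the following format: Q1:\nA1:\nQ2:\nA2:...\n"
    "<QA Pairs>"
)
assert single_quoted == double_quoted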