diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py
index a21c67ed5..a0f3ac7f8 100644
--- a/api/core/ops/langfuse_trace/langfuse_trace.py
+++ b/api/core/ops/langfuse_trace/langfuse_trace.py
@@ -204,6 +204,7 @@ class LangFuseDataTrace(BaseTraceInstance):
                 node_generation_data = LangfuseGeneration(
                     name="llm",
                     trace_id=trace_id,
+                    model=process_data.get("model_name"),
                     parent_observation_id=node_execution_id,
                     start_time=created_at,
                     end_time=finished_at,
diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py
index fde8a06c6..cc242905b 100644
--- a/api/core/ops/langsmith_trace/langsmith_trace.py
+++ b/api/core/ops/langsmith_trace/langsmith_trace.py
@@ -139,8 +139,7 @@ class LangSmithDataTrace(BaseTraceInstance):
                 json.loads(node_execution.execution_metadata) if node_execution.execution_metadata else {}
             )
             node_total_tokens = execution_metadata.get("total_tokens", 0)
-
-            metadata = json.loads(node_execution.execution_metadata) if node_execution.execution_metadata else {}
+            metadata = execution_metadata.copy()
             metadata.update(
                 {
                     "workflow_run_id": trace_info.workflow_run_id,
@@ -156,6 +155,12 @@ class LangSmithDataTrace(BaseTraceInstance):
             process_data = json.loads(node_execution.process_data) if node_execution.process_data else {}
             if process_data and process_data.get("model_mode") == "chat":
                 run_type = LangSmithRunType.llm
+                metadata.update(
+                    {
+                        'ls_provider': process_data.get('model_provider', ''),
+                        'ls_model_name': process_data.get('model_name', ''),
+                    }
+                )
             elif node_type == "knowledge-retrieval":
                 run_type = LangSmithRunType.retriever
             else:
diff --git a/api/core/workflow/nodes/llm/llm_node.py b/api/core/workflow/nodes/llm/llm_node.py
index eb8921b52..49f61bd59 100644
--- a/api/core/workflow/nodes/llm/llm_node.py
+++ b/api/core/workflow/nodes/llm/llm_node.py
@@ -109,7 +109,9 @@ class LLMNode(BaseNode):
             'prompts': PromptMessageUtil.prompt_messages_to_prompt_for_saving(
                 model_mode=model_config.mode, prompt_messages=prompt_messages
-            )
+            ),
+            'model_provider': model_config.provider,
+            'model_name': model_config.model,
         }
 
         # handle invoke result