Feat/fix ops trace (#5672)
Co-authored-by: takatost <takatost@gmail.com>
@@ -352,6 +352,101 @@ class Document(db.Model):
        return DocumentSegment.query.with_entities(func.coalesce(func.sum(DocumentSegment.hit_count), 0)) \
            .filter(DocumentSegment.document_id == self.id).scalar()

    def to_dict(self):
        return {
            'id': self.id,
            'tenant_id': self.tenant_id,
            'dataset_id': self.dataset_id,
            'position': self.position,
            'data_source_type': self.data_source_type,
            'data_source_info': self.data_source_info,
            'dataset_process_rule_id': self.dataset_process_rule_id,
            'batch': self.batch,
            'name': self.name,
            'created_from': self.created_from,
            'created_by': self.created_by,
            'created_api_request_id': self.created_api_request_id,
            'created_at': self.created_at,
            'processing_started_at': self.processing_started_at,
            'file_id': self.file_id,
            'word_count': self.word_count,
            'parsing_completed_at': self.parsing_completed_at,
            'cleaning_completed_at': self.cleaning_completed_at,
            'splitting_completed_at': self.splitting_completed_at,
            'tokens': self.tokens,
            'indexing_latency': self.indexing_latency,
            'completed_at': self.completed_at,
            'is_paused': self.is_paused,
            'paused_by': self.paused_by,
            'paused_at': self.paused_at,
            'error': self.error,
            'stopped_at': self.stopped_at,
            'indexing_status': self.indexing_status,
            'enabled': self.enabled,
            'disabled_at': self.disabled_at,
            'disabled_by': self.disabled_by,
            'archived': self.archived,
            'archived_reason': self.archived_reason,
            'archived_by': self.archived_by,
            'archived_at': self.archived_at,
            'updated_at': self.updated_at,
            'doc_type': self.doc_type,
            'doc_metadata': self.doc_metadata,
            'doc_form': self.doc_form,
            'doc_language': self.doc_language,
            'display_status': self.display_status,
            'data_source_info_dict': self.data_source_info_dict,
            'average_segment_length': self.average_segment_length,
            'dataset_process_rule': self.dataset_process_rule.to_dict() if self.dataset_process_rule else None,
            'dataset': self.dataset.to_dict() if self.dataset else None,
            'segment_count': self.segment_count,
            'hit_count': self.hit_count
        }

    @classmethod
    def from_dict(cls, data: dict):
        return cls(
            id=data.get('id'),
            tenant_id=data.get('tenant_id'),
            dataset_id=data.get('dataset_id'),
            position=data.get('position'),
            data_source_type=data.get('data_source_type'),
            data_source_info=data.get('data_source_info'),
            dataset_process_rule_id=data.get('dataset_process_rule_id'),
            batch=data.get('batch'),
            name=data.get('name'),
            created_from=data.get('created_from'),
            created_by=data.get('created_by'),
            created_api_request_id=data.get('created_api_request_id'),
            created_at=data.get('created_at'),
            processing_started_at=data.get('processing_started_at'),
            file_id=data.get('file_id'),
            word_count=data.get('word_count'),
            parsing_completed_at=data.get('parsing_completed_at'),
            cleaning_completed_at=data.get('cleaning_completed_at'),
            splitting_completed_at=data.get('splitting_completed_at'),
            tokens=data.get('tokens'),
            indexing_latency=data.get('indexing_latency'),
            completed_at=data.get('completed_at'),
            is_paused=data.get('is_paused'),
            paused_by=data.get('paused_by'),
            paused_at=data.get('paused_at'),
            error=data.get('error'),
            stopped_at=data.get('stopped_at'),
            indexing_status=data.get('indexing_status'),
            enabled=data.get('enabled'),
            disabled_at=data.get('disabled_at'),
            disabled_by=data.get('disabled_by'),
            archived=data.get('archived'),
            archived_reason=data.get('archived_reason'),
            archived_by=data.get('archived_by'),
            archived_at=data.get('archived_at'),
            updated_at=data.get('updated_at'),
            doc_type=data.get('doc_type'),
            doc_metadata=data.get('doc_metadata'),
            doc_form=data.get('doc_form'),
            doc_language=data.get('doc_language')
        )


class DocumentSegment(db.Model):
    __tablename__ = 'document_segments'
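The Document pair round-trips cleanly: to_dict also emits computed properties (display_status, segment_count, hit_count, the nested dataset), but from_dict only reads the column-backed keys via data.get(), so the extra keys are simply ignored. A minimal sketch, not part of the diff, assuming `document` is a Document loaded under an active Flask-SQLAlchemy session (the computed properties issue queries):

    # Serialize a loaded Document, e.g. to hand a plain dict to a tracing task,
    # then rebuild a transient model instance from it.
    payload = document.to_dict()          # includes computed keys like 'hit_count'
    clone = Document.from_dict(payload)   # non-column keys are ignored by data.get()
    assert clone.dataset_id == document.dataset_id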
@@ -838,6 +838,49 @@ class Message(db.Model):

        return None

    def to_dict(self) -> dict:
        return {
            'id': self.id,
            'app_id': self.app_id,
            'conversation_id': self.conversation_id,
            'inputs': self.inputs,
            'query': self.query,
            'message': self.message,
            'answer': self.answer,
            'status': self.status,
            'error': self.error,
            'message_metadata': self.message_metadata_dict,
            'from_source': self.from_source,
            'from_end_user_id': self.from_end_user_id,
            'from_account_id': self.from_account_id,
            'created_at': self.created_at.isoformat(),
            'updated_at': self.updated_at.isoformat(),
            'agent_based': self.agent_based,
            'workflow_run_id': self.workflow_run_id
        }

    @classmethod
    def from_dict(cls, data: dict):
        return cls(
            id=data['id'],
            app_id=data['app_id'],
            conversation_id=data['conversation_id'],
            inputs=data['inputs'],
            query=data['query'],
            message=data['message'],
            answer=data['answer'],
            status=data['status'],
            error=data['error'],
            message_metadata=json.dumps(data['message_metadata']),
            from_source=data['from_source'],
            from_end_user_id=data['from_end_user_id'],
            from_account_id=data['from_account_id'],
            created_at=data['created_at'],
            updated_at=data['updated_at'],
            agent_based=data['agent_based'],
            workflow_run_id=data['workflow_run_id']
        )


class MessageFeedback(db.Model):
    __tablename__ = 'message_feedbacks'
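Note the timestamp asymmetry in the Message pair: to_dict serializes created_at/updated_at with .isoformat(), while from_dict passes the values straight through to the datetime columns, so a consumer is expected to parse them back first. message_metadata round-trips via message_metadata_dict on the way out and json.dumps on the way in. A sketch of the parse-back step, assuming `message` is a loaded Message (variable names are illustrative):

    from datetime import datetime

    data = message.to_dict()
    data['created_at'] = datetime.fromisoformat(data['created_at'])
    data['updated_at'] = datetime.fromisoformat(data['updated_at'])
    restored = Message.from_dict(data)   # metadata dict is re-dumped to JSON text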
@@ -324,6 +324,55 @@ class WorkflowRun(db.Model):
    def workflow(self):
        return db.session.query(Workflow).filter(Workflow.id == self.workflow_id).first()

    def to_dict(self):
        return {
            'id': self.id,
            'tenant_id': self.tenant_id,
            'app_id': self.app_id,
            'sequence_number': self.sequence_number,
            'workflow_id': self.workflow_id,
            'type': self.type,
            'triggered_from': self.triggered_from,
            'version': self.version,
            'graph': self.graph_dict,
            'inputs': self.inputs_dict,
            'status': self.status,
            'outputs': self.outputs_dict,
            'error': self.error,
            'elapsed_time': self.elapsed_time,
            'total_tokens': self.total_tokens,
            'total_steps': self.total_steps,
            'created_by_role': self.created_by_role,
            'created_by': self.created_by,
            'created_at': self.created_at,
            'finished_at': self.finished_at,
        }

    @classmethod
    def from_dict(cls, data: dict) -> 'WorkflowRun':
        return cls(
            id=data.get('id'),
            tenant_id=data.get('tenant_id'),
            app_id=data.get('app_id'),
            sequence_number=data.get('sequence_number'),
            workflow_id=data.get('workflow_id'),
            type=data.get('type'),
            triggered_from=data.get('triggered_from'),
            version=data.get('version'),
            graph=json.dumps(data.get('graph')),
            inputs=json.dumps(data.get('inputs')),
            status=data.get('status'),
            outputs=json.dumps(data.get('outputs')),
            error=data.get('error'),
            elapsed_time=data.get('elapsed_time'),
            total_tokens=data.get('total_tokens'),
            total_steps=data.get('total_steps'),
            created_by_role=data.get('created_by_role'),
            created_by=data.get('created_by'),
            created_at=data.get('created_at'),
            finished_at=data.get('finished_at'),
        )


class WorkflowNodeExecutionTriggeredFrom(Enum):
    """
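WorkflowRun follows the same pattern: graph, inputs, and outputs are exposed as parsed dicts (graph_dict, inputs_dict, outputs_dict) in to_dict, and re-serialized with json.dumps in from_dict before they reach the text columns. A small round-trip sketch, not part of the diff, assuming `workflow_run` is a loaded record:

    import json

    snapshot = workflow_run.to_dict()        # 'graph' is a parsed dict here
    copy = WorkflowRun.from_dict(snapshot)   # stored back as a JSON string
    assert json.loads(copy.graph) == snapshot['graph']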