chore(api/tasks): apply ruff reformatting (#7594)
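This is a mechanical formatting pass: no behavior changes, mostly single-quoted strings rewritten to the formatter's default double-quote style and multi-line expressions renormalized. A diff like this is typically produced by running ruff's formatter over the file; the exact invocation used for #7594 is not recorded here, but a plausible command (assuming a recent ruff with the formatter enabled) is:

    ruff format api/tasks/add_document_to_index_task.py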
@@ -14,7 +14,7 @@ from models.dataset import Document as DatasetDocument
 from models.dataset import DocumentSegment


-@shared_task(queue='dataset')
+@shared_task(queue="dataset")
 def add_document_to_index_task(dataset_document_id: str):
     """
     Async Add document to index
@@ -22,24 +22,25 @@ def add_document_to_index_task(dataset_document_id: str):

     Usage: add_document_to_index.delay(document_id)
     """
-    logging.info(click.style('Start add document to index: {}'.format(dataset_document_id), fg='green'))
+    logging.info(click.style("Start add document to index: {}".format(dataset_document_id), fg="green"))
     start_at = time.perf_counter()

     dataset_document = db.session.query(DatasetDocument).filter(DatasetDocument.id == dataset_document_id).first()
     if not dataset_document:
-        raise NotFound('Document not found')
+        raise NotFound("Document not found")

-    if dataset_document.indexing_status != 'completed':
+    if dataset_document.indexing_status != "completed":
         return

-    indexing_cache_key = 'document_{}_indexing'.format(dataset_document.id)
+    indexing_cache_key = "document_{}_indexing".format(dataset_document.id)

     try:
-        segments = db.session.query(DocumentSegment).filter(
-            DocumentSegment.document_id == dataset_document.id,
-            DocumentSegment.enabled == True
-        ) \
-            .order_by(DocumentSegment.position.asc()).all()
+        segments = (
+            db.session.query(DocumentSegment)
+            .filter(DocumentSegment.document_id == dataset_document.id, DocumentSegment.enabled == True)
+            .order_by(DocumentSegment.position.asc())
+            .all()
+        )

         documents = []
         for segment in segments:
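Two observations on the rewritten segments query, not part of the commit itself: the formatter replaces the backslash continuation with a parenthesized method chain, and it leaves DocumentSegment.enabled == True untouched because that comparison builds a SQLAlchemy SQL expression and formatting never applies lint fixes. For illustration only, a lint-friendly equivalent using SQLAlchemy's is_() operator would look like this (a sketch, assuming the same models):

        segments = (
            db.session.query(DocumentSegment)
            .filter(
                DocumentSegment.document_id == dataset_document.id,
                DocumentSegment.enabled.is_(True),  # renders as "enabled IS true"; avoids the E712-style == True
            )
            .order_by(DocumentSegment.position.asc())
            .all()
        )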
@@ -50,7 +51,7 @@ def add_document_to_index_task(dataset_document_id: str):
                     "doc_hash": segment.index_node_hash,
                     "document_id": segment.document_id,
                     "dataset_id": segment.dataset_id,
-                }
+                },
             )

             documents.append(document)
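The } to }, change above is the formatter's magic trailing comma at work: like Black, ruff format adds a trailing comma to the final element of a call or collection it keeps split across lines, so appending another metadata key later touches only one line of the diff.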
@@ -58,7 +59,7 @@ def add_document_to_index_task(dataset_document_id: str):
         dataset = dataset_document.dataset

         if not dataset:
-            raise Exception('Document has no dataset')
+            raise Exception("Document has no dataset")

         index_type = dataset.doc_form
         index_processor = IndexProcessorFactory(index_type).init_index_processor()
@@ -66,12 +67,15 @@ def add_document_to_index_task(dataset_document_id: str):

         end_at = time.perf_counter()
         logging.info(
-            click.style('Document added to index: {} latency: {}'.format(dataset_document.id, end_at - start_at), fg='green'))
+            click.style(
+                "Document added to index: {} latency: {}".format(dataset_document.id, end_at - start_at), fg="green"
+            )
+        )
     except Exception as e:
         logging.exception("add document to index failed")
         dataset_document.enabled = False
         dataset_document.disabled_at = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)
-        dataset_document.status = 'error'
+        dataset_document.status = "error"
         dataset_document.error = str(e)
         db.session.commit()
     finally:
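One idiom in the except branch is worth a gloss: disabled_at is stored as a naive UTC timestamp. A minimal standalone illustration of what that expression evaluates to (standard library only; variable names are ours, not the commit's):

    import datetime

    aware = datetime.datetime.now(datetime.timezone.utc)  # timezone-aware current UTC time
    naive_utc = aware.replace(tzinfo=None)                # same UTC wall-clock value, tzinfo dropped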