chore(api/tasks): apply ruff reformatting (#7594)

Author: Bowen Liang
Date:   2024-08-26 13:38:37 +08:00 (committed by GitHub)
Parent: 3be756eaed
Commit: 979422cdc6

29 changed files with 546 additions and 508 deletions
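For context, a reformat-only commit like this is normally produced by running ruff's formatter over the package and committing the result. A minimal sketch in Python (the api/tasks path comes from the commit title; the installed ruff version and the repo's ruff configuration are assumptions):

import subprocess

# Rewrite files in place: single quotes become double quotes, long calls
# are wrapped in parentheses, and the configured line length is enforced.
subprocess.run(["ruff", "format", "api/tasks"], check=True)

# A second pass with --check exits non-zero if any file is still unformatted,
# confirming the commit contains formatting changes only.
subprocess.run(["ruff", "format", "--check", "api/tasks"], check=True)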


@@ -13,7 +13,7 @@ from models.dataset import Dataset, Document, DocumentSegment
 from services.feature_service import FeatureService
 
 
-@shared_task(queue='dataset')
+@shared_task(queue="dataset")
 def duplicate_document_indexing_task(dataset_id: str, document_ids: list):
     """
     Async process document
@@ -37,16 +37,17 @@ def duplicate_document_indexing_task(dataset_id: str, document_ids: list):
             if count > batch_upload_limit:
                 raise ValueError(f"You have reached the batch upload limit of {batch_upload_limit}.")
             if 0 < vector_space.limit <= vector_space.size:
-                raise ValueError("Your total number of documents plus the number of uploads have over the limit of "
-                                 "your subscription.")
+                raise ValueError(
+                    "Your total number of documents plus the number of uploads have over the limit of "
+                    "your subscription."
+                )
     except Exception as e:
         for document_id in document_ids:
-            document = db.session.query(Document).filter(
-                Document.id == document_id,
-                Document.dataset_id == dataset_id
-            ).first()
+            document = (
+                db.session.query(Document).filter(Document.id == document_id, Document.dataset_id == dataset_id).first()
+            )
             if document:
-                document.indexing_status = 'error'
+                document.indexing_status = "error"
                 document.error = str(e)
                 document.stopped_at = datetime.datetime.utcnow()
                 db.session.add(document)
@@ -54,12 +55,11 @@ def duplicate_document_indexing_task(dataset_id: str, document_ids: list):
         return
 
     for document_id in document_ids:
-        logging.info(click.style('Start process document: {}'.format(document_id), fg='green'))
+        logging.info(click.style("Start process document: {}".format(document_id), fg="green"))
 
-        document = db.session.query(Document).filter(
-            Document.id == document_id,
-            Document.dataset_id == dataset_id
-        ).first()
+        document = (
+            db.session.query(Document).filter(Document.id == document_id, Document.dataset_id == dataset_id).first()
+        )
 
         if document:
             # clean old data
@@ -77,7 +77,7 @@ def duplicate_document_indexing_task(dataset_id: str, document_ids: list):
                     db.session.delete(segment)
                 db.session.commit()
 
-            document.indexing_status = 'parsing'
+            document.indexing_status = "parsing"
             document.processing_started_at = datetime.datetime.utcnow()
             documents.append(document)
             db.session.add(document)
@@ -87,8 +87,8 @@ def duplicate_document_indexing_task(dataset_id: str, document_ids: list):
         indexing_runner = IndexingRunner()
         indexing_runner.run(documents)
         end_at = time.perf_counter()
-        logging.info(click.style('Processed dataset: {} latency: {}'.format(dataset_id, end_at - start_at), fg='green'))
+        logging.info(click.style("Processed dataset: {} latency: {}".format(dataset_id, end_at - start_at), fg="green"))
     except DocumentIsPausedException as ex:
-        logging.info(click.style(str(ex), fg='yellow'))
+        logging.info(click.style(str(ex), fg="yellow"))
     except Exception:
         pass
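For reference, the @shared_task(queue="dataset") decorator makes this a Celery task, so callers enqueue it rather than invoking the function directly. A minimal usage sketch with placeholder IDs (the import path is an assumption based on the api/tasks layout):

# Enqueue the task on the "dataset" queue declared in the decorator.
# Both IDs below are placeholders for illustration.
from tasks.duplicate_document_indexing_task import duplicate_document_indexing_task

duplicate_document_indexing_task.delay(
    "dataset-uuid",                          # dataset_id: str
    ["document-uuid-1", "document-uuid-2"],  # document_ids: list
)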