refactor: Use typed SQLAlchemy base model and fix type errors (#19980)

Signed-off-by: -LAN- <laipz8200@outlook.com>
Author: -LAN-
Date: 2025-05-21 15:38:03 +08:00
Committer: GitHub
Parent: ef3569e667
Commit: 3196dc2d61

20 changed files with 272 additions and 174 deletions

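The "typed SQLAlchemy base model" in the title refers to SQLAlchemy 2.0-style declarative mapping, in which columns are declared as Mapped[...] attributes so type checkers can infer what queries return. A minimal sketch of the pattern, with illustrative names rather than Dify's actual schema:

    from sqlalchemy import String
    from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column


    class Base(DeclarativeBase):
        """Typed declarative base: subclasses get statically typed columns."""


    class DatasetDocument(Base):
        __tablename__ = "dataset_documents"  # illustrative table name

        id: Mapped[str] = mapped_column(String(36), primary_key=True)
        # Nullable column: `str | None` flows into downstream type checks.
        dataset_process_rule_id: Mapped[str | None] = mapped_column(String(36))

Once models are typed this way, db.session.query(DatasetDocument).first() is inferred as returning DatasetDocument | None, which is what exposes the variable-reuse problems fixed in the diff below.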

@@ -23,11 +23,10 @@ class VectorService:
     ):
         documents: list[Document] = []
-        document: Document | None = None
         for segment in segments:
             if doc_form == IndexType.PARENT_CHILD_INDEX:
-                document = db.session.query(DatasetDocument).filter_by(id=segment.document_id).first()
-                if not document:
+                dataset_document = db.session.query(DatasetDocument).filter_by(id=segment.document_id).first()
+                if not dataset_document:
                     _logger.warning(
                         "Expected DatasetDocument record to exist, but none was found, document_id=%s, segment_id=%s",
                         segment.document_id,
                         segment.id,
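
The first hunk retires the shared document variable: it was annotated as the RAG Document | None yet assigned a DatasetDocument ORM row, which the newly typed models turn into a reported error. The row now gets its own name, dataset_document, and the existing if not ...: guard doubles as type narrowing. A self-contained sketch of the before/after, using stand-in classes:

    from typing import Optional


    class Document: ...          # stand-in for the RAG document class
    class DatasetDocument: ...   # stand-in for the typed ORM model


    def first_row() -> Optional[DatasetDocument]:
        """Stand-in for db.session.query(DatasetDocument).filter_by(...).first()."""
        return DatasetDocument()


    # Old shape: one variable for two types. It runs, but a type checker now
    # reports an incompatible assignment on the second line.
    document: Optional[Document] = None
    document = first_row()  # DatasetDocument | None assigned to Document | None

    # New shape: a separate, honestly named variable; the `if not ...` guard
    # then narrows DatasetDocument | None to DatasetDocument.
    dataset_document = first_row()
    if not dataset_document:
        raise LookupError("expected DatasetDocument")  # the service logs and skips
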
@@ -37,7 +36,7 @@ class VectorService:
                 # get the process rule
                 processing_rule = (
                     db.session.query(DatasetProcessRule)
-                    .filter(DatasetProcessRule.id == document.dataset_process_rule_id)
+                    .filter(DatasetProcessRule.id == dataset_document.dataset_process_rule_id)
                     .first()
                 )
                 if not processing_rule:
@@ -61,9 +60,11 @@ class VectorService:
                     )
                 else:
                     raise ValueError("The knowledge base index technique is not high quality!")
-                cls.generate_child_chunks(segment, document, dataset, embedding_model_instance, processing_rule, False)
+                cls.generate_child_chunks(
+                    segment, dataset_document, dataset, embedding_model_instance, processing_rule, False
+                )
             else:
-                document = Document(
+                rag_document = Document(
                     page_content=segment.content,
                     metadata={
                         "doc_id": segment.index_node_id,
@@ -72,7 +73,7 @@ class VectorService:
"dataset_id": segment.dataset_id,
},
)
documents.append(document)
documents.append(rag_document)
if len(documents) > 0:
index_processor = IndexProcessorFactory(doc_form).init_index_processor()
index_processor.load(dataset, documents, with_keywords=True, keywords_list=keywords_list)
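
With the rename in place, documents accumulates only RAG Document instances, matching its list[Document] annotation, so index_processor.load(dataset, documents, ...) receives a homogeneous, correctly typed list.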