Fix/ignore economy dataset (#1043)

Dataset maintenance tasks no longer assume every dataset has an embedding model: vector-index operations are now gated on indexing_technique == 'high_quality', so 'economy' (keyword-index) datasets are skipped or counted as zero embedding tokens instead of failing.

Co-authored-by: jyong <jyong@dify.ai>
@@ -49,18 +49,20 @@ def batch_create_segment_to_index_task(job_id: str, content: List, dataset_id: s
         if not dataset_document.enabled or dataset_document.archived or dataset_document.indexing_status != 'completed':
             raise ValueError('Document is not available.')
         document_segments = []
-        for segment in content:
-            content = segment['content']
-            doc_id = str(uuid.uuid4())
-            segment_hash = helper.generate_text_hash(content)
+        embedding_model = None
+        if dataset.indexing_technique == 'high_quality':
             embedding_model = ModelFactory.get_embedding_model(
                 tenant_id=dataset.tenant_id,
                 model_provider_name=dataset.embedding_model_provider,
                 model_name=dataset.embedding_model
             )
+        for segment in content:
+            content = segment['content']
+            doc_id = str(uuid.uuid4())
+            segment_hash = helper.generate_text_hash(content)
             # calc embedding use tokens
-            tokens = embedding_model.get_num_tokens(content)
+            tokens = embedding_model.get_num_tokens(content) if embedding_model else 0
             max_position = db.session.query(func.max(DocumentSegment.position)).filter(
                 DocumentSegment.document_id == dataset_document.id
             ).scalar()
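Net effect of the hunk above: the embedding model is resolved once, before the segment loop, and only when the dataset's indexing technique calls for one; economy (keyword-index) datasets fall through with a token count of 0 instead of failing on missing embedding-model configuration. A minimal, self-contained sketch of the guard — the stub names below are illustrative stand-ins, not Dify APIs:

# Self-contained sketch of the guard above; DatasetStub and
# StubEmbeddingModel are hypothetical stand-ins, not Dify classes.
from dataclasses import dataclass
from typing import Optional

@dataclass
class DatasetStub:
    indexing_technique: str  # 'high_quality' or 'economy'

class StubEmbeddingModel:
    def get_num_tokens(self, text: str) -> int:
        return len(text.split())  # crude stand-in for a real tokenizer

def get_embedding_model(dataset: DatasetStub) -> Optional[StubEmbeddingModel]:
    # Resolve the model once, outside the loop, and only when the
    # dataset's indexing technique actually requires embeddings.
    if dataset.indexing_technique == 'high_quality':
        return StubEmbeddingModel()
    return None

def count_embedding_tokens(dataset: DatasetStub, segments: list) -> int:
    model = get_embedding_model(dataset)
    # Economy datasets have no embedding model, so they consume 0 tokens.
    return sum(model.get_num_tokens(s) if model else 0 for s in segments)

assert count_embedding_tokens(DatasetStub('economy'), ['a b c']) == 0
assert count_embedding_tokens(DatasetStub('high_quality'), ['a b c']) == 3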
@@ -3,8 +3,10 @@ import time
 
 import click
 from celery import shared_task
+from flask import current_app
 
 from core.index.index import IndexBuilder
+from core.index.vector_index.vector_index import VectorIndex
 from extensions.ext_database import db
 from models.dataset import DocumentSegment, Dataset, DatasetKeywordTable, DatasetQuery, DatasetProcessRule, \
     AppDatasetJoin, Document
@@ -35,11 +37,11 @@ def clean_dataset_task(dataset_id: str, tenant_id: str, indexing_technique: str,
         documents = db.session.query(Document).filter(Document.dataset_id == dataset_id).all()
         segments = db.session.query(DocumentSegment).filter(DocumentSegment.dataset_id == dataset_id).all()
 
-        vector_index = IndexBuilder.get_index(dataset, 'high_quality')
         kw_index = IndexBuilder.get_index(dataset, 'economy')
 
         # delete from vector index
-        if vector_index:
+        if dataset.indexing_technique == 'high_quality':
+            vector_index = IndexBuilder.get_default_high_quality_index(dataset)
             try:
                 vector_index.delete()
             except Exception:
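The cleanup path now branches on the dataset's own indexing_technique instead of testing whether get_index returned a handle, and high-quality datasets obtain their index from the new get_default_high_quality_index helper. Together with the newly imported current_app and VectorIndex, this suggests the helper builds a vector index from application config and default embeddings, so deletion still works when the dataset's embedding-model fields are unset. A hypothetical sketch of such a fallback factory, with stub types rather than the real implementation in core.index.index:

# Hypothetical sketch in the spirit of get_default_high_quality_index;
# stub types, NOT the Dify source.
class VectorIndexHandle:
    """Stub for core.index.vector_index.vector_index.VectorIndex."""
    def __init__(self, dataset, config, embeddings):
        self.dataset, self.config, self.embeddings = dataset, config, embeddings

    def delete(self):
        print('deleting vector collection for dataset', self.dataset['id'])

def get_default_high_quality_index(dataset, app_config, default_embeddings):
    # Use tenant-level default embeddings instead of the dataset's own
    # embedding_model_provider / embedding_model fields, which may be
    # unset by the time the dataset is torn down.
    return VectorIndexHandle(dataset=dataset, config=app_config,
                             embeddings=default_embeddings)

get_default_high_quality_index({'id': 'ds-1'}, {}, object()).delete()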
@@ -31,7 +31,7 @@ def deal_dataset_vector_index_task(dataset_id: str, action: str):
         raise Exception('Dataset not found')
 
     if action == "remove":
-        index = IndexBuilder.get_index(dataset, 'high_quality', ignore_high_quality_check=True)
+        index = IndexBuilder.get_index(dataset, 'high_quality', ignore_high_quality_check=False)
         index.delete()
     elif action == "add":
         dataset_documents = db.session.query(DatasetDocument).filter(
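Both hunks in this task flip ignore_high_quality_check from True to False. Judging from how the flag is used across this diff, get_index only returns a 'high_quality' index when the dataset's indexing_technique actually is 'high_quality' once the check is enforced, so economy datasets now yield None rather than an index backed by nonexistent embedding configuration. A hypothetical, self-contained reading of those semantics (stubs, not the Dify source):

# Hypothetical reading of ignore_high_quality_check, inferred from this
# diff; stub types, not Dify source.
from typing import Optional

class VectorIndexStub:
    def delete(self) -> None:
        print('vector index deleted')

def get_index(dataset_technique: str, requested: str,
              ignore_high_quality_check: bool = False) -> Optional[VectorIndexStub]:
    if requested == 'high_quality':
        # Enforced check: an economy dataset gets None instead of a
        # vector index whose embedding configuration does not exist.
        if not ignore_high_quality_check and dataset_technique != 'high_quality':
            return None
        return VectorIndexStub()
    return None  # keyword-index branch elided in this sketch

assert get_index('economy', 'high_quality') is None
get_index('high_quality', 'high_quality').delete()

Since index.delete() is called unguarded in the hunk above, the 'remove' action is presumably only dispatched for datasets that still have a vector index; the hunk below applies the same flip to the 'add' path.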
@@ -43,7 +43,7 @@ def deal_dataset_vector_index_task(dataset_id: str, action: str):
 
         if dataset_documents:
             # save vector index
-            index = IndexBuilder.get_index(dataset, 'high_quality', ignore_high_quality_check=True)
+            index = IndexBuilder.get_index(dataset, 'high_quality', ignore_high_quality_check=False)
             documents = []
             for dataset_document in dataset_documents:
                 # delete from vector index
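For context, a hedged sketch of how such a celery task is typically enqueued from the service layer; the actual call sites are not part of this diff, and these two lines are illustrative only:

# Hypothetical call sites; 'remove' and 'add' are the action strings
# handled by the hunks above.
deal_dataset_vector_index_task.delay(dataset_id, 'remove')  # e.g. when a dataset is disabled
deal_dataset_vector_index_task.delay(dataset_id, 'add')     # e.g. when it is re-enabled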