update dataset clean rule (#9426)

Jyong
2024-10-17 10:40:22 +08:00
committed by GitHub
parent bd678f9ca1
commit 5aabb83f5a
3 changed files with 19 additions and 105 deletions


@@ -17,10 +17,11 @@ from services.feature_service import FeatureService
 @app.celery.task(queue="dataset")
 def clean_unused_datasets_task():
     click.echo(click.style("Start clean unused datasets indexes.", fg="green"))
-    clean_days = dify_config.CLEAN_DAY_SETTING
+    plan_sandbox_clean_day_setting = dify_config.PLAN_SANDBOX_CLEAN_DAY_SETTING
+    plan_pro_clean_day_setting = dify_config.PLAN_PRO_CLEAN_DAY_SETTING
     start_at = time.perf_counter()
-    thirty_days_ago = datetime.datetime.now() - datetime.timedelta(days=clean_days)
-    seven_days_ago = datetime.datetime.now() - datetime.timedelta(days=7)
+    plan_sandbox_clean_day = datetime.datetime.now() - datetime.timedelta(days=plan_sandbox_clean_day_setting)
+    plan_pro_clean_day = datetime.datetime.now() - datetime.timedelta(days=plan_pro_clean_day_setting)
     page = 1
     while True:
         try:
@@ -31,7 +32,7 @@ def clean_unused_datasets_task():
                     Document.indexing_status == "completed",
                     Document.enabled == True,
                     Document.archived == False,
-                    Document.updated_at > thirty_days_ago,
+                    Document.updated_at > plan_sandbox_clean_day,
                 )
                 .group_by(Document.dataset_id)
                 .subquery()
@@ -44,7 +45,7 @@ def clean_unused_datasets_task():
                     Document.indexing_status == "completed",
                     Document.enabled == True,
                     Document.archived == False,
-                    Document.updated_at < thirty_days_ago,
+                    Document.updated_at < plan_sandbox_clean_day,
                 )
                 .group_by(Document.dataset_id)
                 .subquery()
@@ -56,7 +57,7 @@ def clean_unused_datasets_task():
                 .outerjoin(document_subquery_new, Dataset.id == document_subquery_new.c.dataset_id)
                 .outerjoin(document_subquery_old, Dataset.id == document_subquery_old.c.dataset_id)
                 .filter(
-                    Dataset.created_at < thirty_days_ago,
+                    Dataset.created_at < plan_sandbox_clean_day,
                     func.coalesce(document_subquery_new.c.document_count, 0) == 0,
                     func.coalesce(document_subquery_old.c.document_count, 0) > 0,
                 )
@@ -72,7 +73,7 @@ def clean_unused_datasets_task():
             for dataset in datasets:
                 dataset_query = (
                     db.session.query(DatasetQuery)
-                    .filter(DatasetQuery.created_at > thirty_days_ago, DatasetQuery.dataset_id == dataset.id)
+                    .filter(DatasetQuery.created_at > plan_sandbox_clean_day, DatasetQuery.dataset_id == dataset.id)
                     .all()
                 )
                 if not dataset_query or len(dataset_query) == 0:
@@ -101,7 +102,7 @@ def clean_unused_datasets_task():
                     Document.indexing_status == "completed",
                     Document.enabled == True,
                     Document.archived == False,
-                    Document.updated_at > seven_days_ago,
+                    Document.updated_at > plan_pro_clean_day,
                 )
                 .group_by(Document.dataset_id)
                 .subquery()
@@ -114,7 +115,7 @@ def clean_unused_datasets_task():
                     Document.indexing_status == "completed",
                     Document.enabled == True,
                     Document.archived == False,
-                    Document.updated_at < seven_days_ago,
+                    Document.updated_at < plan_pro_clean_day,
                 )
                 .group_by(Document.dataset_id)
                 .subquery()
@@ -126,7 +127,7 @@ def clean_unused_datasets_task():
                 .outerjoin(document_subquery_new, Dataset.id == document_subquery_new.c.dataset_id)
                 .outerjoin(document_subquery_old, Dataset.id == document_subquery_old.c.dataset_id)
                 .filter(
-                    Dataset.created_at < seven_days_ago,
+                    Dataset.created_at < plan_pro_clean_day,
                     func.coalesce(document_subquery_new.c.document_count, 0) == 0,
                     func.coalesce(document_subquery_old.c.document_count, 0) > 0,
                 )
@@ -142,7 +143,7 @@ def clean_unused_datasets_task():
             for dataset in datasets:
                 dataset_query = (
                     db.session.query(DatasetQuery)
-                    .filter(DatasetQuery.created_at > seven_days_ago, DatasetQuery.dataset_id == dataset.id)
+                    .filter(DatasetQuery.created_at > plan_pro_clean_day, DatasetQuery.dataset_id == dataset.id)
                     .all()
                 )
                 if not dataset_query or len(dataset_query) == 0:
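
The task now reads two plan-specific retention windows from dify_config instead of the old CLEAN_DAY_SETTING plus a hard-coded seven days. Below is a minimal sketch of how such settings could be declared, assuming a pydantic-settings style config class like the one dify_config is built from. Only the field names PLAN_SANDBOX_CLEAN_DAY_SETTING and PLAN_PRO_CLEAN_DAY_SETTING come from the diff; the class name, defaults, and descriptions are assumptions.

    # Hypothetical sketch of the plan-specific retention settings read by the task above.
    # Field names match the diff; class name, defaults, and descriptions are assumptions.
    from pydantic import Field
    from pydantic_settings import BaseSettings

    class DatasetCleanupConfig(BaseSettings):
        PLAN_SANDBOX_CLEAN_DAY_SETTING: int = Field(
            description="Days of inactivity before a sandbox-plan dataset's index is cleaned.",
            default=30,
        )
        PLAN_PRO_CLEAN_DAY_SETTING: int = Field(
            description="Days of inactivity before a pro/team-plan dataset's index is cleaned.",
            default=7,
        )

With a shape like this, the task's dify_config.PLAN_SANDBOX_CLEAN_DAY_SETTING and dify_config.PLAN_PRO_CLEAN_DAY_SETTING lookups resolve to plain integers that can be overridden through environment variables, which is what lets the sandbox and pro retention windows be tuned independently.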