Chore : rm dead code detected by pylance (#24588)

commit 826f19e968
parent c06cfcbb5a
Author: Yongtao Huang
Date: 2025-08-27 13:19:40 +08:00
Committed by: GitHub
8 changed files with 1 addition and 25 deletions
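Every hunk below removes the same kind of finding: a local variable that is assigned but never read again, which Pylance reports as "X is not accessed" (Pyright's reportUnusedVariable diagnostic). A minimal sketch of the pattern, with hypothetical names loosely mirroring the first hunk:

class User:
    # stand-in for the current_user object; purely illustrative
    def __init__(self, id: str, tenant_id: str):
        self.id = id
        self.current_tenant_id = tenant_id

def provider_info(user: User, provider: str) -> dict:
    user_id = user.id  # dead store: never read below, so Pylance flags it
    tenant_id = user.current_tenant_id
    return {"tenant_id": tenant_id, "provider": provider}

Removing a dead store is behavior-preserving as long as its right-hand side has no side effects; for the query-shaped removals below (.first(), .count()), the only side effect was a wasted database round-trip.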


@@ -95,7 +95,6 @@ class ToolBuiltinProviderInfoApi(Resource):
     def get(self, provider):
         user = current_user
-        user_id = user.id
         tenant_id = user.current_tenant_id
         return jsonable_encoder(BuiltinToolManageService.get_builtin_tool_provider_info(tenant_id, provider))
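user_id was read off current_user but never used; only tenant_id feeds the service call, so the assignment is a pure dead store.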


@@ -210,13 +210,6 @@ class IndexingRunner:
                 documents.append(document)
 
             # build index
-            # get the process rule
-            processing_rule = (
-                db.session.query(DatasetProcessRule)
-                .where(DatasetProcessRule.id == dataset_document.dataset_process_rule_id)
-                .first()
-            )
-
             index_type = dataset_document.doc_form
             index_processor = IndexProcessorFactory(index_type).init_index_processor()
             self._load(
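Besides the dead local, this hunk saves a database round-trip: the DatasetProcessRule row was fetched and then never consulted, since the index processor is chosen from dataset_document.doc_form alone.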


@@ -401,7 +401,6 @@ class LLMGenerator:
     def instruction_modify_legacy(
         tenant_id: str, flow_id: str, current: str, instruction: str, model_config: dict, ideal_output: str | None
     ) -> dict:
-        app: App | None = db.session.query(App).where(App.id == flow_id).first()
         last_run: Message | None = (
             db.session.query(Message).where(Message.app_id == flow_id).order_by(Message.created_at.desc()).first()
         )
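The App lookup was dead in this legacy path; only last_run, the most recent Message for the flow, is still queried and used.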


@@ -276,7 +276,6 @@ class OracleVector(BaseVector):
         if not isinstance(top_k, int) or top_k <= 0 or top_k > 10000:
             top_k = 5  # Use default if invalid
         # just not implement fetch by score_threshold now, may be later
-        score_threshold = float(kwargs.get("score_threshold") or 0.0)
         if len(query) > 0:
             # Check which language the query is in
             zh_pattern = re.compile("[\u4e00-\u9fa5]+")
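score_threshold was parsed out of kwargs but, as the comment above it says, threshold-based fetching is not implemented for this vector store, so the value was never applied.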


@@ -292,7 +292,6 @@ class ClickZettaVolumeStorage(BaseStorage):
         # Get the actual volume path (may include dify_km prefix)
         volume_path = self._get_volume_path(filename, dataset_id)
-        actual_filename = volume_path.split("/")[-1] if "/" in volume_path else volume_path
         # For User Volume, use the full path with dify_km prefix
         if volume_prefix == "USER VOLUME":
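actual_filename was derived from volume_path but never read afterwards; the branches that follow work with volume_path directly.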


@@ -7,7 +7,7 @@
 import json
 import logging
 from dataclasses import asdict, dataclass
-from datetime import datetime, timedelta
+from datetime import datetime
 from enum import Enum
 from typing import Any, Optional
@@ -185,7 +185,6 @@ class FileLifecycleManager:
             versions.append(current_metadata)
 
         # get historical versions
-        version_pattern = f"{self._version_prefix}{filename}.v*"
         try:
             version_files = self._storage.scan(self._dataset_id or "", files=True)
             for file_path in version_files:
@@ -331,7 +330,6 @@ class FileLifecycleManager:
         """
         try:
             cleaned_count = 0
-            cutoff_date = datetime.now() - timedelta(days=max_age_days)
 
             # get all version files
             try:
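These three hunks are one file: cutoff_date was computed but never compared against anything, which left timedelta unused at the import site (the trimmed import is the commit's single added line), and version_pattern was built but never passed to the scan call.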


@@ -2440,16 +2440,6 @@ class SegmentService:
         with redis_client.lock(lock_name, timeout=20):
             index_node_id = str(uuid.uuid4())
             index_node_hash = helper.generate_text_hash(content)
-            child_chunk_count = (
-                db.session.query(ChildChunk)
-                .where(
-                    ChildChunk.tenant_id == current_user.current_tenant_id,
-                    ChildChunk.dataset_id == dataset.id,
-                    ChildChunk.document_id == document.id,
-                    ChildChunk.segment_id == segment.id,
-                )
-                .count()
-            )
             max_position = (
                 db.session.query(func.max(ChildChunk.position))
                 .where(
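The ChildChunk count was computed inside the Redis lock and then discarded; only the max(position) query that follows it is actually used.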


@@ -24,7 +24,6 @@ def retry_document_indexing_task(dataset_id: str, document_ids: list[str]):
 
     Usage: retry_document_indexing_task.delay(dataset_id, document_ids)
     """
-    documents: list[Document] = []
    start_at = time.perf_counter()
    try:
        dataset = db.session.query(Dataset).where(Dataset.id == dataset_id).first()
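The typed documents list was never appended to or read in this task; start_at survives, presumably still feeding a latency log further down.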