chore: refurbish Python code by applying refurb linter rules (#8296)

This commit is contained in:
Bowen Liang
2024-09-12 15:50:49 +08:00
committed by GitHub
parent c69f5b07ba
commit 40fb4d16ef
105 changed files with 220 additions and 276 deletions

View File

@@ -106,7 +106,7 @@ class ApiToolProviderController(ToolProviderController):
"human": {"en_US": tool_bundle.summary or "", "zh_Hans": tool_bundle.summary or ""},
"llm": tool_bundle.summary or "",
},
"parameters": tool_bundle.parameters if tool_bundle.parameters else [],
"parameters": tool_bundle.parameters or [],
}
)

View File

@@ -1,4 +1,5 @@
import json
import operator
from typing import Any, Union
import boto3
@@ -71,7 +72,7 @@ class SageMakerReRankTool(BuiltinTool):
candidate_docs[idx]["score"] = scores[idx]
sorted_candidate_docs = sorted(candidate_docs, key=lambda x: x["score"], reverse=True)
sorted_candidate_docs = sorted(candidate_docs, key=operator.itemgetter("score"), reverse=True)
return [self.create_json_message(res) for res in sorted_candidate_docs[: self.topk]]

View File

@@ -115,7 +115,7 @@ class GetWorksheetFieldsTool(BuiltinTool):
fields.append(field)
fields_list.append(
f"|{field['id']}|{field['name']}|{field['type']}|{field['typeId']}|{field['description']}"
f"|{field['options'] if field['options'] else ''}|"
f"|{field['options'] or ''}|"
)
fields.append(

View File

@@ -130,7 +130,7 @@ class GetWorksheetPivotDataTool(BuiltinTool):
# ]
rows = []
for row in data["data"]:
row_data = row["rows"] if row["rows"] else {}
row_data = row["rows"] or {}
row_data.update(row["columns"])
row_data.update(row["values"])
rows.append(row_data)

View File

@@ -113,7 +113,7 @@ class ListWorksheetRecordsTool(BuiltinTool):
result_text = f"Found {result['total']} rows in worksheet \"{worksheet_name}\"."
if result["total"] > 0:
result_text += (
f" The following are {result['total'] if result['total'] < limit else limit}"
f" The following are {min(limit, result['total'])}"
f" pieces of data presented in a table format:\n\n{table_header}"
)
for row in rows:

View File

@@ -37,7 +37,7 @@ class SearchAPI:
return {
"engine": "youtube_transcripts",
"video_id": video_id,
"lang": language if language else "en",
"lang": language or "en",
**{key: value for key, value in kwargs.items() if value not in [None, ""]},
}

View File

@@ -160,7 +160,7 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool):
hit_callback.on_query(query, dataset.id)
# get retrieval model , if the model is not setting , using default
retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
retrieval_model = dataset.retrieval_model or default_retrieval_model
if dataset.indexing_technique == "economy":
# use keyword table query
@@ -183,9 +183,7 @@ class DatasetMultiRetrieverTool(DatasetRetrieverBaseTool):
reranking_model=retrieval_model.get("reranking_model", None)
if retrieval_model["reranking_enable"]
else None,
reranking_mode=retrieval_model.get("reranking_mode")
if retrieval_model.get("reranking_mode")
else "reranking_model",
reranking_mode=retrieval_model.get("reranking_mode") or "reranking_model",
weights=retrieval_model.get("weights", None),
)

View File

@@ -55,7 +55,7 @@ class DatasetRetrieverTool(DatasetRetrieverBaseTool):
hit_callback.on_query(query, dataset.id)
# get retrieval model , if the model is not setting , using default
retrieval_model = dataset.retrieval_model if dataset.retrieval_model else default_retrieval_model
retrieval_model = dataset.retrieval_model or default_retrieval_model
if dataset.indexing_technique == "economy":
# use keyword table query
documents = RetrievalService.retrieve(
@@ -76,9 +76,7 @@ class DatasetRetrieverTool(DatasetRetrieverBaseTool):
reranking_model=retrieval_model.get("reranking_model", None)
if retrieval_model["reranking_enable"]
else None,
reranking_mode=retrieval_model.get("reranking_mode")
if retrieval_model.get("reranking_mode")
else "reranking_model",
reranking_mode=retrieval_model.get("reranking_mode") or "reranking_model",
weights=retrieval_model.get("weights", None),
)
else:

View File

@@ -8,6 +8,7 @@ import subprocess
import tempfile
import unicodedata
from contextlib import contextmanager
from pathlib import Path
from urllib.parse import unquote
import chardet
@@ -98,7 +99,7 @@ def get_url(url: str, user_agent: str = None) -> str:
authors=a["byline"],
publish_date=a["date"],
top_image="",
text=a["plain_text"] if a["plain_text"] else "",
text=a["plain_text"] or "",
)
return res
@@ -117,8 +118,7 @@ def extract_using_readabilipy(html):
subprocess.check_call(["node", "ExtractArticle.js", "-i", html_path, "-o", article_json_path])
# Read output of call to Readability.parse() from JSON file and return as Python dictionary
with open(article_json_path, encoding="utf-8") as json_file:
input_json = json.loads(json_file.read())
input_json = json.loads(Path(article_json_path).read_text(encoding="utf-8"))
# Deleting files after processing
os.unlink(article_json_path)

View File

@@ -21,7 +21,7 @@ def load_yaml_file(file_path: str, ignore_error: bool = True, default_value: Any
with open(file_path, encoding="utf-8") as yaml_file:
try:
yaml_content = yaml.safe_load(yaml_file)
return yaml_content if yaml_content else default_value
return yaml_content or default_value
except Exception as e:
raise YAMLError(f"Failed to load YAML file {file_path}: {e}")
except Exception as e: