fix: prevent timeout in file encoding detection for large files (#21453)

Co-authored-by: crazywoola <427733928@qq.com>
Author: baonudesifeizhai
Date: 2025-07-03 05:06:49 -04:00
Committed by: GitHub
Parent: ed54bd5121
Commit: 1c7404099d
2 changed files with 12 additions and 4 deletions


@@ -1,7 +1,6 @@
 """Document loader helpers."""
 import concurrent.futures
-from pathlib import Path
 from typing import NamedTuple, Optional, cast
@@ -16,7 +15,7 @@ class FileEncoding(NamedTuple):
     """The language of the file."""

-def detect_file_encodings(file_path: str, timeout: int = 5) -> list[FileEncoding]:
+def detect_file_encodings(file_path: str, timeout: int = 5, sample_size: int = 1024 * 1024) -> list[FileEncoding]:
     """Try to detect the file encoding.

     Returns a list of `FileEncoding` tuples with the detected encodings ordered
@@ -25,11 +24,16 @@ def detect_file_encodings(file_path: str, timeout: int = 5) -> list[FileEncoding
     Args:
         file_path: The path to the file to detect the encoding for.
         timeout: The timeout in seconds for the encoding detection.
+        sample_size: The number of bytes to read for encoding detection. Default is 1MB.
+            For large files, reading only a sample is sufficient and prevents timeout.
     """
     import chardet

     def read_and_detect(file_path: str) -> list[dict]:
-        rawdata = Path(file_path).read_bytes()
+        with open(file_path, "rb") as f:
+            # Read only a sample of the file for encoding detection
+            # This prevents timeout on large files while still providing accurate encoding detection
+            rawdata = f.read(sample_size)
         return cast(list[dict], chardet.detect_all(rawdata))

     with concurrent.futures.ThreadPoolExecutor() as executor:
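
For reference, below is a minimal self-contained sketch of the pattern this patch applies: read at most sample_size bytes from the file, then run chardet's detection inside a thread pool guarded by timeout. The parts that fall outside the hunks above (the FileEncoding fields, the timeout error path, the filtering of None results) are reconstructed assumptions based on the common shape of this helper, not the exact upstream code; the demo file name and sizes are purely illustrative. Requires the chardet package.

import concurrent.futures
from typing import NamedTuple, Optional, cast


class FileEncoding(NamedTuple):
    encoding: Optional[str]  # e.g. "utf-8"; None if chardet could not decide
    confidence: float        # detection confidence in [0, 1]
    language: Optional[str]  # language hint, when chardet provides one


def detect_file_encodings(file_path: str, timeout: int = 5, sample_size: int = 1024 * 1024) -> list[FileEncoding]:
    import chardet

    def read_and_detect(file_path: str) -> list[dict]:
        with open(file_path, "rb") as f:
            # Bounded read: at most sample_size bytes, however large the file is.
            rawdata = f.read(sample_size)
        return cast(list[dict], chardet.detect_all(rawdata))

    with concurrent.futures.ThreadPoolExecutor() as executor:
        future = executor.submit(read_and_detect, file_path)
        try:
            encodings = future.result(timeout=timeout)
        except concurrent.futures.TimeoutError:
            # Assumed error path (not shown in the hunks): surface the timeout rather than hanging.
            raise TimeoutError(f"Timeout reached while detecting encoding for {file_path}")

    # Keep only candidates chardet actually identified.
    return [FileEncoding(**e) for e in encodings if e["encoding"] is not None]


if __name__ == "__main__":
    # Illustrative demo: a ~6 MB UTF-8 file; only the first 1 MB is read for detection.
    from pathlib import Path

    demo = Path("big_sample.txt")  # hypothetical file name
    demo.write_bytes("héllo wörld\n".encode("utf-8") * 500_000)
    print(detect_file_encodings(str(demo)))

The design trade-off: chardet's cost grows with input size, so an unbounded read_bytes() on a multi-gigabyte file could blow past the 5-second budget, while a fixed 1 MB sample keeps the work bounded and is typically far more data than chardet needs for a confident guess.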