improve: introduce isort for linting Python imports (#1983)

Author: Bowen Liang
Date: 2024-01-12 12:34:01 +08:00
Committed by: GitHub
Parent: cca9edc97a
Commit: cc9e74123c
413 changed files with 1635 additions and 1906 deletions
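Every hunk below is a mechanical reordering produced by isort: plain "import" statements come before "from ... import" statements, modules are alphabetized within each group, and the names imported on a single line are alphabetized as well. As a minimal sketch (not taken from this commit, whose isort configuration is not shown here), the same reordering can be previewed with isort's Python API:

import isort

# Imports in an unsorted order, similar to the removed lines below.
messy = (
    "from typing import List, Any\n"
    "from abc import abstractmethod, ABC\n"
    "import os\n"
)

# isort.code() returns the source with imports regrouped and alphabetized:
# "import os" first, then "from abc import ABC, abstractmethod",
# then "from typing import Any, List".
print(isort.code(messy))

The same check is typically run from the command line, for example "isort ." to rewrite files or "isort --check-only --diff ." in CI; the exact invocation added by this commit is not shown in the hunks below.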

View File

@@ -1,9 +1,9 @@
 from __future__ import annotations
-from abc import abstractmethod, ABC
-from typing import List, Any
-from langchain.schema import Document, BaseRetriever
+from abc import ABC, abstractmethod
+from typing import Any, List
+from langchain.schema import BaseRetriever, Document
 from models.dataset import Dataset

View File

@@ -1,11 +1,10 @@
-from flask import current_app
-from langchain.embeddings import OpenAIEmbeddings
 from core.embedding.cached_embedding import CacheEmbedding
-from core.index.keyword_table_index.keyword_table_index import KeywordTableIndex, KeywordTableConfig
+from core.index.keyword_table_index.keyword_table_index import KeywordTableConfig, KeywordTableIndex
 from core.index.vector_index.vector_index import VectorIndex
 from core.model_manager import ModelManager
 from core.model_runtime.entities.model_entities import ModelType
+from flask import current_app
+from langchain.embeddings import OpenAIEmbeddings
 from models.dataset import Dataset

View File

@@ -2,9 +2,8 @@ import re
 from typing import Set
 import jieba
-from jieba.analyse import default_tfidf
 from core.index.keyword_table_index.stopwords import STOPWORDS
+from jieba.analyse import default_tfidf
 class JiebaKeywordTableHandler:

View File

@@ -1,14 +1,13 @@
 import json
 from collections import defaultdict
-from typing import Any, List, Optional, Dict
-from langchain.schema import Document, BaseRetriever
-from pydantic import BaseModel, Field, Extra
+from typing import Any, Dict, List, Optional
 from core.index.base import BaseIndex
 from core.index.keyword_table_index.jieba_keyword_table_handler import JiebaKeywordTableHandler
 from extensions.ext_database import db
-from models.dataset import Dataset, DocumentSegment, DatasetKeywordTable
+from langchain.schema import BaseRetriever, Document
+from models.dataset import Dataset, DatasetKeywordTable, DocumentSegment
+from pydantic import BaseModel, Extra, Field
 class KeywordTableConfig(BaseModel):

View File

@@ -1,16 +1,16 @@
 import json
 import logging
 from abc import abstractmethod
-from typing import List, Any, cast
-from langchain.embeddings.base import Embeddings
-from langchain.schema import Document, BaseRetriever
-from langchain.vectorstores import VectorStore
+from typing import Any, List, cast
 from core.index.base import BaseIndex
 from extensions.ext_database import db
-from models.dataset import Dataset, DocumentSegment, DatasetCollectionBinding
+from langchain.embeddings.base import Embeddings
+from langchain.schema import BaseRetriever, Document
+from langchain.vectorstores import VectorStore
+from models.dataset import Dataset, DatasetCollectionBinding
 from models.dataset import Document as DatasetDocument
+from models.dataset import DocumentSegment
 class BaseVectorIndex(BaseIndex):

View File

@@ -1,14 +1,13 @@
-from typing import cast, Any, List
-from langchain.embeddings.base import Embeddings
-from langchain.schema import Document
-from langchain.vectorstores import VectorStore
-from pydantic import BaseModel, root_validator
+from typing import Any, List, cast
 from core.index.base import BaseIndex
 from core.index.vector_index.base import BaseVectorIndex
 from core.vector_store.milvus_vector_store import MilvusVectorStore
+from langchain.embeddings.base import Embeddings
+from langchain.schema import Document
+from langchain.vectorstores import VectorStore
 from models.dataset import Dataset
+from pydantic import BaseModel, root_validator
 class MilvusConfig(BaseModel):

View File

@@ -1,18 +1,17 @@
 import os
-from typing import Optional, Any, List, cast
+from typing import Any, List, Optional, cast
 import qdrant_client
-from langchain.embeddings.base import Embeddings
-from langchain.schema import Document, BaseRetriever
-from langchain.vectorstores import VectorStore
-from pydantic import BaseModel
-from qdrant_client.http.models import HnswConfigDiff
 from core.index.base import BaseIndex
 from core.index.vector_index.base import BaseVectorIndex
 from core.vector_store.qdrant_vector_store import QdrantVectorStore
 from extensions.ext_database import db
+from langchain.embeddings.base import Embeddings
+from langchain.schema import BaseRetriever, Document
+from langchain.vectorstores import VectorStore
 from models.dataset import Dataset, DatasetCollectionBinding
+from pydantic import BaseModel
+from qdrant_client.http.models import HnswConfigDiff
 class QdrantConfig(BaseModel):

View File

@@ -1,10 +1,9 @@
 import json
-from flask import current_app
-from langchain.embeddings.base import Embeddings
 from core.index.vector_index.base import BaseVectorIndex
 from extensions.ext_database import db
+from flask import current_app
+from langchain.embeddings.base import Embeddings
 from models.dataset import Dataset, Document
@@ -29,7 +28,7 @@ class VectorIndex:
             raise ValueError(f"Vector store must be specified.")
         if vector_type == "weaviate":
-            from core.index.vector_index.weaviate_vector_index import WeaviateVectorIndex, WeaviateConfig
+            from core.index.vector_index.weaviate_vector_index import WeaviateConfig, WeaviateVectorIndex
             return WeaviateVectorIndex(
                 dataset=dataset,
@@ -42,7 +41,7 @@ class VectorIndex:
                 attributes=attributes
             )
         elif vector_type == "qdrant":
-            from core.index.vector_index.qdrant_vector_index import QdrantVectorIndex, QdrantConfig
+            from core.index.vector_index.qdrant_vector_index import QdrantConfig, QdrantVectorIndex
             return QdrantVectorIndex(
                 dataset=dataset,
@@ -55,7 +54,7 @@ class VectorIndex:
                 embeddings=embeddings
             )
         elif vector_type == "milvus":
-            from core.index.vector_index.milvus_vector_index import MilvusVectorIndex, MilvusConfig
+            from core.index.vector_index.milvus_vector_index import MilvusConfig, MilvusVectorIndex
             return MilvusVectorIndex(
                 dataset=dataset,

View File

@@ -1,16 +1,15 @@
-from typing import Optional, cast, Any, List
+from typing import Any, List, Optional, cast
 import requests
 import weaviate
-from langchain.embeddings.base import Embeddings
-from langchain.schema import Document, BaseRetriever
-from langchain.vectorstores import VectorStore
-from pydantic import BaseModel, root_validator
 from core.index.base import BaseIndex
 from core.index.vector_index.base import BaseVectorIndex
 from core.vector_store.weaviate_vector_store import WeaviateVectorStore
+from langchain.embeddings.base import Embeddings
+from langchain.schema import BaseRetriever, Document
+from langchain.vectorstores import VectorStore
 from models.dataset import Dataset
+from pydantic import BaseModel, root_validator
 class WeaviateConfig(BaseModel):