improve: introduce isort for linting Python imports (#1983)

Bowen Liang
2024-01-12 12:34:01 +08:00
committed by GitHub
parent cca9edc97a
commit cc9e74123c
413 changed files with 1635 additions and 1906 deletions
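The hunks below are mechanical rewrites produced by isort: imports are regrouped so that standard-library modules come first, and the names inside each from-import are sorted. A minimal sketch of that behaviour through isort's Python API (isort >= 5 ships isort.code()); the sample source here is made up for illustration and is not a file from this commit:

# Sketch only: the sample source below is illustrative, not taken from this commit.
import isort

messy = (
    "import pytest\n"
    "import os\n"
    "from tests.integration_tests.model_runtime.__mock.xinference import setup_xinference_mock, MOCK\n"
)

# isort.code() returns the rewritten source: stdlib imports are split into their
# own block, and the names in each from-import are sorted (constants such as
# MOCK come before functions under the default order_by_type setting).
print(isort.code(messy))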

View File

@@ -1,12 +1,11 @@
import os
import pytest
from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.xinference.text_embedding.text_embedding import XinferenceTextEmbeddingModel
-from tests.integration_tests.model_runtime.__mock.xinference import setup_xinference_mock, MOCK
+from tests.integration_tests.model_runtime.__mock.xinference import MOCK, setup_xinference_mock
@pytest.mark.parametrize('setup_xinference_mock', [['none']], indirect=True)
def test_validate_credentials(setup_xinference_mock):

View File

@@ -1,13 +1,12 @@
import os
-import pytest
from typing import Generator
-from core.model_runtime.entities.message_entities import AssistantPromptMessage, TextPromptMessageContent, UserPromptMessage, \
-    SystemPromptMessage, PromptMessageTool
+import pytest
+from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
+from core.model_runtime.entities.message_entities import (AssistantPromptMessage, PromptMessageTool,
+                                                          SystemPromptMessage, TextPromptMessageContent,
+                                                          UserPromptMessage)
from core.model_runtime.entities.model_entities import AIModelEntity
-from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunkDelta, \
-    LLMResultChunk
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.xinference.llm.llm import XinferenceAILargeLanguageModel
@@ -15,6 +14,7 @@ from core.model_runtime.model_providers.xinference.llm.llm import XinferenceAILa
from tests.integration_tests.model_runtime.__mock.openai import setup_openai_mock
from tests.integration_tests.model_runtime.__mock.xinference import setup_xinference_mock
@pytest.mark.parametrize('setup_openai_mock, setup_xinference_mock', [['chat', 'none']], indirect=True)
def test_validate_credentials_for_chat_model(setup_openai_mock, setup_xinference_mock):
model = XinferenceAILargeLanguageModel()
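The backslash-continued imports removed above are rewrapped by isort into parenthesized blocks aligned under the opening bracket. A small sketch of that wrapping; the line_length value is an assumption, since the isort settings this commit adds are not part of this excerpt:

# Sketch only: line_length=120 is an assumed value, not read from this commit.
import isort

long_import = (
    "from core.model_runtime.entities.message_entities import AssistantPromptMessage, "
    "TextPromptMessageContent, UserPromptMessage, SystemPromptMessage, PromptMessageTool\n"
)

# Once the line exceeds line_length, isort's default grid wrap mode sorts the
# names and folds them into a parenthesized, vertically aligned block like the
# one added in the hunk above.
print(isort.code(long_import, line_length=120))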

View File

@@ -1,11 +1,11 @@
import os
-import pytest
+import pytest
from core.model_runtime.entities.rerank_entities import RerankResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.xinference.rerank.rerank import XinferenceRerankModel
-from tests.integration_tests.model_runtime.__mock.xinference import setup_xinference_mock, MOCK
+from tests.integration_tests.model_runtime.__mock.xinference import MOCK, setup_xinference_mock
@pytest.mark.parametrize('setup_xinference_mock', [['none']], indirect=True)
def test_validate_credentials(setup_xinference_mock):
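Beyond rewriting files, isort can also run as a pure check, which is how an import-order lint is typically enforced in CI; how this repository wires that up is not shown in this excerpt. A minimal sketch using isort.check_code():

# Sketch only: demonstrates check mode on a made-up snippet.
import isort

snippet = "import pytest\nimport os\n"

# check_code() returns True when the imports are already sorted; with
# show_diff=True it prints the rewrite it would apply instead of changing anything.
if not isort.check_code(snippet, show_diff=True):
    print("imports are not isort-clean")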