chore: refurbish python code by applying Pylint linter rules (#8322)
@@ -1,15 +1,13 @@
+from collections.abc import Generator
 
-import google.generativeai.types.content_types as content_types
 import google.generativeai.types.generation_types as generation_config_types
-import google.generativeai.types.safety_types as safety_types
 import pytest
 from _pytest.monkeypatch import MonkeyPatch
 from google.ai import generativelanguage as glm
 from google.ai.generativelanguage_v1beta.types import content as gag_content
 from google.generativeai import GenerativeModel
 from google.generativeai.client import _ClientManager, configure
-from google.generativeai.types import GenerateContentResponse
+from google.generativeai.types import GenerateContentResponse, content_types, safety_types
 from google.generativeai.types.generation_types import BaseGenerateContentResponse

 current_api_key = ""
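The import changes above follow a pattern linters commonly suggest: an aliased submodule import (Pylint flags this as consider-using-from-import, R0402) is replaced by a from-import, and names that come from the same module are folded into a single statement, while Generator moves from typing to collections.abc, its non-deprecated home since Python 3.9. A minimal sketch of the same pattern, using only standard-library modules rather than the project's own imports:

    # Discouraged: aliasing a submodule import.
    #   import os.path as path
    # Preferred: import the submodule directly with a from-import.
    from os import path

    # Several names from one module can share a single from-import, mirroring
    # how content_types and safety_types were folded into the existing
    # google.generativeai.types import above.
    from collections.abc import Generator, Iterable

    def first_n(values: Iterable[int], n: int) -> Generator[int, None, None]:
        # Generator and Iterable live in collections.abc; the typing aliases
        # with the same names are deprecated since Python 3.9.
        for i, value in enumerate(values):
            if i == n:
                return
            yield value

    print(path.basename("/tmp/example.txt"), list(first_n(range(10), 3)))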
@@ -6,7 +6,6 @@ from time import time
 # import monkeypatch
 from typing import Any, Literal, Optional, Union

-import openai.types.chat.completion_create_params as completion_create_params
 from openai import AzureOpenAI, OpenAI
 from openai._types import NOT_GIVEN, NotGiven
 from openai.resources.chat.completions import Completions
@@ -18,6 +17,7 @@ from openai.types.chat import (
     ChatCompletionMessageToolCall,
     ChatCompletionToolChoiceOptionParam,
     ChatCompletionToolParam,
+    completion_create_params,
 )
 from openai.types.chat.chat_completion import ChatCompletion as _ChatCompletion
 from openai.types.chat.chat_completion import Choice as _ChatCompletionChoice
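Taken together, these two hunks drop the standalone aliased import of the completion_create_params submodule and pull it in through the grouped from-import that already exists in the file. A condensed before/after sketch (surrounding names abbreviated; assumes the openai package is installed, and both forms bind the same module object, so call sites such as completion_create_params.CompletionCreateParams keep working):

    # Before: the submodule was imported separately with an alias.
    #   import openai.types.chat.completion_create_params as completion_create_params
    #   from openai.types.chat import (
    #       ChatCompletionToolParam,
    #   )

    # After: one grouped from-import covers the classes and the submodule.
    from openai.types.chat import (
        ChatCompletionToolParam,
        completion_create_params,
    )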
@@ -254,7 +254,7 @@ class MockChatClass:
             "gpt-3.5-turbo-16k-0613",
         ]
         azure_openai_models = ["gpt35", "gpt-4v", "gpt-35-turbo"]
-        if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", self._client.base_url.__str__()):
+        if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", str(self._client.base_url)):
             raise InvokeAuthorizationError("Invalid base url")
         if model in openai_models + azure_openai_models:
             if not re.match(r"sk-[a-zA-Z0-9]{24,}$", self._client.api_key) and type(self._client) == OpenAI:
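The only functional change in this hunk (and the analogous hunks below) swaps a direct dunder call for the built-in: str(x) invokes the type's __str__ through the documented protocol, so both spellings return the same value, but the built-in is the idiomatic form and is what Pylint's unnecessary-dunder-call check (C2801) recommends. A small runnable illustration with a hypothetical stand-in class, independent of the httpx URL object the OpenAI client actually holds:

    import re

    class BaseURL:
        """Hypothetical stand-in for the URL object held by the client."""

        def __init__(self, raw: str) -> None:
            self.raw = raw

        def __str__(self) -> str:
            return self.raw

    url = BaseURL("https://api.openai.com/v1")

    # Equivalent results; the hunk above switches to the built-in form.
    assert url.__str__() == str(url)

    # Same validation as the mock, applied to the str() form.
    if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", str(url)):
        raise ValueError("Invalid base url")
    print("base url accepted:", str(url))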
@@ -112,7 +112,7 @@ class MockCompletionsClass:
         ]
         azure_openai_models = ["gpt-35-turbo-instruct"]

-        if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", self._client.base_url.__str__()):
+        if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", str(self._client.base_url)):
             raise InvokeAuthorizationError("Invalid base url")
         if model in openai_models + azure_openai_models:
             if not re.match(r"sk-[a-zA-Z0-9]{24,}$", self._client.api_key) and type(self._client) == OpenAI:
@@ -22,7 +22,7 @@ class MockEmbeddingsClass:
         if isinstance(input, str):
             input = [input]

-        if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", self._client.base_url.__str__()):
+        if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", str(self._client.base_url)):
             raise InvokeAuthorizationError("Invalid base url")

         if len(self._client.api_key) < 18:
@@ -20,7 +20,7 @@ class MockModerationClass:
         if isinstance(input, str):
             input = [input]

-        if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", self._client.base_url.__str__()):
+        if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", str(self._client.base_url)):
             raise InvokeAuthorizationError("Invalid base url")

         if len(self._client.api_key) < 18:
@@ -20,7 +20,7 @@ class MockSpeech2TextClass:
         temperature: float | NotGiven = NOT_GIVEN,
         **kwargs: Any,
     ) -> Transcription:
-        if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", self._client.base_url.__str__()):
+        if not re.match(r"^(https?):\/\/[^\s\/$.?#].[^\s]*$", str(self._client.base_url)):
             raise InvokeAuthorizationError("Invalid base url")

         if len(self._client.api_key) < 18:
@@ -42,7 +42,7 @@ class MockXinferenceClass:
         model_uid = url.split("/")[-1] or ""
         if not re.match(
             r"[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}", model_uid
-        ) and model_uid not in ["generate", "chat", "embedding", "rerank"]:
+        ) and model_uid not in {"generate", "chat", "embedding", "rerank"}:
             response.status_code = 404
             response._content = b"{}"
             return response
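Here the membership test is rewritten with a set literal instead of a list literal. The result is identical for these string constants, but the set form is what linters suggest: set lookups are average O(1), and CPython's compiler folds a constant set literal used in an in-test into a single frozenset constant. A tiny sketch of the pattern; the helper name is illustrative, not from the repository:

    import dis

    def is_builtin_endpoint(model_uid: str) -> bool:
        # Constant elements let the compiler store the literal as one
        # frozenset constant instead of rebuilding a list on every call.
        return model_uid in {"generate", "chat", "embedding", "rerank"}

    assert is_builtin_endpoint("chat")
    assert not is_builtin_endpoint("a1b2c3d4-0000-0000-0000-000000000000")

    # Inspect the compiled constants to see the frozenset the literal became.
    dis.dis(is_builtin_endpoint)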
@@ -53,7 +53,7 @@ class MockXinferenceClass:
             response._content = b"{}"
             return response

-        if model_uid in ["generate", "chat"]:
+        if model_uid in {"generate", "chat"}:
             response.status_code = 200
             response._content = b"""{
     "model_type": "LLM",