Fix variable typo (#8084)

Nam Vu authored on 2024-09-08 12:14:11 +07:00 · committed by GitHub
parent b1918dae5e · commit 2d7954c7da
215 changed files with 599 additions and 597 deletions


@@ -52,7 +52,7 @@
- `mode` (string) voice model.available for model type `tts`
- `name` (string) voice model display name.available for model type `tts`
- `language` (string) the voice model supports languages.available for model type `tts`
-- `word_limit` (int) Single conversion word limit, paragraphwise by defaultavailable for model type `tts`
+- `word_limit` (int) Single conversion word limit, paragraph-wise by defaultavailable for model type `tts`
- `audio_type` (string) Support audio file extension format, e.g.mp3,wavavailable for model type `tts`
- `max_workers` (int) Number of concurrent workers supporting text and audio conversionavailable for model type`tts`
- `max_characters_per_chunk` (int) Maximum characters per chunk (available for model type `moderation`)
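For orientation, a hedged sketch of how the TTS fields documented above might be filled in; the keys come from the list, while every value is an illustrative assumption:

```python
# Illustrative only: the TTS-related fields documented above, with assumed values.
tts_model_properties = {
    "mode": "speech-01",               # voice model
    "name": "Speech 01",               # voice model display name
    "language": ["zh-Hans", "en-US"],  # languages the voice model supports
    "word_limit": 3500,                # single-conversion word limit (paragraph-wise by default)
    "audio_type": "mp3",               # supported audio file extension, e.g. mp3, wav
    "max_workers": 5,                  # concurrent text-and-audio conversion workers
}
```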
@@ -150,7 +150,7 @@
- `input` (float) Input price, i.e., Prompt price
- `output` (float) Output price, i.e., returned content price
-- `unit` (float) Pricing unit, e.g., if the price is meausred in 1M tokens, the corresponding token amount for the unit price is `0.000001`.
+- `unit` (float) Pricing unit, e.g., if the price is measured in 1M tokens, the corresponding token amount for the unit price is `0.000001`.
- `currency` (string) Currency unit
### ProviderCredentialSchema
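As a quick worked example of how `unit` interacts with the prices (numbers invented for illustration): with 1M-token pricing the unit is `0.000001`, so a request's cost is token count × price × unit.

```python
# Invented numbers: a price quoted per 1M tokens means unit = 0.000001.
input_price = 15.0        # `input`: prompt price per pricing block
unit = 0.000001           # `unit`: token amount behind the unit price
prompt_tokens = 2000

cost = prompt_tokens * input_price * unit
print(f"prompt cost: {cost:.2f}")  # -> prompt cost: 0.03
```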


@@ -13,7 +13,7 @@ logger = logging.getLogger(__name__)
class TTSModel(AIModel):
"""
-Model class for ttstext model.
+Model class for TTS model.
"""
model_type: ModelType = ModelType.TTS


@@ -284,7 +284,7 @@ class AzureOpenAILargeLanguageModel(_CommonAzureOpenAI, LargeLanguageModel):
try:
schema = json.loads(json_schema)
except:
raise ValueError(f"not currect json_schema format: {json_schema}")
raise ValueError(f"not correct json_schema format: {json_schema}")
model_parameters.pop("json_schema")
model_parameters["response_format"] = {"type": "json_schema", "json_schema": schema}
else:
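Both this hunk and the matching OpenAI hunk further down convert a JSON-schema string in `model_parameters` into a structured-output `response_format`. A standalone, hedged sketch of that conversion (the helper name is hypothetical, and it narrows the bare `except` to `json.JSONDecodeError`):

```python
import json

def apply_json_schema(model_parameters: dict) -> dict:
    """Hypothetical helper mirroring the hunk above: parse the raw
    `json_schema` string and repackage it as a response_format."""
    json_schema = model_parameters.pop("json_schema")
    try:
        schema = json.loads(json_schema)
    except json.JSONDecodeError:
        raise ValueError(f"not correct json_schema format: {json_schema}")
    model_parameters["response_format"] = {"type": "json_schema", "json_schema": schema}
    return model_parameters
```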


@@ -37,7 +37,7 @@ from core.model_runtime.model_providers.baichuan.llm.baichuan_turbo_errors impor
)
-class BaichuanLarguageModel(LargeLanguageModel):
+class BaichuanLanguageModel(LargeLanguageModel):
def _invoke(
self,


@@ -60,7 +60,7 @@ class BaichuanTextEmbeddingModel(TextEmbeddingModel):
token_usage = 0
for chunk in chunks:
-# embeding chunk
+# embedding chunk
chunk_embeddings, chunk_usage = self.embedding(
model=model,
api_key=api_key,


@@ -793,11 +793,11 @@ class BedrockLargeLanguageModel(LargeLanguageModel):
def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
"""
Map model invoke error to unified error
-The key is the ermd = genai.GenerativeModel(model)ror type thrown to the caller
-The value is the md = genai.GenerativeModel(model)error type thrown by the model,
+The key is the ermd = genai.GenerativeModel(model) error type thrown to the caller
+The value is the md = genai.GenerativeModel(model) error type thrown by the model,
which needs to be converted into a unified error type for the caller.
-:return: Invoke emd = genai.GenerativeModel(model)rror mapping
+:return: Invoke emd = genai.GenerativeModel(model) error mapping
"""
return {
InvokeConnectionError: [],
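This `_invoke_error_mapping` docstring (repeated in the Bedrock, Google, and Vertex AI hunks in this commit) describes one contract: keys are unified error types raised to the caller, values are the provider-specific exceptions to fold into them. A hedged, self-contained sketch of how such a mapping is typically consumed; `InvokeError` and `InvokeConnectionError` appear in the hunks, everything else here is an illustrative assumption:

```python
# Hedged sketch: applying an invoke-error mapping to a provider exception.
class InvokeError(Exception): ...
class InvokeConnectionError(InvokeError): ...

_ERROR_MAPPING: dict[type[InvokeError], list[type[Exception]]] = {
    InvokeConnectionError: [ConnectionError, TimeoutError],  # assumed provider errors
}

def to_unified_error(exc: Exception) -> InvokeError:
    """Convert a provider exception into the unified error type for the caller."""
    for unified_error, provider_errors in _ERROR_MAPPING.items():
        if any(isinstance(exc, err) for err in provider_errors):
            return unified_error(str(exc))
    return InvokeError(str(exc))
```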


@@ -130,11 +130,11 @@ class BedrockTextEmbeddingModel(TextEmbeddingModel):
def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
"""
Map model invoke error to unified error
-The key is the ermd = genai.GenerativeModel(model)ror type thrown to the caller
-The value is the md = genai.GenerativeModel(model)error type thrown by the model,
+The key is the ermd = genai.GenerativeModel(model) error type thrown to the caller
+The value is the md = genai.GenerativeModel(model) error type thrown by the model,
which needs to be converted into a unified error type for the caller.
-:return: Invoke emd = genai.GenerativeModel(model)rror mapping
+:return: Invoke emd = genai.GenerativeModel(model) error mapping
"""
return {
InvokeConnectionError: [],


@@ -416,11 +416,11 @@ class GoogleLargeLanguageModel(LargeLanguageModel):
def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
"""
Map model invoke error to unified error
-The key is the ermd = genai.GenerativeModel(model)ror type thrown to the caller
-The value is the md = genai.GenerativeModel(model)error type thrown by the model,
+The key is the ermd = genai.GenerativeModel(model) error type thrown to the caller
+The value is the md = genai.GenerativeModel(model) error type thrown by the model,
which needs to be converted into a unified error type for the caller.
-:return: Invoke emd = genai.GenerativeModel(model)rror mapping
+:return: Invoke emd = genai.GenerativeModel(model) error mapping
"""
return {
InvokeConnectionError: [


@@ -86,7 +86,7 @@ class MinimaxLargeLanguageModel(LargeLanguageModel):
Calculate num tokens for minimax model
not like ChatGLM, Minimax has a special prompt structure, we could not find a proper way
-to caculate the num tokens, so we use str() to convert the prompt to string
+to calculate the num tokens, so we use str() to convert the prompt to string
Minimax does not provide their own tokenizer of adab5.5 and abab5 model
therefore, we use gpt2 tokenizer instead
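A hedged sketch of the fallback this docstring describes; using Hugging Face's gpt2 tokenizer here is my assumption, as the project may bundle its own wrapper:

```python
# Sketch of the fallback above: Minimax provides no tokenizer for
# abab5/abab5.5, so convert the prompt to a string and count gpt2 tokens.
from transformers import GPT2TokenizerFast

_tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")

def approx_num_tokens(prompt_messages: list) -> int:
    return len(_tokenizer.encode(str(prompt_messages)))
```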


@@ -10,6 +10,7 @@ from core.model_runtime.model_providers.openai_api_compatible.llm.llm import OAI
class NovitaLargeLanguageModel(OAIAPICompatLargeLanguageModel):
def _update_endpoint_url(self, credentials: dict):
credentials['endpoint_url'] = "https://api.novita.ai/v3/openai"
+credentials['extra_headers'] = { 'X-Novita-Source': 'dify.ai' }
return credentials


@@ -243,7 +243,7 @@ class OCILargeLanguageModel(LargeLanguageModel):
request_args["compartmentId"] = compartment_id
request_args["servingMode"]["modelId"] = model
-chathistory = []
+chat_history = []
system_prompts = []
#if "meta.llama" in model:
# request_args["chatRequest"]["apiFormat"] = "GENERIC"
@@ -273,16 +273,16 @@ class OCILargeLanguageModel(LargeLanguageModel):
if isinstance(message.content, str):
text = message.content
if isinstance(message, UserPromptMessage):
chathistory.append({"role": "USER", "message": text})
chat_history.append({"role": "USER", "message": text})
else:
chathistory.append({"role": "CHATBOT", "message": text})
chat_history.append({"role": "CHATBOT", "message": text})
if isinstance(message, SystemPromptMessage):
if isinstance(message.content, str):
system_prompts.append(message.content)
args = {"apiFormat": "COHERE",
"preambleOverride": ' '.join(system_prompts),
"message": prompt_messages[-1].content,
"chatHistory": chathistory, }
"chatHistory": chat_history, }
request_args["chatRequest"].update(args)
elif model.startswith("meta"):
#print("run meta " * 10)


@@ -552,7 +552,7 @@ class OpenAILargeLanguageModel(_CommonOpenAI, LargeLanguageModel):
try:
schema = json.loads(json_schema)
except:
raise ValueError(f"not currect json_schema format: {json_schema}")
raise ValueError(f"not correct json_schema format: {json_schema}")
model_parameters.pop("json_schema")
model_parameters["response_format"] = {"type": "json_schema", "json_schema": schema}
else:


@@ -67,7 +67,7 @@ class FlashRecognitionRequest:
class FlashRecognizer:
"""
-reponse:
+response:
request_id string
status Integer
message String
@@ -132,9 +132,9 @@ class FlashRecognizer:
signstr = self._format_sign_string(query)
signature = self._sign(signstr, secret_key)
header["Authorization"] = signature
requrl = "https://"
requrl += signstr[4::]
return requrl
req_url = "https://"
req_url += signstr[4::]
return req_url
def _create_query_arr(self, req):
return {
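A hedged illustration of the URL assembly above, assuming (not confirmed by this diff) that the Tencent-style sign string begins with the 4-character HTTP method, so `signstr[4:]` leaves host, path, and query:

```python
# Assumption for illustration: sign string = HTTP method + host + path + query.
signstr = "POSTasr.cloud.tencent.com/asr/flash/v1/1259000000?engine_type=16k_zh"
req_url = "https://" + signstr[4:]
print(req_url)
# -> https://asr.cloud.tencent.com/asr/flash/v1/1259000000?engine_type=16k_zh
```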


@@ -695,11 +695,11 @@ class VertexAiLargeLanguageModel(LargeLanguageModel):
def _invoke_error_mapping(self) -> dict[type[InvokeError], list[type[Exception]]]:
"""
Map model invoke error to unified error
-The key is the ermd = gml.GenerativeModel(model)ror type thrown to the caller
-The value is the md = gml.GenerativeModel(model)error type thrown by the model,
+The key is the ermd = gml.GenerativeModel(model) error type thrown to the caller
+The value is the md = gml.GenerativeModel(model) error type thrown by the model,
which needs to be converted into a unified error type for the caller.
-:return: Invoke emd = gml.GenerativeModel(model)rror mapping
+:return: Invoke emd = gml.GenerativeModel(model) error mapping
"""
return {
InvokeConnectionError: [


@@ -135,16 +135,16 @@ class HttpClient:
**kwargs,
)
-def _object_to_formfata(self, key: str, value: Data | Mapping[object, object]) -> list[tuple[str, str]]:
+def _object_to_formdata(self, key: str, value: Data | Mapping[object, object]) -> list[tuple[str, str]]:
items = []
if isinstance(value, Mapping):
for k, v in value.items():
items.extend(self._object_to_formfata(f"{key}[{k}]", v))
items.extend(self._object_to_formdata(f"{key}[{k}]", v))
return items
if isinstance(value, list | tuple):
for v in value:
items.extend(self._object_to_formfata(key + "[]", v))
items.extend(self._object_to_formdata(key + "[]", v))
return items
def _primitive_value_to_str(val) -> str:
@@ -165,7 +165,7 @@ class HttpClient:
def _make_multipartform(self, data: Mapping[object, object]) -> dict[str, object]:
-items = flatten([self._object_to_formfata(k, v) for k, v in data.items()])
+items = flatten([self._object_to_formdata(k, v) for k, v in data.items()])
serialized: dict[str, object] = {}
for key, value in items:
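The `_object_to_formdata` recursion above flattens nested mappings and sequences into bracketed multipart keys. A hedged standalone re-implementation of just that flattening, to show the resulting key shapes:

```python
# Standalone sketch of the flattening performed by _object_to_formdata above:
# nested mappings become "key[sub]" entries and sequences become "key[]".
def object_to_formdata(key, value):
    if isinstance(value, dict):
        items = []
        for k, v in value.items():
            items.extend(object_to_formdata(f"{key}[{k}]", v))
        return items
    if isinstance(value, (list, tuple)):
        items = []
        for v in value:
            items.extend(object_to_formdata(key + "[]", v))
        return items
    return [(key, str(value))]

print(object_to_formdata("meta", {"tags": ["a", "b"], "author": {"name": "x"}}))
# -> [('meta[tags][]', 'a'), ('meta[tags][]', 'b'), ('meta[author][name]', 'x')]
```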