feat: add models for gitee.ai (#9490)
api/tests/integration_tests/model_runtime/gitee_ai/test_llm.py
@@ -0,0 +1,132 @@
import os
from collections.abc import Generator

import pytest

from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import (
    AssistantPromptMessage,
    PromptMessageTool,
    SystemPromptMessage,
    UserPromptMessage,
)
from core.model_runtime.entities.model_entities import AIModelEntity
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.gitee_ai.llm.llm import GiteeAILargeLanguageModel


def test_predefined_models():
    model = GiteeAILargeLanguageModel()
    model_schemas = model.predefined_models()

    assert len(model_schemas) >= 1
    assert isinstance(model_schemas[0], AIModelEntity)


def test_validate_credentials_for_chat_model():
    model = GiteeAILargeLanguageModel()

    with pytest.raises(CredentialsValidateFailedError):
        # use gpt-3.5-turbo as the model name because of mocking
        model.validate_credentials(model="gpt-3.5-turbo", credentials={"api_key": "invalid_key"})

    model.validate_credentials(
        model="Qwen2-7B-Instruct",
        credentials={"api_key": os.environ.get("GITEE_AI_API_KEY")},
    )


def test_invoke_chat_model():
    model = GiteeAILargeLanguageModel()

    result = model.invoke(
        model="Qwen2-7B-Instruct",
        credentials={"api_key": os.environ.get("GITEE_AI_API_KEY")},
        prompt_messages=[
            SystemPromptMessage(
                content="You are a helpful AI assistant.",
            ),
            UserPromptMessage(content="Hello World!"),
        ],
        model_parameters={
            "temperature": 0.0,
            "top_p": 1.0,
            "presence_penalty": 0.0,
            "frequency_penalty": 0.0,
            "max_tokens": 10,
            "stream": False,
        },
        stop=["How"],
        stream=False,
        user="foo",
    )

    assert isinstance(result, LLMResult)
    assert len(result.message.content) > 0


def test_invoke_stream_chat_model():
    model = GiteeAILargeLanguageModel()

    result = model.invoke(
        model="Qwen2-7B-Instruct",
        credentials={"api_key": os.environ.get("GITEE_AI_API_KEY")},
        prompt_messages=[
            SystemPromptMessage(
                content="You are a helpful AI assistant.",
            ),
            UserPromptMessage(content="Hello World!"),
        ],
        model_parameters={"temperature": 0.0, "max_tokens": 100, "stream": False},
        stream=True,
        user="foo",
    )

    assert isinstance(result, Generator)

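    # every streamed chunk should carry an assistant message delta; the final chunk also reports usage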
    for chunk in result:
        assert isinstance(chunk, LLMResultChunk)
        assert isinstance(chunk.delta, LLMResultChunkDelta)
        assert isinstance(chunk.delta.message, AssistantPromptMessage)
        assert len(chunk.delta.message.content) > 0 if chunk.delta.finish_reason is None else True
        if chunk.delta.finish_reason is not None:
            assert chunk.delta.usage is not None


def test_get_num_tokens():
    model = GiteeAILargeLanguageModel()

    num_tokens = model.get_num_tokens(
        model="Qwen2-7B-Instruct",
        credentials={"api_key": os.environ.get("GITEE_AI_API_KEY")},
        prompt_messages=[UserPromptMessage(content="Hello World!")],
    )

    assert num_tokens == 10

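    # token count for a prompt that also includes a system message and a tool definition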
    num_tokens = model.get_num_tokens(
        model="Qwen2-7B-Instruct",
        credentials={"api_key": os.environ.get("GITEE_AI_API_KEY")},
        prompt_messages=[
            SystemPromptMessage(
                content="You are a helpful AI assistant.",
            ),
            UserPromptMessage(content="Hello World!"),
        ],
        tools=[
            PromptMessageTool(
                name="get_weather",
                description="Determine weather in my location",
                parameters={
                    "type": "object",
                    "properties": {
                        "location": {"type": "string", "description": "The city and state e.g. San Francisco, CA"},
                        "unit": {"type": "string", "enum": ["c", "f"]},
                    },
                    "required": ["location"],
                },
            ),
        ],
    )

    assert num_tokens == 77
api/tests/integration_tests/model_runtime/gitee_ai/test_provider.py
@@ -0,0 +1,15 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.gitee_ai.gitee_ai import GiteeAIProvider


def test_validate_provider_credentials():
    provider = GiteeAIProvider()

    with pytest.raises(CredentialsValidateFailedError):
        provider.validate_provider_credentials(credentials={"api_key": "invalid_key"})

    provider.validate_provider_credentials(credentials={"api_key": os.environ.get("GITEE_AI_API_KEY")})
api/tests/integration_tests/model_runtime/gitee_ai/test_rerank.py
@@ -0,0 +1,47 @@
import os

import pytest

from core.model_runtime.entities.rerank_entities import RerankResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.gitee_ai.rerank.rerank import GiteeAIRerankModel


def test_validate_credentials():
    model = GiteeAIRerankModel()

    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model="bge-reranker-v2-m3",
            credentials={"api_key": "invalid_key"},
        )

    model.validate_credentials(
        model="bge-reranker-v2-m3",
        credentials={
            "api_key": os.environ.get("GITEE_AI_API_KEY"),
        },
    )


def test_invoke_model():
    model = GiteeAIRerankModel()
    result = model.invoke(
        model="bge-reranker-v2-m3",
        credentials={
            "api_key": os.environ.get("GITEE_AI_API_KEY"),
        },
        query="What is the capital of the United States?",
        docs=[
            "Carson City is the capital city of the American state of Nevada. At the 2010 United States "
            "Census, Carson City had a population of 55,274.",
            "The Commonwealth of the Northern Mariana Islands is a group of islands in the Pacific Ocean that "
            "are a political division controlled by the United States. Its capital is Saipan.",
        ],
        top_n=1,
        score_threshold=0.01,
    )

    assert isinstance(result, RerankResult)
    assert len(result.docs) == 1
    assert result.docs[0].score >= 0.01
api/tests/integration_tests/model_runtime/gitee_ai/test_speech2text.py
@@ -0,0 +1,45 @@
import os

import pytest

from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.gitee_ai.speech2text.speech2text import GiteeAISpeech2TextModel


def test_validate_credentials():
    model = GiteeAISpeech2TextModel()

    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model="whisper-base",
            credentials={"api_key": "invalid_key"},
        )

    model.validate_credentials(
        model="whisper-base",
        credentials={"api_key": os.environ.get("GITEE_AI_API_KEY")},
    )


def test_invoke_model():
    model = GiteeAISpeech2TextModel()

    # Get the directory of the current file
    current_dir = os.path.dirname(os.path.abspath(__file__))

    # Get assets directory
    assets_dir = os.path.join(os.path.dirname(current_dir), "assets")

    # Construct the path to the audio file
    audio_file_path = os.path.join(assets_dir, "audio.mp3")

    # Keep the file open while invoking the model so the audio bytes can still be read
    with open(audio_file_path, "rb") as audio_file:
        result = model.invoke(
            model="whisper-base", credentials={"api_key": os.environ.get("GITEE_AI_API_KEY")}, file=audio_file
        )

    assert isinstance(result, str)
    assert result == "1 2 3 4 5 6 7 8 9 10"
api/tests/integration_tests/model_runtime/gitee_ai/test_text_embedding.py
@@ -0,0 +1,46 @@
import os

import pytest

from core.model_runtime.entities.text_embedding_entities import TextEmbeddingResult
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.gitee_ai.text_embedding.text_embedding import GiteeAIEmbeddingModel


def test_validate_credentials():
    model = GiteeAIEmbeddingModel()

    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(model="bge-large-zh-v1.5", credentials={"api_key": "invalid_key"})

    model.validate_credentials(model="bge-large-zh-v1.5", credentials={"api_key": os.environ.get("GITEE_AI_API_KEY")})


def test_invoke_model():
    model = GiteeAIEmbeddingModel()

    result = model.invoke(
        model="bge-large-zh-v1.5",
        credentials={
            "api_key": os.environ.get("GITEE_AI_API_KEY"),
        },
        texts=["hello", "world"],
        user="user",
    )

    assert isinstance(result, TextEmbeddingResult)
    assert len(result.embeddings) == 2


def test_get_num_tokens():
    model = GiteeAIEmbeddingModel()

    num_tokens = model.get_num_tokens(
        model="bge-large-zh-v1.5",
        credentials={
            "api_key": os.environ.get("GITEE_AI_API_KEY"),
        },
        texts=["hello", "world"],
    )

    assert num_tokens == 2
api/tests/integration_tests/model_runtime/gitee_ai/test_tts.py
@@ -0,0 +1,23 @@
import os

from core.model_runtime.model_providers.gitee_ai.tts.tts import GiteeAIText2SpeechModel


def test_invoke_model():
    model = GiteeAIText2SpeechModel()

    result = model.invoke(
        model="speecht5_tts",
        tenant_id="test",
        credentials={
            "api_key": os.environ.get("GITEE_AI_API_KEY"),
        },
        content_text="Hello, world!",
        voice="",
    )

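    # invoke streams the synthesized audio as byte chunks; collect them and make sure something was produced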
    content = b""
    for chunk in result:
        content += chunk

    assert content != b""
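All of the tests above read GITEE_AI_API_KEY from the environment and exercise the live Gitee AI endpoints. A minimal sketch of a guard one could place alongside them (the requires_gitee_ai_key marker is hypothetical and not part of this commit) to skip the tests rather than fail when the key is absent:

import os

import pytest

# Hypothetical marker (not part of this commit): skip the Gitee AI integration tests
# when GITEE_AI_API_KEY is not exported in the environment.
requires_gitee_ai_key = pytest.mark.skipif(
    not os.environ.get("GITEE_AI_API_KEY"),
    reason="GITEE_AI_API_KEY is not set",
)

Decorating the test functions above with @requires_gitee_ai_key would turn a missing key into a skip instead of an error.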