Model Runtime (#1858)
Co-authored-by: StyleZhang <jasonapring2015@outlook.com>
Co-authored-by: Garfield Dai <dai.hai@foxmail.com>
Co-authored-by: chenhe <guchenhe@gmail.com>
Co-authored-by: jyong <jyong@dify.ai>
Co-authored-by: Joel <iamjoel007@gmail.com>
Co-authored-by: Yeuoly <admin@srmxy.cn>
api/tests/integration_tests/model_runtime/openllm/test_llm.py | 104 lines (new file)
@@ -0,0 +1,104 @@
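# Integration tests for the OpenLLM large language model provider.
# They talk to a live OpenLLM server whose address is supplied through
# the OPENLLM_SERVER_URL environment variable.
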
import os
import pytest

from typing import Generator

from core.model_runtime.entities.message_entities import AssistantPromptMessage, UserPromptMessage
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.errors.validate import CredentialsValidateFailedError
from core.model_runtime.model_providers.openllm.llm.llm import OpenLLMLargeLanguageModel
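
# Validating credentials against a bogus server_url must raise
# CredentialsValidateFailedError; the real URL from the environment
# must pass without raising.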
def test_validate_credentials_for_chat_model():
    model = OpenLLMLargeLanguageModel()

    with pytest.raises(CredentialsValidateFailedError):
        model.validate_credentials(
            model='NOT IMPORTANT',
            credentials={
                'server_url': 'invalid_key',
            }
        )

    model.validate_credentials(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        }
    )
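
# A blocking (stream=False) invocation returns a complete LLMResult
# with non-empty message content and a positive token-usage count.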
def test_invoke_model():
    model = OpenLLMLargeLanguageModel()

    response = model.invoke(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        },
        prompt_messages=[
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        model_parameters={
            'temperature': 0.7,
            'top_p': 1.0,
            'top_k': 1,
        },
        stop=['you'],
        user="abc-123",
        stream=False
    )

    assert isinstance(response, LLMResult)
    assert len(response.message.content) > 0
    assert response.usage.total_tokens > 0
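
# A streaming (stream=True) invocation yields a Generator of
# LLMResultChunk objects, each carrying an AssistantPromptMessage delta.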
def test_invoke_stream_model():
    model = OpenLLMLargeLanguageModel()

    response = model.invoke(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        },
        prompt_messages=[
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        model_parameters={
            'temperature': 0.7,
            'top_p': 1.0,
            'top_k': 1,
        },
        stop=['you'],
        stream=True,
        user="abc-123"
    )

    assert isinstance(response, Generator)
    for chunk in response:
        assert isinstance(chunk, LLMResultChunk)
        assert isinstance(chunk.delta, LLMResultChunkDelta)
        assert isinstance(chunk.delta.message, AssistantPromptMessage)
        if chunk.delta.finish_reason is None:
            assert len(chunk.delta.message.content) > 0
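
# get_num_tokens returns the prompt's token count as a plain int;
# the 'Hello World!' prompt is expected to encode to exactly 3 tokens.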
def test_get_num_tokens():
    model = OpenLLMLargeLanguageModel()

    response = model.get_num_tokens(
        model='NOT IMPORTANT',
        credentials={
            'server_url': os.environ.get('OPENLLM_SERVER_URL'),
        },
        prompt_messages=[
            UserPromptMessage(
                content='Hello World!'
            )
        ],
        tools=[]
    )

    assert isinstance(response, int)
    assert response == 3