feat: support openllm embedding (#1293)

takatost authored 2023-10-10 12:09:35 +08:00, committed by GitHub
parent 1d4f019de4
commit 4ab4bcc074
5 changed files with 171 additions and 12 deletions

View File: core/model_providers/models/embedding/openllm_embedding.py

@@ -0,0 +1,22 @@
from core.third_party.langchain.embeddings.openllm_embedding import OpenLLMEmbeddings

from core.model_providers.error import LLMBadRequestError
from core.model_providers.providers.base import BaseModelProvider
from core.model_providers.models.embedding.base import BaseEmbedding


class OpenLLMEmbedding(BaseEmbedding):
    def __init__(self, model_provider: BaseModelProvider, name: str):
        credentials = model_provider.get_model_credentials(
            model_name=name,
            model_type=self.type
        )

        client = OpenLLMEmbeddings(
            server_url=credentials['server_url']
        )

        super().__init__(model_provider, client, name)

    def handle_exceptions(self, ex: Exception) -> Exception:
        return LLMBadRequestError(f"OpenLLM embedding: {str(ex)}")

View File: core/model_providers/models/embedding/xinference_embedding.py

@@ -1,5 +1,4 @@
 from core.third_party.langchain.embeddings.xinference_embedding import XinferenceEmbedding as XinferenceEmbeddings
-from replicate.exceptions import ModelError, ReplicateError

 from core.model_providers.error import LLMBadRequestError
 from core.model_providers.providers.base import BaseModelProvider
@@ -21,7 +20,4 @@ class XinferenceEmbedding(BaseEmbedding):
         super().__init__(model_provider, client, name)

     def handle_exceptions(self, ex: Exception) -> Exception:
-        if isinstance(ex, (ModelError, ReplicateError)):
-            return LLMBadRequestError(f"Xinference embedding: {str(ex)}")
-        else:
-            return ex
+        return LLMBadRequestError(f"Xinference embedding: {str(ex)}")

View File: core/model_providers/providers/openllm_provider.py

@@ -2,11 +2,13 @@ import json
 from typing import Type

 from core.helper import encrypter
+from core.model_providers.models.embedding.openllm_embedding import OpenLLMEmbedding
 from core.model_providers.models.entity.model_params import KwargRule, ModelKwargsRules, ModelType
 from core.model_providers.models.llm.openllm_model import OpenLLMModel
 from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError
 from core.model_providers.models.base import BaseProviderModel
+from core.third_party.langchain.embeddings.openllm_embedding import OpenLLMEmbeddings
 from core.third_party.langchain.llms.openllm import OpenLLM
 from models.provider import ProviderType
@@ -31,6 +33,8 @@ class OpenLLMProvider(BaseModelProvider):
"""
if model_type == ModelType.TEXT_GENERATION:
model_class = OpenLLMModel
elif model_type== ModelType.EMBEDDINGS:
model_class = OpenLLMEmbedding
else:
raise NotImplementedError
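As a hedged illustration of the dispatch above (the enclosing method is not visible in this hunk, so the lookup call name is an assumption):

# Hypothetical call; assumes the hunk above sits inside the provider's
# model-class resolver.
model_class = OpenLLMProvider.get_model_class(ModelType.EMBEDDINGS)
assert model_class is OpenLLMEmbedding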
@@ -69,14 +73,21 @@ class OpenLLMProvider(BaseModelProvider):
             'server_url': credentials['server_url']
         }

-        llm = OpenLLM(
-            llm_kwargs={
-                'max_new_tokens': 10
-            },
-            **credential_kwargs
-        )
-
-        llm("ping")
+        if model_type == ModelType.TEXT_GENERATION:
+            llm = OpenLLM(
+                llm_kwargs={
+                    'max_new_tokens': 10
+                },
+                **credential_kwargs
+            )
+
+            llm("ping")
+        elif model_type == ModelType.EMBEDDINGS:
+            embedding = OpenLLMEmbeddings(
+                **credential_kwargs
+            )
+
+            embedding.embed_query("ping")
     except Exception as ex:
         raise CredentialsValidateFailedError(str(ex))
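A minimal sketch of what the new embedding branch of this validation exercises, assuming an OpenLLM server reachable at a placeholder URL:

from core.third_party.langchain.embeddings.openllm_embedding import OpenLLMEmbeddings

credentials = {'server_url': 'http://localhost:3000'}  # placeholder server URL
embedding = OpenLLMEmbeddings(server_url=credentials['server_url'])
embedding.embed_query("ping")  # any failure surfaces as CredentialsValidateFailedError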

View File: core/third_party/langchain/embeddings/openllm_embedding.py

@@ -0,0 +1,67 @@
"""Wrapper around OpenLLM embedding models."""
from typing import Any, List, Optional
import requests
from pydantic import BaseModel, Extra
from langchain.embeddings.base import Embeddings
class OpenLLMEmbeddings(BaseModel, Embeddings):
"""Wrapper around OpenLLM embedding models.
"""
client: Any #: :meta private:
server_url: Optional[str] = None
"""Optional server URL that currently runs a LLMServer with 'openllm start'."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
def embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to OpenLLM's embedding endpoint.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
embeddings = []
for text in texts:
result = self.invoke_embedding(text=text)
embeddings.append(result)
return [list(map(float, e)) for e in embeddings]
def invoke_embedding(self, text):
params = [
text
]
headers = {"Content-Type": "application/json"}
response = requests.post(
f'{self.server_url}/v1/embeddings',
headers=headers,
json=params
)
if not response.ok:
raise ValueError(f"OpenLLM HTTP {response.status_code} error: {response.text}")
json_response = response.json()
return json_response[0]["embeddings"][0]
def embed_query(self, text: str) -> List[float]:
"""Call out to OpenLLM's embedding endpoint.
Args:
text: The text to embed.
Returns:
Embeddings for the text.
"""
return self.embed_documents([text])[0]
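For reference, a short usage sketch of the wrapper together with the wire format it assumes; the URL and port are placeholders for a host running 'openllm start':

from core.third_party.langchain.embeddings.openllm_embedding import OpenLLMEmbeddings

embeddings = OpenLLMEmbeddings(server_url="http://localhost:3000")  # placeholder URL

# embed_query POSTs the JSON body ["ping"] to {server_url}/v1/embeddings and,
# per invoke_embedding above, expects a response shaped roughly like
# [{"embeddings": [[0.1, 0.2, ...]]}], taking the first vector of the first item.
vector = embeddings.embed_query("ping")
print(len(vector))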