From 5fa2161b05919c278093839157882afabda63395 Mon Sep 17 00:00:00 2001
From: takatost
Date: Sat, 12 Aug 2023 00:57:00 +0800
Subject: [PATCH] feat: server multi models support (#799)

---
 .../workflows/check_no_chinese_comments.py | 3 +-
 api/.env.example | 26 +
 api/app.py | 19 +-
 api/commands.py | 37 +-
 api/config.py | 50 +-
 api/controllers/console/__init__.py | 5 +-
 api/controllers/console/app/app.py | 26 +-
 api/controllers/console/app/audio.py | 2 +-
 api/controllers/console/app/completion.py | 12 +-
 api/controllers/console/app/generator.py | 2 +-
 api/controllers/console/app/message.py | 2 +-
 api/controllers/console/app/model_config.py | 4 +-
 .../console/datasets/data_source.py | 2 +-
 api/controllers/console/datasets/datasets.py | 32 +-
 .../console/datasets/datasets_document.py | 48 +-
 .../console/datasets/hit_testing.py | 4 +-
 api/controllers/console/explore/audio.py | 2 +-
 api/controllers/console/explore/completion.py | 2 +-
 api/controllers/console/explore/message.py | 2 +-
 api/controllers/console/explore/parameter.py | 5 +-
 .../console/universal_chat/audio.py | 2 +-
 .../console/universal_chat/chat.py | 10 +-
 .../console/universal_chat/message.py | 2 +-
 .../console/universal_chat/parameter.py | 5 +-
 .../console/webhook}/__init__.py | 0
 api/controllers/console/webhook/stripe.py | 53 ++
 .../console/workspace/model_providers.py | 444 +++++++-------
 api/controllers/console/workspace/models.py | 108 ++++
 .../console/workspace/providers.py | 130 +++++
 .../console/workspace/workspace.py | 2 +-
 api/controllers/service_api/app/app.py | 5 +-
 api/controllers/service_api/app/audio.py | 2 +-
 api/controllers/service_api/app/completion.py | 2 +-
 .../service_api/dataset/document.py | 2 +-
 api/controllers/web/app.py | 5 +-
 api/controllers/web/audio.py | 2 +-
 api/controllers/web/completion.py | 2 +-
 api/controllers/web/message.py | 2 +-
 api/core/__init__.py | 36 --
 api/core/agent/agent/calc_token_mixin.py | 22 +-
 .../agent/agent/multi_dataset_router_agent.py | 10 +-
 api/core/agent/agent/openai_function_call.py | 5 +-
 .../openai_function_call_summarize_mixin.py | 14 +-
 .../agent/agent/openai_multi_function_call.py | 5 +-
 .../structed_multi_dataset_router_agent.py | 162 ++++++
 api/core/agent/agent/structured_chat.py | 10 +-
 api/core/agent/agent_executor.py | 36 +-
 .../agent_loop_gather_callback_handler.py | 9 +-
 .../callback_handler/llm_callback_handler.py | 18 +-
 .../main_chain_gather_callback_handler.py | 2 -
 api/core/completion.py | 197 +++----
 api/core/constant/llm_constant.py | 109 ----
 api/core/conversation_message_task.py | 59 +-
 api/core/docstore/dataset_docstore.py | 14 +-
 api/core/embedding/cached_embedding.py | 60 +-
 api/core/generator/llm_generator.py | 142 ++---
 .../test_helpers => core/helper}/__init__.py | 0
 api/core/helper/encrypter.py | 20 +
 api/core/index/index.py | 14 +-
 api/core/indexing_runner.py | 89 +--
 api/core/llm/llm_builder.py | 148 -----
 api/core/llm/moderation.py | 15 -
 api/core/llm/provider/anthropic_provider.py | 138 -----
 api/core/llm/provider/azure_provider.py | 145 -----
 api/core/llm/provider/base.py | 132 -----
 api/core/llm/provider/errors.py | 2 -
 api/core/llm/provider/huggingface_provider.py | 22 -
 api/core/llm/provider/llm_provider_service.py | 53 --
 api/core/llm/provider/openai_provider.py | 55 --
 api/core/llm/streamable_chat_anthropic.py | 62 --
 api/core/llm/token_calculator.py | 41 --
 api/core/llm/whisper.py | 26 -
 api/core/llm/wrappers/anthropic_wrapper.py | 27 -
 api/core/llm/wrappers/openai_wrapper.py | 31 -
 ...versation_token_db_buffer_shared_memory.py | 24 +-
 api/core/{llm => model_providers}/error.py | 0
 api/core/model_providers/model_factory.py | 293 ++++++++++
 .../model_providers/model_provider_factory.py | 228 ++++++++
 .../model_providers/models}/__init__.py | 0
 api/core/model_providers/models/base.py | 22 +
 .../models/embedding}/__init__.py | 0
 .../embedding/azure_openai_embedding.py | 78 +++
 .../model_providers/models/embedding/base.py | 40 ++
 .../models/embedding/minimax_embedding.py | 35 ++
 .../models/embedding/openai_embedding.py | 72 +++
 .../models/embedding/replicate_embedding.py | 36 ++
 .../models/entity}/__init__.py | 0
 .../model_providers/models/entity/message.py | 53 ++
 .../models/entity/model_params.py | 59 ++
 .../model_providers/models/entity/provider.py | 10 +
 .../model_providers/models/llm/__init__.py | 0
 .../models/llm/anthropic_model.py | 107 ++++
 .../models/llm/azure_openai_model.py | 177 ++++++
 api/core/model_providers/models/llm/base.py | 269 +++++++++
 .../models/llm/chatglm_model.py | 70 +++
 .../models/llm/huggingface_hub_model.py | 82 +++
 .../models/llm/minimax_model.py | 70 +++
 .../models/llm/openai_model.py | 219 +++++++
 .../models/llm/replicate_model.py | 103 ++++
 .../model_providers/models/llm/spark_model.py | 73 +++
 .../models/llm/tongyi_model.py | 77 +++
 .../models/llm/wenxin_model.py | 92 +++
 .../models/moderation/__init__.py | 0
 .../models/moderation/openai_moderation.py | 48 ++
 .../models/speech2text/__init__.py | 0
 .../models/speech2text/base.py | 29 +
 .../models/speech2text/openai_whisper.py | 47 ++
 .../model_providers/providers/__init__.py | 0
 .../providers/anthropic_provider.py | 224 ++++++++
 .../providers/azure_openai_provider.py | 387 +++++++++++++
 api/core/model_providers/providers/base.py | 283 +++++++++
 .../providers/chatglm_provider.py | 157 +++++
 api/core/model_providers/providers/hosted.py | 76 +++
 .../providers/huggingface_hub_provider.py | 183 ++++++
 .../providers/minimax_provider.py | 179 ++++++
 .../providers/openai_provider.py | 289 ++++++++++
 .../providers/replicate_provider.py | 184 ++++++
 .../providers/spark_provider.py | 191 +++++++
 .../providers/tongyi_provider.py | 157 +++++
 .../providers/wenxin_provider.py | 182 ++++++
 api/core/model_providers/rules.py | 47 ++
 .../model_providers/rules/_providers.json | 12 +
 api/core/model_providers/rules/anthropic.json | 15 +
 .../model_providers/rules/azure_openai.json | 7 +
 api/core/model_providers/rules/chatglm.json | 7 +
 .../rules/huggingface_hub.json | 7 +
 api/core/model_providers/rules/minimax.json | 13 +
 api/core/model_providers/rules/openai.json | 14 +
 api/core/model_providers/rules/replicate.json | 7 +
 api/core/model_providers/rules/spark.json | 13 +
 api/core/model_providers/rules/tongyi.json | 7 +
 api/core/model_providers/rules/wenxin.json | 7 +
 api/core/orchestrator_rule_parser.py | 87 +--
 .../langchain/embeddings/__init__.py | 0
 .../embeddings/replicate_embedding.py | 99 ++++
 .../third_party/langchain/llms/__init__.py | 0
 .../langchain/llms/azure_chat_open_ai.py} | 42 +-
 .../langchain/llms/azure_open_ai.py} | 24 +-
 .../langchain/llms/chat_open_ai.py} | 36 +-
 .../langchain/llms}/fake.py | 10 +-
 .../langchain/llms/open_ai.py} | 22 +-
 .../langchain/llms/replicate_llm.py | 75 +++
 api/core/third_party/langchain/llms/spark.py | 185 ++++++
 .../third_party/langchain/llms/tongyi_llm.py | 82 +++
 api/core/third_party/langchain/llms/wenxin.py | 233 ++++++++
 api/core/third_party/spark/__init__.py | 0
 api/core/third_party/spark/spark_llm.py | 150 +++++
 api/core/tool/dataset_index_tool.py | 102 ----
 api/core/tool/dataset_retriever_tool.py | 13 +-
 api/events/event_handlers/__init__.py | 2 -
 .../create_provider_when_tenant_created.py | 24 -
 .../create_provider_when_tenant_updated.py | 24 -
 ...rsation_name_when_first_message_created.py | 1 -
 api/extensions/ext_stripe.py | 6 +
 api/libs/rsa.py | 43 +-
 ...16fa53d9faec_add_provider_model_support.py | 79 +++
 ...022897aaceb_add_model_name_in_embedding.py | 36 ++
 .../bf0aec5ba2cf_add_provider_order.py | 52 ++
 ...ed59becda_modify_quota_limit_field_type.py | 46 ++
 api/models/dataset.py | 19 +-
 api/models/provider.py | 122 +++-
 api/requirements.txt | 11 +-
 api/services/app_model_config_service.py | 88 ++-
 api/services/audio_service.py | 16 +-
 api/services/completion_service.py | 6 +-
 api/services/dataset_service.py | 17 +-
 api/services/hit_testing_service.py | 13 +-
 api/services/provider_checkout_service.py | 158 +++++
 api/services/provider_service.py | 541 ++++++++++++++++--
 api/services/workspace_service.py | 27 +-
 api/tests/conftest.py | 50 --
 api/tests/integration_tests/.env.example | 35 ++
 api/tests/integration_tests/__init__.py | 0
 api/tests/integration_tests/conftest.py | 19 +
 .../integration_tests/models/__init__.py | 0
 .../models/embedding/__init__.py | 0
 .../embedding/test_azure_openai_embedding.py | 57 ++
 .../embedding/test_minimax_embedding.py | 44 ++
 .../models/embedding/test_openai_embedding.py | 40 ++
 .../embedding/test_replicate_embedding.py | 64 +++
 .../integration_tests/models/llm/__init__.py | 0
 .../models/llm/test_anthropic_model.py | 61 ++
 .../models/llm/test_azure_openai_model.py | 86 +++
 .../models/llm/test_huggingface_hub_model.py | 124 ++++
 .../models/llm/test_minimax_model.py | 64 +++
 .../models/llm/test_openai_model.py | 80 +++
 .../models/llm/test_replicate_model.py | 73 +++
 .../models/llm/test_spark_model.py | 69 +++
 .../models/llm/test_tongyi_model.py | 61 ++
 .../models/llm/test_wenxin_model.py | 63 ++
 .../models/moderation/__init__.py | 0
 .../moderation/test_openai_moderation.py | 40 ++
 .../models/speech2text/__init__.py | 0
 .../models/speech2text/audio.mp3 | Bin 0 -> 218880 bytes
 .../models/speech2text/test_openai_whisper.py | 50 ++
 .../test_controllers/test_account_api.py.bak | 75 ---
 api/tests/test_controllers/test_login.py | 108 ----
 api/tests/test_controllers/test_setup.py | 80 ---
 api/tests/test_factory.py | 22 -
 api/tests/unit_tests/__init__.py | 0
 .../unit_tests/model_providers/__init__.py | 0
 .../model_providers/fake_model_provider.py | 44 ++
 .../test_anthropic_provider.py | 123 ++++
 .../test_azure_openai_provider.py | 117 ++++
 .../test_base_model_provider.py | 72 +++
 .../model_providers/test_chatglm_provider.py | 89 +++
 .../test_huggingface_hub_provider.py | 161 ++++++
 .../model_providers/test_minimax_provider.py | 88 +++
 .../model_providers/test_openai_provider.py | 126 ++++
 .../test_replicate_provider.py | 125 ++++
 .../model_providers/test_spark_provider.py | 97 ++++
 .../model_providers/test_tongyi_provider.py | 90 +++
 .../model_providers/test_wenxin_provider.py | 93 +++
 213 files changed, 10556 insertions(+), 2579 deletions(-)
 rename api/{tests/test_controllers => controllers/console/webhook}/__init__.py (100%)
 create mode 100644 api/controllers/console/webhook/stripe.py
 create mode 100644 api/controllers/console/workspace/models.py
 create mode 100644 api/controllers/console/workspace/providers.py
 create mode 100644 api/core/agent/agent/structed_multi_dataset_router_agent.py
 delete mode 100644 api/core/constant/llm_constant.py
 rename api/{tests/test_helpers => core/helper}/__init__.py (100%)
 create mode 100644 api/core/helper/encrypter.py
 delete mode 100644 api/core/llm/llm_builder.py
 delete mode 100644 api/core/llm/moderation.py
 delete mode 100644 api/core/llm/provider/anthropic_provider.py
 delete mode 100644 api/core/llm/provider/azure_provider.py
 delete mode 100644 api/core/llm/provider/base.py
 delete mode 100644 api/core/llm/provider/errors.py
 delete mode 100644 api/core/llm/provider/huggingface_provider.py
 delete mode 100644 api/core/llm/provider/llm_provider_service.py
 delete mode 100644 api/core/llm/provider/openai_provider.py
 delete mode 100644 api/core/llm/streamable_chat_anthropic.py
 delete mode 100644 api/core/llm/token_calculator.py
 delete mode 100644 api/core/llm/whisper.py
 delete mode 100644 api/core/llm/wrappers/anthropic_wrapper.py
 delete mode 100644 api/core/llm/wrappers/openai_wrapper.py
 rename api/core/{llm => model_providers}/error.py (100%)
 create mode 100644 api/core/model_providers/model_factory.py
 create mode 100644 api/core/model_providers/model_provider_factory.py
 rename api/{tests/test_libs => core/model_providers/models}/__init__.py (100%)
 create mode 100644 api/core/model_providers/models/base.py
 rename api/{tests/test_models => core/model_providers/models/embedding}/__init__.py (100%)
 create mode 100644 api/core/model_providers/models/embedding/azure_openai_embedding.py
 create mode 100644 api/core/model_providers/models/embedding/base.py
 create mode 100644 api/core/model_providers/models/embedding/minimax_embedding.py
 create mode 100644 api/core/model_providers/models/embedding/openai_embedding.py
 create mode 100644 api/core/model_providers/models/embedding/replicate_embedding.py
 rename api/{tests/test_services => core/model_providers/models/entity}/__init__.py (100%)
 create mode 100644 api/core/model_providers/models/entity/message.py
 create mode 100644 api/core/model_providers/models/entity/model_params.py
 create mode 100644 api/core/model_providers/models/entity/provider.py
 create mode 100644 api/core/model_providers/models/llm/__init__.py
 create mode 100644 api/core/model_providers/models/llm/anthropic_model.py
 create mode 100644 api/core/model_providers/models/llm/azure_openai_model.py
 create mode 100644 api/core/model_providers/models/llm/base.py
 create mode 100644 api/core/model_providers/models/llm/chatglm_model.py
 create mode 100644 api/core/model_providers/models/llm/huggingface_hub_model.py
 create mode 100644 api/core/model_providers/models/llm/minimax_model.py
 create mode 100644 api/core/model_providers/models/llm/openai_model.py
 create mode 100644 api/core/model_providers/models/llm/replicate_model.py
 create mode 100644 api/core/model_providers/models/llm/spark_model.py
 create mode 100644 api/core/model_providers/models/llm/tongyi_model.py
 create mode 100644 api/core/model_providers/models/llm/wenxin_model.py
 create mode 100644 api/core/model_providers/models/moderation/__init__.py
 create mode 100644 api/core/model_providers/models/moderation/openai_moderation.py
 create mode 100644 api/core/model_providers/models/speech2text/__init__.py
 create mode 100644 api/core/model_providers/models/speech2text/base.py
 create mode 100644 api/core/model_providers/models/speech2text/openai_whisper.py
 create mode 100644 api/core/model_providers/providers/__init__.py
 create mode 100644 api/core/model_providers/providers/anthropic_provider.py
 create mode 100644 api/core/model_providers/providers/azure_openai_provider.py
 create mode 100644 api/core/model_providers/providers/base.py
 create mode 100644 api/core/model_providers/providers/chatglm_provider.py
 create mode 100644 api/core/model_providers/providers/hosted.py
 create mode 100644 api/core/model_providers/providers/huggingface_hub_provider.py
 create mode 100644 api/core/model_providers/providers/minimax_provider.py
 create mode 100644 api/core/model_providers/providers/openai_provider.py
 create mode 100644 api/core/model_providers/providers/replicate_provider.py
 create mode 100644 api/core/model_providers/providers/spark_provider.py
 create mode 100644 api/core/model_providers/providers/tongyi_provider.py
 create mode 100644 api/core/model_providers/providers/wenxin_provider.py
 create mode 100644 api/core/model_providers/rules.py
 create mode 100644 api/core/model_providers/rules/_providers.json
 create mode 100644 api/core/model_providers/rules/anthropic.json
 create mode 100644 api/core/model_providers/rules/azure_openai.json
 create mode 100644 api/core/model_providers/rules/chatglm.json
 create mode 100644 api/core/model_providers/rules/huggingface_hub.json
 create mode 100644 api/core/model_providers/rules/minimax.json
 create mode 100644 api/core/model_providers/rules/openai.json
 create mode 100644 api/core/model_providers/rules/replicate.json
 create mode 100644 api/core/model_providers/rules/spark.json
 create mode 100644 api/core/model_providers/rules/tongyi.json
 create mode 100644 api/core/model_providers/rules/wenxin.json
 create mode 100644 api/core/third_party/langchain/embeddings/__init__.py
 create mode 100644 api/core/third_party/langchain/embeddings/replicate_embedding.py
 create mode 100644 api/core/third_party/langchain/llms/__init__.py
 rename api/core/{llm/streamable_azure_chat_open_ai.py => third_party/langchain/llms/azure_chat_open_ai.py} (75%)
 rename api/core/{llm/streamable_azure_open_ai.py => third_party/langchain/llms/azure_open_ai.py} (87%)
 rename api/core/{llm/streamable_chat_open_ai.py => third_party/langchain/llms/chat_open_ai.py} (62%)
 rename api/core/{llm => third_party/langchain/llms}/fake.py (85%)
 rename api/core/{llm/streamable_open_ai.py => third_party/langchain/llms/open_ai.py} (74%)
 create mode 100644 api/core/third_party/langchain/llms/replicate_llm.py
 create mode 100644 api/core/third_party/langchain/llms/spark.py
 create mode 100644 api/core/third_party/langchain/llms/tongyi_llm.py
 create mode 100644 api/core/third_party/langchain/llms/wenxin.py
 create mode 100644 api/core/third_party/spark/__init__.py
 create mode 100644 api/core/third_party/spark/spark_llm.py
 delete mode 100644 api/core/tool/dataset_index_tool.py
 delete mode 100644 api/events/event_handlers/create_provider_when_tenant_created.py
 delete mode 100644 api/events/event_handlers/create_provider_when_tenant_updated.py
 create mode 100644 api/extensions/ext_stripe.py
 create mode 100644 api/migrations/versions/16fa53d9faec_add_provider_model_support.py
 create mode 100644 api/migrations/versions/5022897aaceb_add_model_name_in_embedding.py
 create mode 100644 api/migrations/versions/bf0aec5ba2cf_add_provider_order.py
 create mode 100644 api/migrations/versions/e35ed59becda_modify_quota_limit_field_type.py
 create mode 100644 api/services/provider_checkout_service.py
 delete mode 100644 api/tests/conftest.py
 create mode 100644 api/tests/integration_tests/.env.example
 create mode 100644 api/tests/integration_tests/__init__.py
 create mode 100644 api/tests/integration_tests/conftest.py
 create mode 100644 api/tests/integration_tests/models/__init__.py
 create mode 100644 api/tests/integration_tests/models/embedding/__init__.py
 create mode 100644 api/tests/integration_tests/models/embedding/test_azure_openai_embedding.py
 create mode 100644 api/tests/integration_tests/models/embedding/test_minimax_embedding.py
 create mode 100644 api/tests/integration_tests/models/embedding/test_openai_embedding.py
 create mode 100644 api/tests/integration_tests/models/embedding/test_replicate_embedding.py
 create mode 100644 api/tests/integration_tests/models/llm/__init__.py
 create mode 100644 api/tests/integration_tests/models/llm/test_anthropic_model.py
 create mode 100644 api/tests/integration_tests/models/llm/test_azure_openai_model.py
 create mode 100644 api/tests/integration_tests/models/llm/test_huggingface_hub_model.py
 create mode 100644 api/tests/integration_tests/models/llm/test_minimax_model.py
 create mode 100644 api/tests/integration_tests/models/llm/test_openai_model.py
 create mode 100644 api/tests/integration_tests/models/llm/test_replicate_model.py
 create mode 100644 api/tests/integration_tests/models/llm/test_spark_model.py
 create mode 100644 api/tests/integration_tests/models/llm/test_tongyi_model.py
 create mode 100644 api/tests/integration_tests/models/llm/test_wenxin_model.py
 create mode 100644 api/tests/integration_tests/models/moderation/__init__.py
 create mode 100644 api/tests/integration_tests/models/moderation/test_openai_moderation.py
 create mode 100644 api/tests/integration_tests/models/speech2text/__init__.py
 create mode 100644 api/tests/integration_tests/models/speech2text/audio.mp3
 create mode 100644 api/tests/integration_tests/models/speech2text/test_openai_whisper.py
 delete mode 100644 api/tests/test_controllers/test_account_api.py.bak
 delete mode 100644 api/tests/test_controllers/test_login.py
 delete mode 100644 api/tests/test_controllers/test_setup.py
 delete mode 100644 api/tests/test_factory.py
 create mode 100644 api/tests/unit_tests/__init__.py
 create mode 100644 api/tests/unit_tests/model_providers/__init__.py
 create mode 100644 api/tests/unit_tests/model_providers/fake_model_provider.py
 create mode 100644 api/tests/unit_tests/model_providers/test_anthropic_provider.py
 create mode 100644 api/tests/unit_tests/model_providers/test_azure_openai_provider.py
 create mode 100644 api/tests/unit_tests/model_providers/test_base_model_provider.py
 create mode 100644 api/tests/unit_tests/model_providers/test_chatglm_provider.py
 create mode 100644 api/tests/unit_tests/model_providers/test_huggingface_hub_provider.py
 create mode 100644 api/tests/unit_tests/model_providers/test_minimax_provider.py
 create mode 100644 api/tests/unit_tests/model_providers/test_openai_provider.py
 create mode 100644 api/tests/unit_tests/model_providers/test_replicate_provider.py
 create mode 100644 api/tests/unit_tests/model_providers/test_spark_provider.py
 create mode 100644 api/tests/unit_tests/model_providers/test_tongyi_provider.py
 create mode 100644 api/tests/unit_tests/model_providers/test_wenxin_provider.py

diff --git a/.github/workflows/check_no_chinese_comments.py b/.github/workflows/check_no_chinese_comments.py
index fc01b8163..e59cfb538 100644
--- a/.github/workflows/check_no_chinese_comments.py
+++ b/.github/workflows/check_no_chinese_comments.py
@@ -19,7 +19,8 @@ def check_file_for_chinese_comments(file_path):
 
 def main():
     has_chinese = False
-    excluded_files = ["model_template.py", 'stopwords.py', 'commands.py', 'indexing_runner.py', 'web_reader_tool.py']
+    excluded_files = ["model_template.py", 'stopwords.py', 'commands.py',
+                      'indexing_runner.py', 'web_reader_tool.py', 'spark_provider.py']
 
     for root, _, files in os.walk("."):
         for file in files:
diff --git a/api/.env.example b/api/.env.example
index 3ea7e5be3..946e5e9af 100644
--- a/api/.env.example
+++ b/api/.env.example
@@ -102,3 +102,29 @@ NOTION_INTEGRATION_TYPE=public
 NOTION_CLIENT_SECRET=you-client-secret
 NOTION_CLIENT_ID=you-client-id
 NOTION_INTERNAL_SECRET=you-internal-secret
+
+# Hosted Model Credentials
+HOSTED_OPENAI_ENABLED=false
+HOSTED_OPENAI_API_KEY=
+HOSTED_OPENAI_API_BASE=
+HOSTED_OPENAI_API_ORGANIZATION=
+HOSTED_OPENAI_QUOTA_LIMIT=200
+HOSTED_OPENAI_PAID_ENABLED=false
+HOSTED_OPENAI_PAID_STRIPE_PRICE_ID=
+HOSTED_OPENAI_PAID_INCREASE_QUOTA=1
+
+HOSTED_AZURE_OPENAI_ENABLED=false
+HOSTED_AZURE_OPENAI_API_KEY=
+HOSTED_AZURE_OPENAI_API_BASE=
+HOSTED_AZURE_OPENAI_QUOTA_LIMIT=200
+
+HOSTED_ANTHROPIC_ENABLED=false
+HOSTED_ANTHROPIC_API_BASE=
+HOSTED_ANTHROPIC_API_KEY=
+HOSTED_ANTHROPIC_QUOTA_LIMIT=1000000
+HOSTED_ANTHROPIC_PAID_ENABLED=false
+HOSTED_ANTHROPIC_PAID_STRIPE_PRICE_ID=
+HOSTED_ANTHROPIC_PAID_INCREASE_QUOTA=1
+
+STRIPE_API_KEY=
+STRIPE_WEBHOOK_SECRET=
\ No newline at end of file
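
The HOSTED_* block above gates the cloud-hosted trial providers and their paid upgrades; config.py (further down in this patch) reads the same keys through its get_env/get_bool_env helpers. A minimal sketch of how such string-typed flags resolve to typed values, assuming the usual "string env with fallback table" semantics rather than the project's exact implementation:

    import os

    DEFAULTS = {'HOSTED_OPENAI_ENABLED': 'False', 'HOSTED_OPENAI_QUOTA_LIMIT': '200'}

    def get_env(key):
        # Fall back to the defaults table when the variable is unset.
        return os.environ.get(key, DEFAULTS.get(key, ''))

    def get_bool_env(key):
        # 'true' (any case) enables a flag; everything else disables it.
        return str(get_env(key)).lower() == 'true'

    hosted_openai_enabled = get_bool_env('HOSTED_OPENAI_ENABLED')    # False unless set
    hosted_openai_quota = int(get_env('HOSTED_OPENAI_QUOTA_LIMIT'))  # 200 by default
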
diff --git a/api/app.py b/api/app.py
index dac357c0e..4a10212f6 100644
--- a/api/app.py
+++ b/api/app.py
@@ -16,8 +16,9 @@ from flask import Flask, request, Response, session
 import flask_login
 from flask_cors import CORS
 
+from core.model_providers.providers import hosted
 from extensions import ext_session, ext_celery, ext_sentry, ext_redis, ext_login, ext_migrate, \
-    ext_database, ext_storage, ext_mail
+    ext_database, ext_storage, ext_mail, ext_stripe
 from extensions.ext_database import db
 from extensions.ext_login import login_manager
 
@@ -71,7 +72,7 @@ def create_app(test_config=None) -> Flask:
     register_blueprints(app)
     register_commands(app)
 
-    core.init_app(app)
+    hosted.init_app(app)
 
     return app
 
@@ -88,6 +89,7 @@ def initialize_extensions(app):
    ext_login.init_app(app)
     ext_mail.init_app(app)
     ext_sentry.init_app(app)
+    ext_stripe.init_app(app)
 
 
 def _create_tenant_for_account(account):
@@ -246,5 +248,18 @@ def threads():
     }
 
 
+@app.route('/db-pool-stat')
+def pool_stat():
+    engine = db.engine
+    return {
+        'pool_size': engine.pool.size(),
+        'checked_in_connections': engine.pool.checkedin(),
+        'checked_out_connections': engine.pool.checkedout(),
+        'overflow_connections': engine.pool.overflow(),
+        'connection_timeout': engine.pool.timeout(),
+        'recycle_time': db.engine.pool._recycle
+    }
+
+
 if __name__ == '__main__':
     app.run(host='0.0.0.0', port=5001)
diff --git a/api/commands.py b/api/commands.py
index a25c5fc03..caa5e1ee2 100644
--- a/api/commands.py
+++ b/api/commands.py
@@ -1,5 +1,5 @@
 import datetime
-import logging
+import math
 import random
 import string
 import time
@@ -9,18 +9,18 @@ from flask import current_app
 from werkzeug.exceptions import NotFound
 
 from core.index.index import IndexBuilder
+from core.model_providers.providers.hosted import hosted_model_providers
 from libs.password import password_pattern, valid_password, hash_password
 from libs.helper import email as email_validate
 from extensions.ext_database import db
 from libs.rsa import generate_key_pair
 from models.account import InvitationCode, Tenant
-from models.dataset import Dataset, DatasetQuery, Document, DocumentSegment
+from models.dataset import Dataset, DatasetQuery, Document
 from models.model import Account
 import secrets
 import base64
 
-from models.provider import Provider, ProviderName
-from services.provider_service import ProviderService
+from models.provider import Provider, ProviderType, ProviderQuotaType
 
 
 @click.command('reset-password', help='Reset the account password.')
@@ -251,26 +251,37 @@ def clean_unused_dataset_indexes():
 
 @click.command('sync-anthropic-hosted-providers', help='Sync anthropic hosted providers.')
 def sync_anthropic_hosted_providers():
+    if not hosted_model_providers.anthropic:
+        click.echo(click.style('Anthropic hosted provider is not configured.', fg='red'))
+        return
+
     click.echo(click.style('Start sync anthropic hosted providers.', fg='green'))
     count = 0
 
     page = 1
     while True:
         try:
-            tenants = db.session.query(Tenant).order_by(Tenant.created_at.desc()).paginate(page=page, per_page=50)
+            providers = db.session.query(Provider).filter(
+                Provider.provider_name == 'anthropic',
+                Provider.provider_type == ProviderType.SYSTEM.value,
+                Provider.quota_type == ProviderQuotaType.TRIAL.value,
+            ).order_by(Provider.created_at.desc()).paginate(page=page, per_page=100)
         except NotFound:
             break
 
         page += 1
-        for tenant in tenants:
+        for provider in providers:
             try:
-                click.echo('Syncing tenant anthropic hosted provider: {}'.format(tenant.id))
-                ProviderService.create_system_provider(
-                    tenant,
-                    ProviderName.ANTHROPIC.value,
-                    current_app.config['ANTHROPIC_HOSTED_QUOTA_LIMIT'],
-                    True
-                )
+                click.echo('Syncing tenant anthropic hosted provider: {}'.format(provider.tenant_id))
+                original_quota_limit = provider.quota_limit
+                new_quota_limit = hosted_model_providers.anthropic.quota_limit
+                division = math.ceil(new_quota_limit / 1000)
+
+                provider.quota_limit = new_quota_limit if original_quota_limit == 1000 \
+                    else original_quota_limit * division
+                provider.quota_used = division * provider.quota_used
+                db.session.commit()
+
                 count += 1
             except Exception as e:
                 click.echo(click.style(
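
The rewritten sync command rescales existing Anthropic trial quotas instead of recreating providers: with division = ceil(new_limit / 1000), tenants still on the old default limit of 1000 jump straight to the new limit, and both limit and usage are multiplied for everyone else. A standalone sketch of that arithmetic, with illustrative values:

    import math

    def migrate_quota(old_limit: int, old_used: int, new_limit: int) -> tuple:
        # Mirrors the loop body above, without the database session.
        division = math.ceil(new_limit / 1000)
        limit = new_limit if old_limit == 1000 else old_limit * division
        return limit, old_used * division

    # With the new default HOSTED_ANTHROPIC_QUOTA_LIMIT of 1000000:
    assert migrate_quota(1000, 200, 1000000) == (1000000, 200000)
    assert migrate_quota(2000, 500, 1000000) == (2000000, 500000)
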
diff --git a/api/config.py b/api/config.py
index 26fdfefff..32e2c66ed 100644
--- a/api/config.py
+++ b/api/config.py
@@ -41,6 +41,7 @@ DEFAULTS = {
     'SESSION_USE_SIGNER': 'True',
     'DEPLOY_ENV': 'PRODUCTION',
     'SQLALCHEMY_POOL_SIZE': 30,
+    'SQLALCHEMY_POOL_RECYCLE': 3600,
     'SQLALCHEMY_ECHO': 'False',
     'SENTRY_TRACES_SAMPLE_RATE': 1.0,
     'SENTRY_PROFILES_SAMPLE_RATE': 1.0,
@@ -50,9 +51,16 @@ DEFAULTS = {
     'PDF_PREVIEW': 'True',
     'LOG_LEVEL': 'INFO',
     'DISABLE_PROVIDER_CONFIG_VALIDATION': 'False',
-    'DEFAULT_LLM_PROVIDER': 'openai',
-    'OPENAI_HOSTED_QUOTA_LIMIT': 200,
-    'ANTHROPIC_HOSTED_QUOTA_LIMIT': 1000,
+    'HOSTED_OPENAI_QUOTA_LIMIT': 200,
+    'HOSTED_OPENAI_ENABLED': 'False',
+    'HOSTED_OPENAI_PAID_ENABLED': 'False',
+    'HOSTED_OPENAI_PAID_INCREASE_QUOTA': 1,
+    'HOSTED_AZURE_OPENAI_ENABLED': 'False',
+    'HOSTED_AZURE_OPENAI_QUOTA_LIMIT': 200,
+    'HOSTED_ANTHROPIC_QUOTA_LIMIT': 1000000,
+    'HOSTED_ANTHROPIC_ENABLED': 'False',
+    'HOSTED_ANTHROPIC_PAID_ENABLED': 'False',
+    'HOSTED_ANTHROPIC_PAID_INCREASE_QUOTA': 1,
     'TENANT_DOCUMENT_COUNT': 100,
     'CLEAN_DAY_SETTING': 30
 }
@@ -182,7 +190,10 @@ class Config:
         }
 
         self.SQLALCHEMY_DATABASE_URI = f"postgresql://{db_credentials['DB_USERNAME']}:{db_credentials['DB_PASSWORD']}@{db_credentials['DB_HOST']}:{db_credentials['DB_PORT']}/{db_credentials['DB_DATABASE']}"
-        self.SQLALCHEMY_ENGINE_OPTIONS = {'pool_size': int(get_env('SQLALCHEMY_POOL_SIZE'))}
+        self.SQLALCHEMY_ENGINE_OPTIONS = {
+            'pool_size': int(get_env('SQLALCHEMY_POOL_SIZE')),
+            'pool_recycle': int(get_env('SQLALCHEMY_POOL_RECYCLE'))
+        }
 
         self.SQLALCHEMY_ECHO = get_bool_env('SQLALCHEMY_ECHO')
 
@@ -194,20 +205,35 @@ class Config:
         self.BROKER_USE_SSL = self.CELERY_BROKER_URL.startswith('rediss://')
 
         # hosted provider credentials
-        self.OPENAI_API_KEY = get_env('OPENAI_API_KEY')
-        self.ANTHROPIC_API_KEY = get_env('ANTHROPIC_API_KEY')
+        self.HOSTED_OPENAI_ENABLED = get_bool_env('HOSTED_OPENAI_ENABLED')
+        self.HOSTED_OPENAI_API_KEY = get_env('HOSTED_OPENAI_API_KEY')
+        self.HOSTED_OPENAI_API_BASE = get_env('HOSTED_OPENAI_API_BASE')
+        self.HOSTED_OPENAI_API_ORGANIZATION = get_env('HOSTED_OPENAI_API_ORGANIZATION')
+        self.HOSTED_OPENAI_QUOTA_LIMIT = get_env('HOSTED_OPENAI_QUOTA_LIMIT')
+        self.HOSTED_OPENAI_PAID_ENABLED = get_bool_env('HOSTED_OPENAI_PAID_ENABLED')
+        self.HOSTED_OPENAI_PAID_STRIPE_PRICE_ID = get_env('HOSTED_OPENAI_PAID_STRIPE_PRICE_ID')
+        self.HOSTED_OPENAI_PAID_INCREASE_QUOTA = int(get_env('HOSTED_OPENAI_PAID_INCREASE_QUOTA'))
 
-        self.OPENAI_HOSTED_QUOTA_LIMIT = get_env('OPENAI_HOSTED_QUOTA_LIMIT')
-        self.ANTHROPIC_HOSTED_QUOTA_LIMIT = get_env('ANTHROPIC_HOSTED_QUOTA_LIMIT')
+        self.HOSTED_AZURE_OPENAI_ENABLED = get_bool_env('HOSTED_AZURE_OPENAI_ENABLED')
+        self.HOSTED_AZURE_OPENAI_API_KEY = get_env('HOSTED_AZURE_OPENAI_API_KEY')
+        self.HOSTED_AZURE_OPENAI_API_BASE = get_env('HOSTED_AZURE_OPENAI_API_BASE')
+        self.HOSTED_AZURE_OPENAI_QUOTA_LIMIT = get_env('HOSTED_AZURE_OPENAI_QUOTA_LIMIT')
+
+        self.HOSTED_ANTHROPIC_ENABLED = get_bool_env('HOSTED_ANTHROPIC_ENABLED')
+        self.HOSTED_ANTHROPIC_API_BASE = get_env('HOSTED_ANTHROPIC_API_BASE')
+        self.HOSTED_ANTHROPIC_API_KEY = get_env('HOSTED_ANTHROPIC_API_KEY')
+        self.HOSTED_ANTHROPIC_QUOTA_LIMIT = get_env('HOSTED_ANTHROPIC_QUOTA_LIMIT')
+        self.HOSTED_ANTHROPIC_PAID_ENABLED = get_bool_env('HOSTED_ANTHROPIC_PAID_ENABLED')
+        self.HOSTED_ANTHROPIC_PAID_STRIPE_PRICE_ID = get_env('HOSTED_ANTHROPIC_PAID_STRIPE_PRICE_ID')
+        self.HOSTED_ANTHROPIC_PAID_INCREASE_QUOTA = get_env('HOSTED_ANTHROPIC_PAID_INCREASE_QUOTA')
+
+        self.STRIPE_API_KEY = get_env('STRIPE_API_KEY')
+        self.STRIPE_WEBHOOK_SECRET = get_env('STRIPE_WEBHOOK_SECRET')
 
         # By default it is False
         # You could disable it for compatibility with certain OpenAPI providers
         self.DISABLE_PROVIDER_CONFIG_VALIDATION = get_bool_env('DISABLE_PROVIDER_CONFIG_VALIDATION')
 
-        # For temp use only
-        # set default LLM provider, default is 'openai', support `azure_openai`
-        self.DEFAULT_LLM_PROVIDER = get_env('DEFAULT_LLM_PROVIDER')
-
         # notion import setting
         self.NOTION_CLIENT_ID = get_env('NOTION_CLIENT_ID')
         self.NOTION_CLIENT_SECRET = get_env('NOTION_CLIENT_SECRET')
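
SQLALCHEMY_POOL_RECYCLE closes pooled connections older than the given number of seconds, which avoids errors from databases or proxies that drop idle connections; the /db-pool-stat route added in app.py reads the matching QueuePool counters. A sketch of the same engine options outside Flask, with a hypothetical DSN:

    from sqlalchemy import create_engine

    engine = create_engine(
        'postgresql://user:pass@localhost:5432/dify',  # hypothetical credentials
        pool_size=30,        # SQLALCHEMY_POOL_SIZE default
        pool_recycle=3600,   # SQLALCHEMY_POOL_RECYCLE default
    )

    # The counters exposed by /db-pool-stat:
    print(engine.pool.size(), engine.pool.checkedin(),
          engine.pool.checkedout(), engine.pool.overflow())
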
diff --git a/api/controllers/console/__init__.py b/api/controllers/console/__init__.py
index deb4c4425..4834f8455 100644
--- a/api/controllers/console/__init__.py
+++ b/api/controllers/console/__init__.py
@@ -18,10 +18,13 @@ from .auth import login, oauth, data_source_oauth, activate
 from .datasets import datasets, datasets_document, datasets_segments, file, hit_testing, data_source
 
 # Import workspace controllers
-from .workspace import workspace, members, model_providers, account, tool_providers
+from .workspace import workspace, members, providers, model_providers, account, tool_providers, models
 
 # Import explore controllers
 from .explore import installed_app, recommended_app, completion, conversation, message, parameter, saved_message, audio
 
 # Import universal chat controllers
 from .universal_chat import chat, conversation, message, parameter, audio
+
+# Import webhook controllers
+from .webhook import stripe
diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py
index eb443931d..d0bbbe8bf 100644
--- a/api/controllers/console/app/app.py
+++ b/api/controllers/console/app/app.py
@@ -2,16 +2,17 @@ import json
 from datetime import datetime
 
-import flask
 from flask_login import login_required, current_user
 from flask_restful import Resource, reqparse, fields, marshal_with, abort, inputs
-from werkzeug.exceptions import Unauthorized, Forbidden
+from werkzeug.exceptions import Forbidden
 
 from constants.model_template import model_templates, demo_model_templates
 from controllers.console import api
-from controllers.console.app.error import AppNotFoundError
+from controllers.console.app.error import AppNotFoundError, ProviderNotInitializeError
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
+from core.model_providers.model_factory import ModelFactory
+from core.model_providers.models.entity.model_params import ModelType
 from events.app_event import app_was_created, app_was_deleted
 from libs.helper import TimestampField
 from extensions.ext_database import db
@@ -126,9 +127,9 @@ class AppListApi(Resource):
         if args['model_config'] is not None:
             # validate config
             model_configuration = AppModelConfigService.validate_configuration(
+                tenant_id=current_user.current_tenant_id,
                 account=current_user,
-                config=args['model_config'],
-                mode=args['mode']
+                config=args['model_config']
             )
 
             app = App(
@@ -164,6 +165,21 @@ class AppListApi(Resource):
             app = App(**model_config_template['app'])
             app_model_config = AppModelConfig(**model_config_template['model_config'])
 
+            default_model = ModelFactory.get_default_model(
+                tenant_id=current_user.current_tenant_id,
+                model_type=ModelType.TEXT_GENERATION
+            )
+
+            if default_model:
+                model_dict = app_model_config.model_dict
+                model_dict['provider'] = default_model.provider_name
+                model_dict['name'] = default_model.model_name
+                app_model_config.model = json.dumps(model_dict)
+            else:
+                raise ProviderNotInitializeError(
+                    f"No Text Generation Model available. Please configure a valid provider "
+                    f"in the Settings -> Model Provider.")
+
         app.name = args['name']
         app.mode = args['mode']
         app.icon = args['icon']
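
Creating an app from a demo template now asks ModelFactory for the tenant's default text-generation model and rewrites the template's model block with it, failing early when no provider is configured. The same pattern, condensed into an illustrative helper (the helper itself is not part of the patch; the names it calls are):

    import json

    from controllers.console.app.error import ProviderNotInitializeError
    from core.model_providers.model_factory import ModelFactory
    from core.model_providers.models.entity.model_params import ModelType

    def apply_default_model(app_model_config, tenant_id: str):
        # Overwrite the template's provider/name pair with the tenant default.
        default_model = ModelFactory.get_default_model(
            tenant_id=tenant_id,
            model_type=ModelType.TEXT_GENERATION
        )
        if not default_model:
            raise ProviderNotInitializeError(
                "No Text Generation Model available. Please configure a valid "
                "provider in the Settings -> Model Provider.")

        model_dict = app_model_config.model_dict
        model_dict['provider'] = default_model.provider_name
        model_dict['name'] = default_model.model_name
        app_model_config.model = json.dumps(model_dict)
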
diff --git a/api/controllers/console/app/audio.py b/api/controllers/console/app/audio.py
index 075e8d4a9..16749e994 100644
--- a/api/controllers/console/app/audio.py
+++ b/api/controllers/console/app/audio.py
@@ -14,7 +14,7 @@ from controllers.console.app.error import AppUnavailableError, \
     UnsupportedAudioTypeError, ProviderNotSupportSpeechToTextError
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
-from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
+from core.model_providers.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
     LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
 from flask_restful import Resource
 from services.audio_service import AudioService
diff --git a/api/controllers/console/app/completion.py b/api/controllers/console/app/completion.py
index e76186671..0773abb20 100644
--- a/api/controllers/console/app/completion.py
+++ b/api/controllers/console/app/completion.py
@@ -17,7 +17,7 @@ from controllers.console.app.error import ConversationCompletedError, AppUnavail
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
 from core.conversation_message_task import PubHandler
-from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
+from core.model_providers.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
     LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
 from libs.helper import uuid_value
 from flask_restful import Resource, reqparse
@@ -41,8 +41,11 @@ class CompletionMessageApi(Resource):
         parser.add_argument('inputs', type=dict, required=True, location='json')
         parser.add_argument('query', type=str, location='json')
         parser.add_argument('model_config', type=dict, required=True, location='json')
+        parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
         args = parser.parse_args()
 
+        streaming = args['response_mode'] != 'blocking'
+
         account = flask_login.current_user
 
         try:
@@ -51,7 +54,7 @@ class CompletionMessageApi(Resource):
                 user=account,
                 args=args,
                 from_source='console',
-                streaming=True,
+                streaming=streaming,
                 is_model_config_override=True
             )
 
@@ -111,8 +114,11 @@ class ChatMessageApi(Resource):
         parser.add_argument('query', type=str, required=True, location='json')
         parser.add_argument('model_config', type=dict, required=True, location='json')
         parser.add_argument('conversation_id', type=uuid_value, location='json')
+        parser.add_argument('response_mode', type=str, choices=['blocking', 'streaming'], location='json')
        args = parser.parse_args()
 
+        streaming = args['response_mode'] != 'blocking'
+
         account = flask_login.current_user
 
         try:
@@ -121,7 +127,7 @@ class ChatMessageApi(Resource):
                 user=account,
                 args=args,
                 from_source='console',
-                streaming=True,
+                streaming=streaming,
                 is_model_config_override=True
             )
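
Both console endpoints now accept an optional response_mode, and anything other than 'blocking' keeps the previous streaming behaviour, so existing clients are unaffected. A hypothetical blocking call (URL and model_config are abbreviated placeholders; the real route registration lives elsewhere in this controller):

    import requests

    resp = requests.post(
        'http://localhost:5001/console/api/apps/<app_id>/completion-messages',
        json={
            'inputs': {},
            'query': 'Hello',
            'model_config': {},           # the full app model config in practice
            'response_mode': 'blocking',  # omit (or pass 'streaming') to stream
        },
    )
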
diff --git a/api/controllers/console/app/generator.py b/api/controllers/console/app/generator.py
index 6b9a0a214..f572f855e 100644
--- a/api/controllers/console/app/generator.py
+++ b/api/controllers/console/app/generator.py
@@ -7,7 +7,7 @@ from controllers.console.app.error import ProviderNotInitializeError, ProviderQu
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
 from core.generator.llm_generator import LLMGenerator
-from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, LLMBadRequestError, LLMAPIConnectionError, \
+from core.model_providers.error import ProviderTokenNotInitError, QuotaExceededError, LLMBadRequestError, LLMAPIConnectionError, \
     LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError, ModelCurrentlyNotSupportError
diff --git a/api/controllers/console/app/message.py b/api/controllers/console/app/message.py
index c5764a7ec..9c527eddc 100644
--- a/api/controllers/console/app/message.py
+++ b/api/controllers/console/app/message.py
@@ -14,7 +14,7 @@ from controllers.console.app.error import CompletionRequestError, ProviderNotIni
     AppMoreLikeThisDisabledError, ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
-from core.llm.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \
+from core.model_providers.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \
     ProviderTokenNotInitError, LLMAPIUnavailableError, QuotaExceededError, ModelCurrentlyNotSupportError
 from libs.helper import uuid_value, TimestampField
 from libs.infinite_scroll_pagination import InfiniteScrollPagination
diff --git a/api/controllers/console/app/model_config.py b/api/controllers/console/app/model_config.py
index 3197f9eba..d0c648ba1 100644
--- a/api/controllers/console/app/model_config.py
+++ b/api/controllers/console/app/model_config.py
@@ -28,9 +28,9 @@ class ModelConfigResource(Resource):
 
         # validate config
         model_configuration = AppModelConfigService.validate_configuration(
+            tenant_id=current_user.current_tenant_id,
             account=current_user,
-            config=request.json,
-            mode=app_model.mode
+            config=request.json
         )
 
         new_app_model_config = AppModelConfig(
diff --git a/api/controllers/console/datasets/data_source.py b/api/controllers/console/datasets/data_source.py
index 415113262..65f8225f1 100644
--- a/api/controllers/console/datasets/data_source.py
+++ b/api/controllers/console/datasets/data_source.py
@@ -255,7 +255,7 @@ class DataSourceNotionApi(Resource):
         # validate args
         DocumentService.estimate_args_validate(args)
         indexing_runner = IndexingRunner()
-        response = indexing_runner.notion_indexing_estimate(args['notion_info_list'], args['process_rule'])
+        response = indexing_runner.notion_indexing_estimate(current_user.current_tenant_id, args['notion_info_list'], args['process_rule'])
         return response, 200
diff --git a/api/controllers/console/datasets/datasets.py b/api/controllers/console/datasets/datasets.py
index 188110344..dfe9026eb 100644
--- a/api/controllers/console/datasets/datasets.py
+++ b/api/controllers/console/datasets/datasets.py
@@ -5,10 +5,13 @@ from flask_restful import Resource, reqparse, fields, marshal, marshal_with
 from werkzeug.exceptions import NotFound, Forbidden
 import services
 from controllers.console import api
+from controllers.console.app.error import ProviderNotInitializeError
 from controllers.console.datasets.error import DatasetNameDuplicateError
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
 from core.indexing_runner import IndexingRunner
+from core.model_providers.error import LLMBadRequestError
+from core.model_providers.model_factory import ModelFactory
 from libs.helper import TimestampField
 from extensions.ext_database import db
 from models.dataset import DocumentSegment, Document
@@ -97,6 +100,15 @@ class DatasetListApi(Resource):
         if current_user.current_tenant.current_role not in ['admin', 'owner']:
             raise Forbidden()
 
+        try:
+            ModelFactory.get_embedding_model(
+                tenant_id=current_user.current_tenant_id
+            )
+        except LLMBadRequestError:
+            raise ProviderNotInitializeError(
+                f"No Embedding Model available. Please configure a valid provider "
+                f"in the Settings -> Model Provider.")
+
         try:
             dataset = DatasetService.create_empty_dataset(
                 tenant_id=current_user.current_tenant_id,
@@ -235,12 +247,26 @@ class DatasetIndexingEstimateApi(Resource):
                 raise NotFound("File not found.")
 
             indexing_runner = IndexingRunner()
+
-            response = indexing_runner.file_indexing_estimate(file_details, args['process_rule'], args['doc_form'])
+            try:
+                response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, file_details,
+                                                                  args['process_rule'], args['doc_form'])
+            except LLMBadRequestError:
+                raise ProviderNotInitializeError(
+                    f"No Embedding Model available. Please configure a valid provider "
+                    f"in the Settings -> Model Provider.")
         elif args['info_list']['data_source_type'] == 'notion_import':
             indexing_runner = IndexingRunner()
-            response = indexing_runner.notion_indexing_estimate(args['info_list']['notion_info_list'],
-                                                                args['process_rule'], args['doc_form'])
+
+            try:
+                response = indexing_runner.notion_indexing_estimate(current_user.current_tenant_id,
+                                                                    args['info_list']['notion_info_list'],
+                                                                    args['process_rule'], args['doc_form'])
+            except LLMBadRequestError:
+                raise ProviderNotInitializeError(
+                    f"No Embedding Model available. Please configure a valid provider "
+                    f"in the Settings -> Model Provider.")
         else:
             raise ValueError('Data source type not support')
         return response, 200
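
This resolve-or-translate guard (ModelFactory.get_embedding_model with LLMBadRequestError surfaced as ProviderNotInitializeError) is repeated before every dataset operation that needs embeddings in this patch. As a sketch, the repetition could be captured in a decorator; hypothetical, not part of the patch:

    from functools import wraps

    from flask_login import current_user

    from controllers.console.app.error import ProviderNotInitializeError
    from core.model_providers.error import LLMBadRequestError
    from core.model_providers.model_factory import ModelFactory

    def require_embedding_model(view):
        # Fail fast with a uniform error when no embedding provider is set up.
        @wraps(view)
        def wrapper(*args, **kwargs):
            try:
                ModelFactory.get_embedding_model(
                    tenant_id=current_user.current_tenant_id)
            except LLMBadRequestError:
                raise ProviderNotInitializeError(
                    "No Embedding Model available. Please configure a valid "
                    "provider in the Settings -> Model Provider.")
            return view(*args, **kwargs)
        return wrapper
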
diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py
index 02ddfbf46..a1ef7b767 100644
--- a/api/controllers/console/datasets/datasets_document.py
+++ b/api/controllers/console/datasets/datasets_document.py
@@ -18,7 +18,9 @@ from controllers.console.datasets.error import DocumentAlreadyFinishedError, Inv
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
 from core.indexing_runner import IndexingRunner
-from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
+from core.model_providers.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError, \
+    LLMBadRequestError
+from core.model_providers.model_factory import ModelFactory
 from extensions.ext_redis import redis_client
 from libs.helper import TimestampField
 from extensions.ext_database import db
@@ -280,6 +282,15 @@ class DatasetDocumentListApi(Resource):
         # validate args
         DocumentService.document_create_args_validate(args)
 
+        try:
+            ModelFactory.get_embedding_model(
+                tenant_id=current_user.current_tenant_id
+            )
+        except LLMBadRequestError:
+            raise ProviderNotInitializeError(
+                f"No Embedding Model available. Please configure a valid provider "
+                f"in the Settings -> Model Provider.")
+
         try:
             documents, batch = DocumentService.save_document_with_dataset_id(dataset, args, current_user)
         except ProviderTokenNotInitError as ex:
@@ -319,6 +330,15 @@ class DatasetInitApi(Resource):
         parser.add_argument('doc_form', type=str, default='text_model', required=False, nullable=False, location='json')
         args = parser.parse_args()
 
+        try:
+            ModelFactory.get_embedding_model(
+                tenant_id=current_user.current_tenant_id
+            )
+        except LLMBadRequestError:
+            raise ProviderNotInitializeError(
+                f"No Embedding Model available. Please configure a valid provider "
+                f"in the Settings -> Model Provider.")
+
         # validate args
         DocumentService.document_create_args_validate(args)
 
@@ -384,7 +404,13 @@ class DocumentIndexingEstimateApi(DocumentResource):
 
             indexing_runner = IndexingRunner()
 
-            response = indexing_runner.file_indexing_estimate([file], data_process_rule_dict)
+            try:
+                response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, [file],
+                                                                  data_process_rule_dict)
+            except LLMBadRequestError:
+                raise ProviderNotInitializeError(
+                    f"No Embedding Model available. Please configure a valid provider "
+                    f"in the Settings -> Model Provider.")
 
         return response
 
@@ -445,12 +471,24 @@ class DocumentBatchIndexingEstimateApi(DocumentResource):
                 raise NotFound("File not found.")
 
             indexing_runner = IndexingRunner()
-            response = indexing_runner.file_indexing_estimate(file_details, data_process_rule_dict)
+            try:
+                response = indexing_runner.file_indexing_estimate(current_user.current_tenant_id, file_details,
+                                                                  data_process_rule_dict)
+            except LLMBadRequestError:
+                raise ProviderNotInitializeError(
+                    f"No Embedding Model available. Please configure a valid provider "
+                    f"in the Settings -> Model Provider.")
         elif dataset.data_source_type:
             indexing_runner = IndexingRunner()
-            response = indexing_runner.notion_indexing_estimate(info_list,
-                                                                data_process_rule_dict)
+            try:
+                response = indexing_runner.notion_indexing_estimate(current_user.current_tenant_id,
+                                                                    info_list,
+                                                                    data_process_rule_dict)
+            except LLMBadRequestError:
+                raise ProviderNotInitializeError(
+                    f"No Embedding Model available. Please configure a valid provider "
+                    f"in the Settings -> Model Provider.")
         else:
             raise ValueError('Data source type not support')
         return response
diff --git a/api/controllers/console/datasets/hit_testing.py b/api/controllers/console/datasets/hit_testing.py
index f627949d3..399bd4c0c 100644
--- a/api/controllers/console/datasets/hit_testing.py
+++ b/api/controllers/console/datasets/hit_testing.py
@@ -11,7 +11,7 @@ from controllers.console.app.error import ProviderNotInitializeError, ProviderQu
 from controllers.console.datasets.error import HighQualityDatasetOnlyError, DatasetNotInitializedError
 from controllers.console.setup import setup_required
 from controllers.console.wraps import account_initialization_required
-from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
+from core.model_providers.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
 from libs.helper import TimestampField
 from services.dataset_service import DatasetService
 from services.hit_testing_service import HitTestingService
@@ -102,6 +102,8 @@ class HitTestingApi(Resource):
             raise ProviderQuotaExceededError()
         except ModelCurrentlyNotSupportError:
             raise ProviderModelCurrentlyNotSupportError()
+        except ValueError as e:
+            raise ValueError(str(e))
         except Exception as e:
             logging.exception("Hit testing failed.")
             raise InternalServerError(str(e))
diff --git a/api/controllers/console/explore/audio.py b/api/controllers/console/explore/audio.py
index 991a228dd..50ddfac98 100644
--- a/api/controllers/console/explore/audio.py
+++ b/api/controllers/console/explore/audio.py
@@ -11,7 +11,7 @@ from controllers.console.app.error import AppUnavailableError, ProviderNotInitia
     NoAudioUploadedError, AudioTooLargeError, \
     UnsupportedAudioTypeError, ProviderNotSupportSpeechToTextError
 from controllers.console.explore.wraps import InstalledAppResource
-from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
+from core.model_providers.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
     LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
 from services.audio_service import AudioService
 from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, \
diff --git a/api/controllers/console/explore/completion.py b/api/controllers/console/explore/completion.py
index bc4b88ad1..d48c85a73 100644
--- a/api/controllers/console/explore/completion.py
+++ b/api/controllers/console/explore/completion.py
@@ -15,7 +15,7 @@ from controllers.console.app.error import ConversationCompletedError, AppUnavail
 from controllers.console.explore.error import NotCompletionAppError, NotChatAppError
 from controllers.console.explore.wraps import InstalledAppResource
 from core.conversation_message_task import PubHandler
-from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
+from core.model_providers.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
     LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
 from libs.helper import uuid_value
 from services.completion_service import CompletionService
diff --git a/api/controllers/console/explore/message.py b/api/controllers/console/explore/message.py
index 1232169ea..160ebee12 100644
--- a/api/controllers/console/explore/message.py
+++ b/api/controllers/console/explore/message.py
@@ -15,7 +15,7 @@ from controllers.console.app.error import AppMoreLikeThisDisabledError, Provider
     ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError
 from controllers.console.explore.error import NotCompletionAppError, AppSuggestedQuestionsAfterAnswerDisabledError
 from controllers.console.explore.wraps import InstalledAppResource
-from core.llm.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \
+from core.model_providers.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \
     ProviderTokenNotInitError, LLMAPIUnavailableError, QuotaExceededError, ModelCurrentlyNotSupportError
 from libs.helper import uuid_value, TimestampField
 from services.completion_service import CompletionService
diff --git a/api/controllers/console/explore/parameter.py b/api/controllers/console/explore/parameter.py
index 09b4f987e..13e356cb2 100644
--- a/api/controllers/console/explore/parameter.py
+++ b/api/controllers/console/explore/parameter.py
@@ -4,8 +4,6 @@ from flask_restful import marshal_with, fields
 
 from controllers.console import api
 from controllers.console.explore.wraps import InstalledAppResource
-from core.llm.llm_builder import LLMBuilder
-from models.provider import ProviderName
 
 from models.model import InstalledApp
 
@@ -35,13 +33,12 @@ class AppParameterApi(InstalledAppResource):
         """Retrieve app parameters."""
         app_model = installed_app.app
         app_model_config = app_model.app_model_config
-        provider_name = LLMBuilder.get_default_provider(installed_app.tenant_id, 'whisper-1')
 
         return {
             'opening_statement': app_model_config.opening_statement,
             'suggested_questions': app_model_config.suggested_questions_list,
             'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
-            'speech_to_text': app_model_config.speech_to_text_dict if provider_name == ProviderName.OPENAI.value else { 'enabled': False },
+            'speech_to_text': app_model_config.speech_to_text_dict,
             'more_like_this': app_model_config.more_like_this_dict,
             'user_input_form': app_model_config.user_input_form_list
         }
diff --git a/api/controllers/console/universal_chat/audio.py b/api/controllers/console/universal_chat/audio.py
index 41d5382c7..38bcc25b2 100644
--- a/api/controllers/console/universal_chat/audio.py
+++ b/api/controllers/console/universal_chat/audio.py
@@ -11,7 +11,7 @@ from controllers.console.app.error import AppUnavailableError, ProviderNotInitia
     NoAudioUploadedError, AudioTooLargeError, \
     UnsupportedAudioTypeError, ProviderNotSupportSpeechToTextError
 from controllers.console.universal_chat.wraps import UniversalChatResource
-from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
+from core.model_providers.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \
     LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError
 from services.audio_service import AudioService
 from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, \
diff --git a/api/controllers/console/universal_chat/chat.py b/api/controllers/console/universal_chat/chat.py
index 2a95eb992..a6aa84204 100644
--- a/api/controllers/console/universal_chat/chat.py
+++ b/api/controllers/console/universal_chat/chat.py
@@ -12,9 +12,8 @@ from controllers.console import api
 from controllers.console.app.error import ConversationCompletedError, AppUnavailableError, ProviderNotInitializeError, \
     ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError
 from controllers.console.universal_chat.wraps import UniversalChatResource
-from core.constant import llm_constant
 from core.conversation_message_task import PubHandler
-from core.llm.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError, \
+from core.model_providers.error import ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError, \
     LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError
 from libs.helper import uuid_value
 from services.completion_service import CompletionService
@@ -27,6 +26,7 @@ class UniversalChatApi(UniversalChatResource):
         parser = reqparse.RequestParser()
         parser.add_argument('query', type=str, required=True, location='json')
         parser.add_argument('conversation_id', type=uuid_value, location='json')
+        parser.add_argument('provider', type=str, required=True, location='json')
         parser.add_argument('model', type=str, required=True, location='json')
         parser.add_argument('tools', type=list, required=True, location='json')
         args = parser.parse_args()
@@ -36,11 +36,7 @@
         # update app model config
         args['model_config'] = app_model_config.to_dict()
         args['model_config']['model']['name'] = args['model']
-
-        if not llm_constant.models[args['model']]:
-            raise ValueError("Model not exists.")
-
-        args['model_config']['model']['provider'] = llm_constant.models[args['model']]
+        args['model_config']['model']['provider'] = args['provider']
 
         args['model_config']['agent_mode']['tools'] = args['tools']
 
         if not args['model_config']['agent_mode']['tools']:
diff --git a/api/controllers/console/universal_chat/message.py b/api/controllers/console/universal_chat/message.py
index cbb413482..07d8b37fe 100644
--- a/api/controllers/console/universal_chat/message.py
+++ b/api/controllers/console/universal_chat/message.py
@@ -12,7 +12,7 @@ from controllers.console.app.error import ProviderNotInitializeError, \
     ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, CompletionRequestError
 from controllers.console.explore.error import AppSuggestedQuestionsAfterAnswerDisabledError
 from controllers.console.universal_chat.wraps import UniversalChatResource
-from core.llm.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \
+from core.model_providers.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \
     ProviderTokenNotInitError, LLMAPIUnavailableError, QuotaExceededError, ModelCurrentlyNotSupportError
 from libs.helper import uuid_value, TimestampField
 from services.errors.conversation import ConversationNotExistsError
diff --git a/api/controllers/console/universal_chat/parameter.py b/api/controllers/console/universal_chat/parameter.py
index c8351d0cb..b492bba50 100644
--- a/api/controllers/console/universal_chat/parameter.py
+++ b/api/controllers/console/universal_chat/parameter.py
@@ -4,8 +4,6 @@ from flask_restful import marshal_with, fields
 
 from controllers.console import api
 from controllers.console.universal_chat.wraps import UniversalChatResource
-from core.llm.llm_builder import LLMBuilder
-from models.provider import ProviderName
 
 from models.model import App
 
@@ -23,13 +21,12 @@ class UniversalChatParameterApi(UniversalChatResource):
         """Retrieve app parameters."""
         app_model = universal_app
         app_model_config = app_model.app_model_config
-        provider_name = LLMBuilder.get_default_provider(universal_app.tenant_id, 'whisper-1')
 
         return {
             'opening_statement': app_model_config.opening_statement,
             'suggested_questions': app_model_config.suggested_questions_list,
             'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict,
-            'speech_to_text': app_model_config.speech_to_text_dict if provider_name == ProviderName.OPENAI.value else { 'enabled': False },
+            'speech_to_text': app_model_config.speech_to_text_dict,
         }
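
Universal chat callers must now name the provider explicitly, as shown in chat.py above; the removed llm_constant.models lookup table no longer decides it. A request body of the new shape (field names from the parser above; values illustrative):

    payload = {
        'query': 'What can you do?',
        'provider': 'openai',        # new required field
        'model': 'gpt-3.5-turbo',
        'tools': [],
        # 'conversation_id': '...',  # optional, to continue a conversation
    }
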
diff --git a/api/tests/test_controllers/__init__.py b/api/controllers/console/webhook/__init__.py
similarity index 100%
rename from api/tests/test_controllers/__init__.py
rename to api/controllers/console/webhook/__init__.py
diff --git a/api/controllers/console/webhook/stripe.py b/api/controllers/console/webhook/stripe.py
new file mode 100644
index 000000000..da906b0dc
--- /dev/null
+++ b/api/controllers/console/webhook/stripe.py
@@ -0,0 +1,53 @@
+import logging
+
+import stripe
+from flask import request, current_app
+from flask_restful import Resource
+
+from controllers.console import api
+from controllers.console.setup import setup_required
+from controllers.console.wraps import only_edition_cloud
+from services.provider_checkout_service import ProviderCheckoutService
+
+
+class StripeWebhookApi(Resource):
+    @setup_required
+    @only_edition_cloud
+    def post(self):
+        payload = request.data
+        sig_header = request.headers.get('STRIPE_SIGNATURE')
+        webhook_secret = current_app.config.get('STRIPE_WEBHOOK_SECRET')
+
+        try:
+            event = stripe.Webhook.construct_event(
+                payload, sig_header, webhook_secret
+            )
+        except ValueError as e:
+            # Invalid payload
+            return 'Invalid payload', 400
+        except stripe.error.SignatureVerificationError as e:
+            # Invalid signature
+            return 'Invalid signature', 400
+
+        # Handle the checkout.session.completed event
+        if event['type'] == 'checkout.session.completed':
+            logging.debug(event['data']['object']['id'])
+            logging.debug(event['data']['object']['amount_subtotal'])
+            logging.debug(event['data']['object']['currency'])
+            logging.debug(event['data']['object']['payment_intent'])
+            logging.debug(event['data']['object']['payment_status'])
+            logging.debug(event['data']['object']['metadata'])
+
+            # Fulfill the purchase...
+            provider_checkout_service = ProviderCheckoutService()
+
+            try:
+                provider_checkout_service.fulfill_provider_order(event)
+            except Exception as e:
+                logging.debug(str(e))
+                return 'success', 200
+
+        return 'success', 200
+
+
+api.add_resource(StripeWebhookApi, '/webhook/stripe')
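
stripe.Webhook.construct_event rejects any payload whose signature header does not verify, so only events signed with STRIPE_WEBHOOK_SECRET reach the fulfillment path. Stripe signs "timestamp.payload" with HMAC-SHA256, which makes the handler testable offline; a self-contained sketch with a made-up secret:

    import hashlib
    import hmac
    import time

    import stripe

    secret = 'whsec_test'  # made-up webhook secret for illustration
    payload = b'{"id": "evt_test", "type": "checkout.session.completed"}'

    timestamp = int(time.time())
    signed = f'{timestamp}.{payload.decode()}'.encode()
    v1 = hmac.new(secret.encode(), signed, hashlib.sha256).hexdigest()
    header = f't={timestamp},v1={v1}'

    event = stripe.Webhook.construct_event(payload, header, secret)
    assert event['type'] == 'checkout.session.completed'
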
+ provider_checkout_service = ProviderCheckoutService() + + try: + provider_checkout_service.fulfill_provider_order(event) + except Exception as e: + logging.debug(str(e)) + return 'success', 200 + + return 'success', 200 + + +api.add_resource(StripeWebhookApi, '/webhook/stripe') diff --git a/api/controllers/console/workspace/model_providers.py b/api/controllers/console/workspace/model_providers.py index 1991b1c6c..d32591803 100644 --- a/api/controllers/console/workspace/model_providers.py +++ b/api/controllers/console/workspace/model_providers.py @@ -1,24 +1,18 @@ -# -*- coding:utf-8 -*- -import base64 -import json -import logging - -from flask import current_app from flask_login import login_required, current_user -from flask_restful import Resource, reqparse, abort +from flask_restful import Resource, reqparse from werkzeug.exceptions import Forbidden from controllers.console import api +from controllers.console.app.error import ProviderNotInitializeError from controllers.console.setup import setup_required from controllers.console.wraps import account_initialization_required -from core.llm.provider.errors import ValidateFailedError -from extensions.ext_database import db -from libs import rsa -from models.provider import Provider, ProviderType, ProviderName +from core.model_providers.error import LLMBadRequestError +from core.model_providers.providers.base import CredentialsValidateFailedError +from services.provider_checkout_service import ProviderCheckoutService from services.provider_service import ProviderService -class ProviderListApi(Resource): +class ModelProviderListApi(Resource): @setup_required @login_required @@ -26,156 +20,36 @@ class ProviderListApi(Resource): def get(self): tenant_id = current_user.current_tenant_id - """ - If the type is AZURE_OPENAI, decode and return the four fields of azure_api_type, azure_api_version:, - azure_api_base, azure_api_key as an object, where azure_api_key displays the first 6 bits in plaintext, and the - rest is replaced by * and the last two bits are displayed in plaintext - - If the type is other, decode and return the Token field directly, the field displays the first 6 bits in - plaintext, the rest is replaced by * and the last two bits are displayed in plaintext - """ - - ProviderService.init_supported_provider(current_user.current_tenant) - providers = Provider.query.filter_by(tenant_id=tenant_id).all() - - provider_list = [ - { - 'provider_name': p.provider_name, - 'provider_type': p.provider_type, - 'is_valid': p.is_valid, - 'last_used': p.last_used, - 'is_enabled': p.is_enabled, - **({ - 'quota_type': p.quota_type, - 'quota_limit': p.quota_limit, - 'quota_used': p.quota_used - } if p.provider_type == ProviderType.SYSTEM.value else {}), - 'token': ProviderService.get_obfuscated_api_key(current_user.current_tenant, - ProviderName(p.provider_name), only_custom=True) - if p.provider_type == ProviderType.CUSTOM.value else None - } - for p in providers - ] + provider_service = ProviderService() + provider_list = provider_service.get_provider_list(tenant_id) return provider_list -class ProviderTokenApi(Resource): +class ModelProviderValidateApi(Resource): @setup_required @login_required @account_initialization_required - def post(self, provider): - if provider not in [p.value for p in ProviderName]: - abort(404) - - # The role of the current user in the ta table must be admin or owner - if current_user.current_tenant.current_role not in ['admin', 'owner']: - logging.log(logging.ERROR, - f'User {current_user.id} is not authorized to 
update provider token, current_role is {current_user.current_tenant.current_role}') - raise Forbidden() + def post(self, provider_name: str): parser = reqparse.RequestParser() - - parser.add_argument('token', type=ProviderService.get_token_type( - tenant=current_user.current_tenant, - provider_name=ProviderName(provider) - ), required=True, nullable=False, location='json') - + parser.add_argument('config', type=dict, required=True, nullable=False, location='json') args = parser.parse_args() - if args['token']: - try: - ProviderService.validate_provider_configs( - tenant=current_user.current_tenant, - provider_name=ProviderName(provider), - configs=args['token'] - ) - token_is_valid = True - except ValidateFailedError as ex: - raise ValueError(str(ex)) - - base64_encrypted_token = ProviderService.get_encrypted_token( - tenant=current_user.current_tenant, - provider_name=ProviderName(provider), - configs=args['token'] - ) - else: - base64_encrypted_token = None - token_is_valid = False - - tenant = current_user.current_tenant - - provider_model = db.session.query(Provider).filter( - Provider.tenant_id == tenant.id, - Provider.provider_name == provider, - Provider.provider_type == ProviderType.CUSTOM.value - ).first() - - # Only allow updating token for CUSTOM provider type - if provider_model: - provider_model.encrypted_config = base64_encrypted_token - provider_model.is_valid = token_is_valid - else: - provider_model = Provider(tenant_id=tenant.id, provider_name=provider, - provider_type=ProviderType.CUSTOM.value, - encrypted_config=base64_encrypted_token, - is_valid=token_is_valid) - db.session.add(provider_model) - - if provider in [ProviderName.OPENAI.value, ProviderName.AZURE_OPENAI.value] and provider_model.is_valid: - other_providers = db.session.query(Provider).filter( - Provider.tenant_id == tenant.id, - Provider.provider_name.in_([ProviderName.OPENAI.value, ProviderName.AZURE_OPENAI.value]), - Provider.provider_name != provider, - Provider.provider_type == ProviderType.CUSTOM.value - ).all() - - for other_provider in other_providers: - other_provider.is_valid = False - - db.session.commit() - - if provider in [ProviderName.AZURE_OPENAI.value, ProviderName.COHERE.value, - ProviderName.HUGGINGFACEHUB.value]: - return {'result': 'success', 'warning': 'MOCK: This provider is not supported yet.'}, 201 - - return {'result': 'success'}, 201 - - -class ProviderTokenValidateApi(Resource): - - @setup_required - @login_required - @account_initialization_required - def post(self, provider): - if provider not in [p.value for p in ProviderName]: - abort(404) - - parser = reqparse.RequestParser() - parser.add_argument('token', type=ProviderService.get_token_type( - tenant=current_user.current_tenant, - provider_name=ProviderName(provider) - ), required=True, nullable=False, location='json') - args = parser.parse_args() - - # todo: remove this when the provider is supported - if provider in [ProviderName.COHERE.value, - ProviderName.HUGGINGFACEHUB.value]: - return {'result': 'success', 'warning': 'MOCK: This provider is not supported yet.'} + provider_service = ProviderService() result = True error = None try: - ProviderService.validate_provider_configs( - tenant=current_user.current_tenant, - provider_name=ProviderName(provider), - configs=args['token'] + provider_service.custom_provider_config_validate( + provider_name=provider_name, + config=args['config'] ) - except ValidateFailedError as e: + except CredentialsValidateFailedError as ex: result = False - error = str(e) + error = str(ex) 
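+ # Validation failures are reported in the body (result='error' plus the provider's + # message) rather than as an HTTP error status, so the console can render them inline.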
response = {'result': 'success' if result else 'error'} @@ -185,91 +59,227 @@ class ProviderTokenValidateApi(Resource): return response -class ProviderSystemApi(Resource): +class ModelProviderUpdateApi(Resource): @setup_required @login_required @account_initialization_required - def put(self, provider): - if provider not in [p.value for p in ProviderName]: - abort(404) - - parser = reqparse.RequestParser() - parser.add_argument('is_enabled', type=bool, required=True, location='json') - args = parser.parse_args() - - tenant = current_user.current_tenant_id - - provider_model = Provider.query.filter_by(tenant_id=tenant.id, provider_name=provider).first() - - if provider_model and provider_model.provider_type == ProviderType.SYSTEM.value: - provider_model.is_valid = args['is_enabled'] - db.session.commit() - elif not provider_model: - if provider == ProviderName.OPENAI.value: - quota_limit = current_app.config['OPENAI_HOSTED_QUOTA_LIMIT'] - elif provider == ProviderName.ANTHROPIC.value: - quota_limit = current_app.config['ANTHROPIC_HOSTED_QUOTA_LIMIT'] - else: - quota_limit = 0 - - ProviderService.create_system_provider( - tenant, - provider, - quota_limit, - args['is_enabled'] - ) - else: - abort(403) - - return {'result': 'success'} - - @setup_required - @login_required - @account_initialization_required - def get(self, provider): - if provider not in [p.value for p in ProviderName]: - abort(404) - - # The role of the current user in the ta table must be admin or owner + def post(self, provider_name: str): if current_user.current_tenant.current_role not in ['admin', 'owner']: raise Forbidden() - provider_model = db.session.query(Provider).filter(Provider.tenant_id == current_user.current_tenant_id, - Provider.provider_name == provider, - Provider.provider_type == ProviderType.SYSTEM.value).first() + parser = reqparse.RequestParser() + parser.add_argument('config', type=dict, required=True, nullable=False, location='json') + args = parser.parse_args() - system_model = None - if provider_model: - system_model = { - 'result': 'success', - 'provider': { - 'provider_name': provider_model.provider_name, - 'provider_type': provider_model.provider_type, - 'is_valid': provider_model.is_valid, - 'last_used': provider_model.last_used, - 'is_enabled': provider_model.is_enabled, - 'quota_type': provider_model.quota_type, - 'quota_limit': provider_model.quota_limit, - 'quota_used': provider_model.quota_used - } + provider_service = ProviderService() + + try: + provider_service.save_custom_provider_config( + tenant_id=current_user.current_tenant_id, + provider_name=provider_name, + config=args['config'] + ) + except CredentialsValidateFailedError as ex: + raise ValueError(str(ex)) + + return {'result': 'success'}, 201 + + @setup_required + @login_required + @account_initialization_required + def delete(self, provider_name: str): + if current_user.current_tenant.current_role not in ['admin', 'owner']: + raise Forbidden() + + provider_service = ProviderService() + provider_service.delete_custom_provider( + tenant_id=current_user.current_tenant_id, + provider_name=provider_name + ) + + return {'result': 'success'}, 204 + + +class ModelProviderModelValidateApi(Resource): + + @setup_required + @login_required + @account_initialization_required + def post(self, provider_name: str): + parser = reqparse.RequestParser() + parser.add_argument('model_name', type=str, required=True, nullable=False, location='json') + parser.add_argument('model_type', type=str, required=True, nullable=False, + 
choices=['text-generation', 'embeddings', 'speech2text'], location='json') + parser.add_argument('config', type=dict, required=True, nullable=False, location='json') + args = parser.parse_args() + + provider_service = ProviderService() + + result = True + error = None + + try: + provider_service.custom_provider_model_config_validate( + provider_name=provider_name, + model_name=args['model_name'], + model_type=args['model_type'], + config=args['config'] + ) + except CredentialsValidateFailedError as ex: + result = False + error = str(ex) + + response = {'result': 'success' if result else 'error'} + + if not result: + response['error'] = error + + return response + + +class ModelProviderModelUpdateApi(Resource): + + @setup_required + @login_required + @account_initialization_required + def post(self, provider_name: str): + if current_user.current_tenant.current_role not in ['admin', 'owner']: + raise Forbidden() + + parser = reqparse.RequestParser() + parser.add_argument('model_name', type=str, required=True, nullable=False, location='json') + parser.add_argument('model_type', type=str, required=True, nullable=False, + choices=['text-generation', 'embeddings', 'speech2text'], location='json') + parser.add_argument('config', type=dict, required=True, nullable=False, location='json') + args = parser.parse_args() + + provider_service = ProviderService() + + try: + provider_service.add_or_save_custom_provider_model_config( + tenant_id=current_user.current_tenant_id, + provider_name=provider_name, + model_name=args['model_name'], + model_type=args['model_type'], + config=args['config'] + ) + except CredentialsValidateFailedError as ex: + raise ValueError(str(ex)) + + return {'result': 'success'}, 200 + + @setup_required + @login_required + @account_initialization_required + def delete(self, provider_name: str): + if current_user.current_tenant.current_role not in ['admin', 'owner']: + raise Forbidden() + + parser = reqparse.RequestParser() + parser.add_argument('model_name', type=str, required=True, nullable=False, location='args') + parser.add_argument('model_type', type=str, required=True, nullable=False, + choices=['text-generation', 'embeddings', 'speech2text'], location='args') + args = parser.parse_args() + + provider_service = ProviderService() + provider_service.delete_custom_provider_model( + tenant_id=current_user.current_tenant_id, + provider_name=provider_name, + model_name=args['model_name'], + model_type=args['model_type'] + ) + + return {'result': 'success'}, 204 + + +class PreferredProviderTypeUpdateApi(Resource): + + @setup_required + @login_required + @account_initialization_required + def post(self, provider_name: str): + if current_user.current_tenant.current_role not in ['admin', 'owner']: + raise Forbidden() + + parser = reqparse.RequestParser() + parser.add_argument('preferred_provider_type', type=str, required=True, nullable=False, + choices=['system', 'custom'], location='json') + args = parser.parse_args() + + provider_service = ProviderService() + provider_service.switch_preferred_provider( + tenant_id=current_user.current_tenant_id, + provider_name=provider_name, + preferred_provider_type=args['preferred_provider_type'] + ) + + return {'result': 'success'} + + +class ModelProviderModelParameterRuleApi(Resource): + + @setup_required + @login_required + @account_initialization_required + def get(self, provider_name: str): + parser = reqparse.RequestParser() + parser.add_argument('model_name', type=str, required=True, nullable=False, location='args') + args = 
parser.parse_args() + + provider_service = ProviderService() + + try: + parameter_rules = provider_service.get_model_parameter_rules( + tenant_id=current_user.current_tenant_id, + model_provider_name=provider_name, + model_name=args['model_name'], + model_type='text-generation' + ) + except LLMBadRequestError: + raise ProviderNotInitializeError( + "Current text generation model is invalid. Please switch to an available model.") + + rules = { + k: { + 'enabled': v.enabled, + 'min': v.min, + 'max': v.max, + 'default': v.default } - else: - abort(404) + for k, v in vars(parameter_rules).items() + } - return system_model + return rules -api.add_resource(ProviderTokenApi, '/providers/<provider>/token', - endpoint='current_providers_token') # Deprecated -api.add_resource(ProviderTokenValidateApi, '/providers/<provider>/token-validate', - endpoint='current_providers_token_validate') # Deprecated +class ModelProviderPaymentCheckoutUrlApi(Resource): + @setup_required + @login_required + @account_initialization_required + def get(self, provider_name: str): + provider_service = ProviderCheckoutService() + provider_checkout = provider_service.create_checkout( + tenant_id=current_user.current_tenant_id, + provider_name=provider_name, + account=current_user + ) -api.add_resource(ProviderTokenApi, '/workspaces/current/providers/<provider>/token', - endpoint='workspaces_current_providers_token') # PUT for updating provider token -api.add_resource(ProviderTokenValidateApi, '/workspaces/current/providers/<provider>/token-validate', - endpoint='workspaces_current_providers_token_validate') # POST for validating provider token + return { + 'url': provider_checkout.get_checkout_url() + } -api.add_resource(ProviderListApi, '/workspaces/current/providers') # GET for getting providers list -api.add_resource(ProviderSystemApi, '/workspaces/current/providers/<provider>/system', - endpoint='workspaces_current_providers_system') # GET for getting provider quota, PUT for updating provider status + +api.add_resource(ModelProviderListApi, '/workspaces/current/model-providers') +api.add_resource(ModelProviderValidateApi, '/workspaces/current/model-providers/<provider_name>/validate') +api.add_resource(ModelProviderUpdateApi, '/workspaces/current/model-providers/<provider_name>') +api.add_resource(ModelProviderModelValidateApi, + '/workspaces/current/model-providers/<provider_name>/models/validate') +api.add_resource(ModelProviderModelUpdateApi, + '/workspaces/current/model-providers/<provider_name>/models') +api.add_resource(PreferredProviderTypeUpdateApi, + '/workspaces/current/model-providers/<provider_name>/preferred-provider-type') +api.add_resource(ModelProviderModelParameterRuleApi, + '/workspaces/current/model-providers/<provider_name>/models/parameter-rules') +api.add_resource(ModelProviderPaymentCheckoutUrlApi, + '/workspaces/current/model-providers/<provider_name>/checkout-url')
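These registrations give the console a validate-then-save flow for custom provider credentials: the validate route is a dry run that always answers HTTP 200 with result/error in the body, while POSTing to the provider route itself re-validates and persists the config with a 201. A minimal client-side sketch, assuming a hypothetical console API root and an already-authenticated session (neither is part of this patch):

    import requests

    BASE = 'https://console.example.com/console/api'  # illustrative console API root
    session = requests.Session()  # assumed to already carry a logged-in console session cookie

    provider = 'anthropic'
    config = {'anthropic_api_key': 'sk-ant-...'}  # field names depend on the provider's rules

    # Dry-run validation: HTTP 200 either way, failure detail in the body
    check = session.post(f'{BASE}/workspaces/current/model-providers/{provider}/validate',
                         json={'config': config}).json()
    if check['result'] == 'success':
        # Persist the credentials; the server validates again and answers 201
        session.post(f'{BASE}/workspaces/current/model-providers/{provider}',
                     json={'config': config})

diff --git a/api/controllers/console/workspace/models.py b/api/controllers/console/workspace/models.py new file mode 100644 index 000000000..33f8edfed --- /dev/null +++ b/api/controllers/console/workspace/models.py @@ -0,0 +1,108 @@ +from flask_login import login_required, current_user +from flask_restful import Resource, reqparse + +from controllers.console import api +from controllers.console.setup import setup_required +from controllers.console.wraps import account_initialization_required +from core.model_providers.model_provider_factory import ModelProviderFactory +from core.model_providers.models.entity.model_params import ModelType +from models.provider import ProviderType +from services.provider_service import ProviderService + + +class DefaultModelApi(Resource): + +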
@setup_required + @login_required + @account_initialization_required + def get(self): + parser = reqparse.RequestParser() + parser.add_argument('model_type', type=str, required=True, nullable=False, + choices=['text-generation', 'embeddings', 'speech2text'], location='args') + args = parser.parse_args() + + tenant_id = current_user.current_tenant_id + + provider_service = ProviderService() + default_model = provider_service.get_default_model_of_model_type( + tenant_id=tenant_id, + model_type=args['model_type'] + ) + + if not default_model: + return None + + model_provider = ModelProviderFactory.get_preferred_model_provider( + tenant_id, + default_model.provider_name + ) + + if not model_provider: + return { + 'model_name': default_model.model_name, + 'model_type': default_model.model_type, + 'model_provider': { + 'provider_name': default_model.provider_name + } + } + + provider = model_provider.provider + rst = { + 'model_name': default_model.model_name, + 'model_type': default_model.model_type, + 'model_provider': { + 'provider_name': provider.provider_name, + 'provider_type': provider.provider_type + } + } + + model_provider_rules = ModelProviderFactory.get_provider_rule(default_model.provider_name) + if provider.provider_type == ProviderType.SYSTEM.value: + rst['model_provider']['quota_type'] = provider.quota_type + rst['model_provider']['quota_unit'] = model_provider_rules['system_config']['quota_unit'] + rst['model_provider']['quota_limit'] = provider.quota_limit + rst['model_provider']['quota_used'] = provider.quota_used + + return rst + + @setup_required + @login_required + @account_initialization_required + def post(self): + parser = reqparse.RequestParser() + parser.add_argument('model_name', type=str, required=True, nullable=False, location='json') + parser.add_argument('model_type', type=str, required=True, nullable=False, + choices=['text-generation', 'embeddings', 'speech2text'], location='json') + parser.add_argument('provider_name', type=str, required=True, nullable=False, location='json') + args = parser.parse_args() + + provider_service = ProviderService() + provider_service.update_default_model_of_model_type( + tenant_id=current_user.current_tenant_id, + model_type=args['model_type'], + provider_name=args['provider_name'], + model_name=args['model_name'] + ) + + return {'result': 'success'} + + +class ValidModelApi(Resource): + + @setup_required + @login_required + @account_initialization_required + def get(self, model_type): + ModelType.value_of(model_type) + + provider_service = ProviderService() + valid_models = provider_service.get_valid_model_list( + tenant_id=current_user.current_tenant_id, + model_type=model_type + ) + + return valid_models + + +api.add_resource(DefaultModelApi, '/workspaces/current/default-model') +api.add_resource(ValidModelApi, '/workspaces/current/models/model-type/<model_type>')
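DefaultModelApi and ValidModelApi back the console's model pickers: the former reads or sets the workspace-wide default model per model type, the latter lists the models currently usable for a type. Continuing the sketch above (BASE and session are the same assumptions):

    # Which model currently backs text generation for this workspace?
    default = session.get(f'{BASE}/workspaces/current/default-model',
                          params={'model_type': 'text-generation'}).json()

    # Promote another provider/model to be the workspace default
    session.post(f'{BASE}/workspaces/current/default-model',
                 json={'model_type': 'text-generation',
                       'provider_name': 'openai',
                       'model_name': 'gpt-3.5-turbo'})

    # Models that are actually usable for embeddings right now
    valid = session.get(f'{BASE}/workspaces/current/models/model-type/embeddings').json()

diff --git a/api/controllers/console/workspace/providers.py b/api/controllers/console/workspace/providers.py new file mode 100644 index 000000000..b6f9c3c69 --- /dev/null +++ b/api/controllers/console/workspace/providers.py @@ -0,0 +1,130 @@ +# -*- coding:utf-8 -*- +from flask_login import login_required, current_user +from flask_restful import Resource, reqparse +from werkzeug.exceptions import Forbidden + +from controllers.console import api +from controllers.console.setup import setup_required +from controllers.console.wraps import account_initialization_required +from core.model_providers.providers.base import CredentialsValidateFailedError +from models.provider import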
ProviderType +from services.provider_service import ProviderService + + +class ProviderListApi(Resource): + + @setup_required + @login_required + @account_initialization_required + def get(self): + tenant_id = current_user.current_tenant_id + + """ + If the type is AZURE_OPENAI, decode and return the four fields of azure_api_type, azure_api_version, + azure_api_base, azure_api_key as an object, where azure_api_key displays the first 6 characters in plaintext, and the + rest is replaced by * and the last two characters are displayed in plaintext + + If the type is other, decode and return the Token field directly; the field displays the first 6 characters in + plaintext, the rest is replaced by * and the last two characters are displayed in plaintext + """ + + provider_service = ProviderService() + provider_info_list = provider_service.get_provider_list(tenant_id) + + provider_list = [ + { + 'provider_name': p['provider_name'], + 'provider_type': p['provider_type'], + 'is_valid': p['is_valid'], + 'last_used': p['last_used'], + 'is_enabled': p['is_valid'], + **({ + 'quota_type': p['quota_type'], + 'quota_limit': p['quota_limit'], + 'quota_used': p['quota_used'] + } if p['provider_type'] == ProviderType.SYSTEM.value else {}), + 'token': (p['config'] if p['provider_name'] != 'openai' else p['config']['openai_api_key']) + if p['config'] else None + } + for name, provider_info in provider_info_list.items() + for p in provider_info['providers'] + ] + + return provider_list + + +class ProviderTokenApi(Resource): + + @setup_required + @login_required + @account_initialization_required + def post(self, provider): + # The role of the current user in the ta table must be admin or owner + if current_user.current_tenant.current_role not in ['admin', 'owner']: + raise Forbidden() + + parser = reqparse.RequestParser() + parser.add_argument('token', required=True, nullable=False, location='json') + args = parser.parse_args() + + if provider == 'openai': + args['token'] = { + 'openai_api_key': args['token'] + } + + provider_service = ProviderService() + try: + provider_service.save_custom_provider_config( + tenant_id=current_user.current_tenant_id, + provider_name=provider, + config=args['token'] + ) + except CredentialsValidateFailedError as ex: + raise ValueError(str(ex)) + + return {'result': 'success'}, 201 + + +class ProviderTokenValidateApi(Resource): + + @setup_required + @login_required + @account_initialization_required + def post(self, provider): + parser = reqparse.RequestParser() + parser.add_argument('token', required=True, nullable=False, location='json') + args = parser.parse_args() + + provider_service = ProviderService() + + if provider == 'openai': + args['token'] = { + 'openai_api_key': args['token'] + } + + result = True + error = None + + try: + provider_service.custom_provider_config_validate( + provider_name=provider, + config=args['token'] + ) + except CredentialsValidateFailedError as ex: + result = False + error = str(ex) + + response = {'result': 'success' if result else 'error'} + + if not result: + response['error'] = error + + return response + + +api.add_resource(ProviderTokenApi, '/workspaces/current/providers/<provider>/token', + endpoint='workspaces_current_providers_token') # POST for updating provider token +api.add_resource(ProviderTokenValidateApi, '/workspaces/current/providers/<provider>/token-validate', + endpoint='workspaces_current_providers_token_validate') # POST for validating provider token + +api.add_resource(ProviderListApi, '/workspaces/current/providers') # GET for getting providers list
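These routes preserve the pre-multi-model API surface: for 'openai' the client still posts a bare key string, which the handlers wrap into the new config shape before delegating to ProviderService, while other providers post the config object directly. A sketch of both call styles (same assumed BASE and session as in the earlier sketches; the request bodies are illustrative):

    # Legacy OpenAI style: the token is a plain string
    session.post(f'{BASE}/workspaces/current/providers/openai/token',
                 json={'token': 'sk-...'})

    # Other providers: the token is already a provider-specific config object
    session.post(f'{BASE}/workspaces/current/providers/anthropic/token-validate',
                 json={'token': {'anthropic_api_key': 'sk-ant-...'}})

diff --git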
a/api/controllers/console/workspace/workspace.py b/api/controllers/console/workspace/workspace.py index 2ad457c79..8b0237eb2 100644 --- a/api/controllers/console/workspace/workspace.py +++ b/api/controllers/console/workspace/workspace.py @@ -30,7 +30,7 @@ tenant_fields = { 'created_at': TimestampField, 'role': fields.String, 'providers': fields.List(fields.Nested(provider_fields)), - 'in_trail': fields.Boolean, + 'in_trial': fields.Boolean, 'trial_end_reason': fields.String, } diff --git a/api/controllers/service_api/app/app.py b/api/controllers/service_api/app/app.py index 7c185ec63..481133367 100644 --- a/api/controllers/service_api/app/app.py +++ b/api/controllers/service_api/app/app.py @@ -4,8 +4,6 @@ from flask_restful import fields, marshal_with from controllers.service_api import api from controllers.service_api.wraps import AppApiResource -from core.llm.llm_builder import LLMBuilder -from models.provider import ProviderName from models.model import App @@ -35,13 +33,12 @@ class AppParameterApi(AppApiResource): def get(self, app_model: App, end_user): """Retrieve app parameters.""" app_model_config = app_model.app_model_config - provider_name = LLMBuilder.get_default_provider(app_model.tenant_id, 'whisper-1') return { 'opening_statement': app_model_config.opening_statement, 'suggested_questions': app_model_config.suggested_questions_list, 'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict, - 'speech_to_text': app_model_config.speech_to_text_dict if provider_name == ProviderName.OPENAI.value else { 'enabled': False }, + 'speech_to_text': app_model_config.speech_to_text_dict, 'more_like_this': app_model_config.more_like_this_dict, 'user_input_form': app_model_config.user_input_form_list } diff --git a/api/controllers/service_api/app/audio.py b/api/controllers/service_api/app/audio.py index 470afc6b4..4b03de063 100644 --- a/api/controllers/service_api/app/audio.py +++ b/api/controllers/service_api/app/audio.py @@ -9,7 +9,7 @@ from controllers.service_api.app.error import AppUnavailableError, ProviderNotIn ProviderModelCurrentlyNotSupportError, NoAudioUploadedError, AudioTooLargeError, UnsupportedAudioTypeError, \ ProviderNotSupportSpeechToTextError from controllers.service_api.wraps import AppApiResource -from core.llm.error import LLMBadRequestError, LLMAuthorizationError, LLMAPIUnavailableError, LLMAPIConnectionError, \ +from core.model_providers.error import LLMBadRequestError, LLMAuthorizationError, LLMAPIUnavailableError, LLMAPIConnectionError, \ LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError from models.model import App, AppModelConfig from services.audio_service import AudioService diff --git a/api/controllers/service_api/app/completion.py b/api/controllers/service_api/app/completion.py index 448c408bc..2b802dc71 100644 --- a/api/controllers/service_api/app/completion.py +++ b/api/controllers/service_api/app/completion.py @@ -14,7 +14,7 @@ from controllers.service_api.app.error import AppUnavailableError, ProviderNotIn ProviderModelCurrentlyNotSupportError from controllers.service_api.wraps import AppApiResource from core.conversation_message_task import PubHandler -from core.llm.error import LLMBadRequestError, LLMAuthorizationError, LLMAPIUnavailableError, LLMAPIConnectionError, \ +from core.model_providers.error import LLMBadRequestError, LLMAuthorizationError, LLMAPIUnavailableError, LLMAPIConnectionError, \ LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, 
ModelCurrentlyNotSupportError from libs.helper import uuid_value from services.completion_service import CompletionService diff --git a/api/controllers/service_api/dataset/document.py b/api/controllers/service_api/dataset/document.py index e00de0f9a..7cb4d4989 100644 --- a/api/controllers/service_api/dataset/document.py +++ b/api/controllers/service_api/dataset/document.py @@ -11,7 +11,7 @@ from controllers.service_api.app.error import ProviderNotInitializeError from controllers.service_api.dataset.error import ArchivedDocumentImmutableError, DocumentIndexingError, \ DatasetNotInitedError from controllers.service_api.wraps import DatasetApiResource -from core.llm.error import ProviderTokenNotInitError +from core.model_providers.error import ProviderTokenNotInitError from extensions.ext_database import db from extensions.ext_storage import storage from models.model import UploadFile diff --git a/api/controllers/web/app.py b/api/controllers/web/app.py index f4e268941..dd6670734 100644 --- a/api/controllers/web/app.py +++ b/api/controllers/web/app.py @@ -4,8 +4,6 @@ from flask_restful import marshal_with, fields from controllers.web import api from controllers.web.wraps import WebApiResource -from core.llm.llm_builder import LLMBuilder -from models.provider import ProviderName from models.model import App @@ -34,13 +32,12 @@ class AppParameterApi(WebApiResource): def get(self, app_model: App, end_user): """Retrieve app parameters.""" app_model_config = app_model.app_model_config - provider_name = LLMBuilder.get_default_provider(app_model.tenant_id, 'whisper-1') return { 'opening_statement': app_model_config.opening_statement, 'suggested_questions': app_model_config.suggested_questions_list, 'suggested_questions_after_answer': app_model_config.suggested_questions_after_answer_dict, - 'speech_to_text': app_model_config.speech_to_text_dict if provider_name == ProviderName.OPENAI.value else { 'enabled': False }, + 'speech_to_text': app_model_config.speech_to_text_dict, 'more_like_this': app_model_config.more_like_this_dict, 'user_input_form': app_model_config.user_input_form_list } diff --git a/api/controllers/web/audio.py b/api/controllers/web/audio.py index 3e3fe3a28..b3272de1c 100644 --- a/api/controllers/web/audio.py +++ b/api/controllers/web/audio.py @@ -10,7 +10,7 @@ from controllers.web.error import AppUnavailableError, ProviderNotInitializeErro ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError, NoAudioUploadedError, AudioTooLargeError, \ UnsupportedAudioTypeError, ProviderNotSupportSpeechToTextError from controllers.web.wraps import WebApiResource -from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \ +from core.model_providers.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \ LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError from services.audio_service import AudioService from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, \ diff --git a/api/controllers/web/completion.py b/api/controllers/web/completion.py index db2f770e5..4325362a5 100644 --- a/api/controllers/web/completion.py +++ b/api/controllers/web/completion.py @@ -14,7 +14,7 @@ from controllers.web.error import AppUnavailableError, ConversationCompletedErro ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError from controllers.web.wraps import WebApiResource from core.conversation_message_task import 
PubHandler -from core.llm.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \ +from core.model_providers.error import LLMBadRequestError, LLMAPIUnavailableError, LLMAuthorizationError, LLMAPIConnectionError, \ LLMRateLimitError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError from libs.helper import uuid_value from services.completion_service import CompletionService diff --git a/api/controllers/web/message.py b/api/controllers/web/message.py index 3d978a109..f25f1e5af 100644 --- a/api/controllers/web/message.py +++ b/api/controllers/web/message.py @@ -14,7 +14,7 @@ from controllers.web.error import NotChatAppError, CompletionRequestError, Provi AppMoreLikeThisDisabledError, NotCompletionAppError, AppSuggestedQuestionsAfterAnswerDisabledError, \ ProviderQuotaExceededError, ProviderModelCurrentlyNotSupportError from controllers.web.wraps import WebApiResource -from core.llm.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \ +from core.model_providers.error import LLMRateLimitError, LLMBadRequestError, LLMAuthorizationError, LLMAPIConnectionError, \ ProviderTokenNotInitError, LLMAPIUnavailableError, QuotaExceededError, ModelCurrentlyNotSupportError from libs.helper import uuid_value, TimestampField from services.completion_service import CompletionService diff --git a/api/core/__init__.py b/api/core/__init__.py index 2dc9a9e86..e69de29bb 100644 --- a/api/core/__init__.py +++ b/api/core/__init__.py @@ -1,36 +0,0 @@ -import os -from typing import Optional - -import langchain -from flask import Flask -from pydantic import BaseModel - -from core.callback_handler.std_out_callback_handler import DifyStdOutCallbackHandler -from core.prompt.prompt_template import OneLineFormatter - - -class HostedOpenAICredential(BaseModel): - api_key: str - - -class HostedAnthropicCredential(BaseModel): - api_key: str - - -class HostedLLMCredentials(BaseModel): - openai: Optional[HostedOpenAICredential] = None - anthropic: Optional[HostedAnthropicCredential] = None - - -hosted_llm_credentials = HostedLLMCredentials() - - -def init_app(app: Flask): - if os.environ.get("DEBUG") and os.environ.get("DEBUG").lower() == 'true': - langchain.verbose = True - - if app.config.get("OPENAI_API_KEY"): - hosted_llm_credentials.openai = HostedOpenAICredential(api_key=app.config.get("OPENAI_API_KEY")) - - if app.config.get("ANTHROPIC_API_KEY"): - hosted_llm_credentials.anthropic = HostedAnthropicCredential(api_key=app.config.get("ANTHROPIC_API_KEY")) diff --git a/api/core/agent/agent/calc_token_mixin.py b/api/core/agent/agent/calc_token_mixin.py index a07b9f2ad..97d2b7740 100644 --- a/api/core/agent/agent/calc_token_mixin.py +++ b/api/core/agent/agent/calc_token_mixin.py @@ -1,20 +1,17 @@ -from typing import cast, List +from typing import List -from langchain import OpenAI -from langchain.base_language import BaseLanguageModel -from langchain.chat_models.openai import ChatOpenAI from langchain.schema import BaseMessage -from core.constant import llm_constant +from core.model_providers.models.entity.message import to_prompt_messages +from core.model_providers.models.llm.base import BaseLLM class CalcTokenMixin: - def get_num_tokens_from_messages(self, llm: BaseLanguageModel, messages: List[BaseMessage], **kwargs) -> int: - llm = cast(ChatOpenAI, llm) - return llm.get_num_tokens_from_messages(messages) + def get_num_tokens_from_messages(self, model_instance: BaseLLM, messages: List[BaseMessage], 
**kwargs) -> int: + return model_instance.get_num_tokens(to_prompt_messages(messages)) - def get_message_rest_tokens(self, llm: BaseLanguageModel, messages: List[BaseMessage], **kwargs) -> int: + def get_message_rest_tokens(self, model_instance: BaseLLM, messages: List[BaseMessage], **kwargs) -> int: """ Got the rest tokens available for the model after excluding messages tokens and completion max tokens @@ -22,10 +19,9 @@ class CalcTokenMixin: :param messages: :return: """ - llm = cast(ChatOpenAI, llm) - llm_max_tokens = llm_constant.max_context_token_length[llm.model_name] - completion_max_tokens = llm.max_tokens - used_tokens = self.get_num_tokens_from_messages(llm, messages, **kwargs) + llm_max_tokens = model_instance.model_rules.max_tokens.max + completion_max_tokens = model_instance.model_kwargs.max_tokens + used_tokens = self.get_num_tokens_from_messages(model_instance, messages, **kwargs) rest_tokens = llm_max_tokens - completion_max_tokens - used_tokens return rest_tokens diff --git a/api/core/agent/agent/multi_dataset_router_agent.py b/api/core/agent/agent/multi_dataset_router_agent.py index 34dacaee3..c23bf2449 100644 --- a/api/core/agent/agent/multi_dataset_router_agent.py +++ b/api/core/agent/agent/multi_dataset_router_agent.py @@ -4,9 +4,11 @@ from langchain.agents import OpenAIFunctionsAgent, BaseSingleActionAgent from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import Callbacks from langchain.prompts.chat import BaseMessagePromptTemplate -from langchain.schema import AgentAction, AgentFinish, BaseLanguageModel, SystemMessage +from langchain.schema import AgentAction, AgentFinish, SystemMessage +from langchain.schema.language_model import BaseLanguageModel from langchain.tools import BaseTool +from core.model_providers.models.llm.base import BaseLLM from core.tool.dataset_retriever_tool import DatasetRetrieverTool @@ -14,6 +16,12 @@ class MultiDatasetRouterAgent(OpenAIFunctionsAgent): """ An Multi Dataset Retrieve Agent driven by Router. 
""" + model_instance: BaseLLM + + class Config: + """Configuration for this pydantic object.""" + + arbitrary_types_allowed = True def should_use_agent(self, query: str): """ diff --git a/api/core/agent/agent/openai_function_call.py b/api/core/agent/agent/openai_function_call.py index 090d35d97..3966525e2 100644 --- a/api/core/agent/agent/openai_function_call.py +++ b/api/core/agent/agent/openai_function_call.py @@ -6,7 +6,8 @@ from langchain.agents.openai_functions_agent.base import _parse_ai_message, \ from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import Callbacks from langchain.prompts.chat import BaseMessagePromptTemplate -from langchain.schema import AgentAction, AgentFinish, SystemMessage, BaseLanguageModel +from langchain.schema import AgentAction, AgentFinish, SystemMessage +from langchain.schema.language_model import BaseLanguageModel from langchain.tools import BaseTool from core.agent.agent.calc_token_mixin import ExceededLLMTokensLimitError @@ -84,7 +85,7 @@ class AutoSummarizingOpenAIFunctionCallAgent(OpenAIFunctionsAgent, OpenAIFunctio # summarize messages if rest_tokens < 0 try: - messages = self.summarize_messages_if_needed(self.llm, messages, functions=self.functions) + messages = self.summarize_messages_if_needed(messages, functions=self.functions) except ExceededLLMTokensLimitError as e: return AgentFinish(return_values={"output": str(e)}, log=str(e)) diff --git a/api/core/agent/agent/openai_function_call_summarize_mixin.py b/api/core/agent/agent/openai_function_call_summarize_mixin.py index 0436de207..a4745e772 100644 --- a/api/core/agent/agent/openai_function_call_summarize_mixin.py +++ b/api/core/agent/agent/openai_function_call_summarize_mixin.py @@ -3,20 +3,28 @@ from typing import cast, List from langchain.chat_models import ChatOpenAI from langchain.chat_models.openai import _convert_message_to_dict from langchain.memory.summary import SummarizerMixin -from langchain.schema import SystemMessage, HumanMessage, BaseMessage, AIMessage, BaseLanguageModel +from langchain.schema import SystemMessage, HumanMessage, BaseMessage, AIMessage +from langchain.schema.language_model import BaseLanguageModel from pydantic import BaseModel from core.agent.agent.calc_token_mixin import ExceededLLMTokensLimitError, CalcTokenMixin +from core.model_providers.models.llm.base import BaseLLM class OpenAIFunctionCallSummarizeMixin(BaseModel, CalcTokenMixin): moving_summary_buffer: str = "" moving_summary_index: int = 0 summary_llm: BaseLanguageModel + model_instance: BaseLLM - def summarize_messages_if_needed(self, llm: BaseLanguageModel, messages: List[BaseMessage], **kwargs) -> List[BaseMessage]: + class Config: + """Configuration for this pydantic object.""" + + arbitrary_types_allowed = True + + def summarize_messages_if_needed(self, messages: List[BaseMessage], **kwargs) -> List[BaseMessage]: # calculate rest tokens and summarize previous function observation messages if rest_tokens < 0 - rest_tokens = self.get_message_rest_tokens(llm, messages, **kwargs) + rest_tokens = self.get_message_rest_tokens(self.model_instance, messages, **kwargs) rest_tokens = rest_tokens - 20 # to deal with the inaccuracy of rest_tokens if rest_tokens >= 0: return messages diff --git a/api/core/agent/agent/openai_multi_function_call.py b/api/core/agent/agent/openai_multi_function_call.py index 1524fc697..978037718 100644 --- a/api/core/agent/agent/openai_multi_function_call.py +++ b/api/core/agent/agent/openai_multi_function_call.py @@ -6,7 +6,8 @@ from 
langchain.agents.openai_functions_multi_agent.base import OpenAIMultiFuncti from langchain.callbacks.base import BaseCallbackManager from langchain.callbacks.manager import Callbacks from langchain.prompts.chat import BaseMessagePromptTemplate -from langchain.schema import AgentAction, AgentFinish, SystemMessage, BaseLanguageModel +from langchain.schema import AgentAction, AgentFinish, SystemMessage +from langchain.schema.language_model import BaseLanguageModel from langchain.tools import BaseTool from core.agent.agent.calc_token_mixin import ExceededLLMTokensLimitError @@ -84,7 +85,7 @@ class AutoSummarizingOpenMultiAIFunctionCallAgent(OpenAIMultiFunctionsAgent, Ope # summarize messages if rest_tokens < 0 try: - messages = self.summarize_messages_if_needed(self.llm, messages, functions=self.functions) + messages = self.summarize_messages_if_needed(messages, functions=self.functions) except ExceededLLMTokensLimitError as e: return AgentFinish(return_values={"output": str(e)}, log=str(e)) diff --git a/api/core/agent/agent/structed_multi_dataset_router_agent.py b/api/core/agent/agent/structed_multi_dataset_router_agent.py new file mode 100644 index 000000000..ac1748611 --- /dev/null +++ b/api/core/agent/agent/structed_multi_dataset_router_agent.py @@ -0,0 +1,162 @@ +import re +from typing import List, Tuple, Any, Union, Sequence, Optional, cast + +from langchain import BasePromptTemplate +from langchain.agents import StructuredChatAgent, AgentOutputParser, Agent +from langchain.agents.structured_chat.base import HUMAN_MESSAGE_TEMPLATE +from langchain.base_language import BaseLanguageModel +from langchain.callbacks.base import BaseCallbackManager +from langchain.callbacks.manager import Callbacks +from langchain.prompts import SystemMessagePromptTemplate, HumanMessagePromptTemplate, ChatPromptTemplate +from langchain.schema import AgentAction, AgentFinish, OutputParserException +from langchain.tools import BaseTool +from langchain.agents.structured_chat.prompt import PREFIX, SUFFIX + +from core.model_providers.models.llm.base import BaseLLM +from core.tool.dataset_retriever_tool import DatasetRetrieverTool + +FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). +The nouns in the format of "Thought", "Action", "Action Input", "Final Answer" must be expressed in English. +Valid "action" values: "Final Answer" or {tool_names} + +Provide only ONE action per $JSON_BLOB, as shown: + +``` +{{{{ + "action": $TOOL_NAME, + "action_input": $INPUT +}}}} +``` + +Follow this format: + +Question: input question to answer +Thought: consider previous and subsequent steps +Action: +``` +$JSON_BLOB +``` +Observation: action result +... (repeat Thought/Action/Observation N times) +Thought: I know what to respond +Action: +``` +{{{{ + "action": "Final Answer", + "action_input": "Final response to human" +}}}} +```""" + + +class StructuredMultiDatasetRouterAgent(StructuredChatAgent): + model_instance: BaseLLM + dataset_tools: Sequence[BaseTool] + + class Config: + """Configuration for this pydantic object.""" + + arbitrary_types_allowed = True + + def should_use_agent(self, query: str): + """ + Return whether the agent should be used for the query. + Running a separate ReACT pass just to decide whether an agent is needed is itself costly, + so it is cheaper to route the query straight through the agent for reasoning. + + :param query: + :return: + """ + return True + + def plan( + self, + intermediate_steps: List[Tuple[AgentAction, str]], + callbacks: Callbacks = None, + **kwargs: Any, + ) -> Union[AgentAction, AgentFinish]: + """Given input, decide what to do. + + Args: + intermediate_steps: Steps the LLM has taken to date, + along with observations + callbacks: Callbacks to run. + **kwargs: User inputs. + + Returns: + Action specifying what tool to use. + """ + if len(self.dataset_tools) == 0: + return AgentFinish(return_values={"output": ''}, log='') + elif len(self.dataset_tools) == 1: + tool = next(iter(self.dataset_tools)) + tool = cast(DatasetRetrieverTool, tool) + rst = tool.run(tool_input={'dataset_id': tool.dataset_id, 'query': kwargs['input']}) + return AgentFinish(return_values={"output": rst}, log=rst) + + full_inputs = self.get_full_inputs(intermediate_steps, **kwargs) + full_output = self.llm_chain.predict(callbacks=callbacks, **full_inputs) + + try: + return self.output_parser.parse(full_output) + except OutputParserException: + return AgentFinish({"output": "I'm sorry, the model's answer is invalid, " + "I don't know how to respond to that."}, "") + + @classmethod + def create_prompt( + cls, + tools: Sequence[BaseTool], + prefix: str = PREFIX, + suffix: str = SUFFIX, + human_message_template: str = HUMAN_MESSAGE_TEMPLATE, + format_instructions: str = FORMAT_INSTRUCTIONS, + input_variables: Optional[List[str]] = None, + memory_prompts: Optional[List[BasePromptTemplate]] = None, + ) -> BasePromptTemplate: + tool_strings = [] + for tool in tools: + args_schema = re.sub("}", "}}}}", re.sub("{", "{{{{", str(tool.args))) + tool_strings.append(f"{tool.name}: {tool.description}, args: {args_schema}") + formatted_tools = "\n".join(tool_strings) + unique_tool_names = set(tool.name for tool in tools) + tool_names = ", ".join('"' + name + '"' for name in unique_tool_names) + format_instructions = format_instructions.format(tool_names=tool_names) + template = "\n\n".join([prefix, formatted_tools, format_instructions, suffix]) + if input_variables is None: + input_variables = ["input", "agent_scratchpad"] + _memory_prompts = memory_prompts or [] + messages = [ + SystemMessagePromptTemplate.from_template(template), + *_memory_prompts, + HumanMessagePromptTemplate.from_template(human_message_template), + ] + return ChatPromptTemplate(input_variables=input_variables, messages=messages) + + @classmethod + def from_llm_and_tools( + cls, + llm: BaseLanguageModel, + tools: Sequence[BaseTool], + callback_manager: Optional[BaseCallbackManager] = None, + output_parser: Optional[AgentOutputParser] = None, + prefix: str = PREFIX, + suffix: str = SUFFIX, + human_message_template: str = HUMAN_MESSAGE_TEMPLATE, + format_instructions: str = FORMAT_INSTRUCTIONS, + input_variables: Optional[List[str]] = None, + memory_prompts: Optional[List[BasePromptTemplate]] = None, + **kwargs: Any, + ) -> Agent: + return super().from_llm_and_tools( + llm=llm, + tools=tools, + callback_manager=callback_manager, + output_parser=output_parser, + prefix=prefix, + suffix=suffix, + human_message_template=human_message_template, + format_instructions=format_instructions, + input_variables=input_variables, + memory_prompts=memory_prompts, + dataset_tools=tools, + **kwargs, + )
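The router agent's plan() deliberately short-circuits: with no dataset tools it finishes with an empty answer, with exactly one dataset it calls that retriever directly and skips the LLM round trip entirely, and only multiple datasets trigger the structured-chat routing prompt above. A rough instantiation sketch, mirroring the REACT_ROUTER branch of AgentExecutor._init_agent later in this patch (model_instance and dataset_tools are stand-ins supplied by the caller):

    from typing import Sequence

    from langchain.tools import BaseTool

    from core.agent.agent.output_parser.structured_chat import StructuredChatOutputParser
    from core.agent.agent.structed_multi_dataset_router_agent import StructuredMultiDatasetRouterAgent
    from core.model_providers.models.llm.base import BaseLLM


    def build_dataset_router(model_instance: BaseLLM, dataset_tools: Sequence[BaseTool]):
        # from_llm_and_tools passes tools through as dataset_tools as well,
        # so plan() can apply its zero/one/many short-circuit logic
        return StructuredMultiDatasetRouterAgent.from_llm_and_tools(
            model_instance=model_instance,
            llm=model_instance.client,  # the underlying LangChain chat model
            tools=dataset_tools,
            output_parser=StructuredChatOutputParser(),
            verbose=True
        )

diff --git a/api/core/agent/agent/structured_chat.py b/api/core/agent/agent/structured_chat.py index 8c3472845..96960cf80 100644 --- a/api/core/agent/agent/structured_chat.py +++ b/api/core/agent/agent/structured_chat.py @@ -14,7 +14,7 @@ from langchain.tools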
import BaseTool from langchain.agents.structured_chat.prompt import PREFIX, SUFFIX from core.agent.agent.calc_token_mixin import CalcTokenMixin, ExceededLLMTokensLimitError - +from core.model_providers.models.llm.base import BaseLLM FORMAT_INSTRUCTIONS = """Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). The nouns in the format of "Thought", "Action", "Action Input", "Final Answer" must be expressed in English. @@ -53,6 +53,12 @@ class AutoSummarizingStructuredChatAgent(StructuredChatAgent, CalcTokenMixin): moving_summary_buffer: str = "" moving_summary_index: int = 0 summary_llm: BaseLanguageModel + model_instance: BaseLLM + + class Config: + """Configuration for this pydantic object.""" + + arbitrary_types_allowed = True def should_use_agent(self, query: str): """ @@ -89,7 +95,7 @@ class AutoSummarizingStructuredChatAgent(StructuredChatAgent, CalcTokenMixin): if prompts: messages = prompts[0].to_messages() - rest_tokens = self.get_message_rest_tokens(self.llm_chain.llm, messages) + rest_tokens = self.get_message_rest_tokens(self.model_instance, messages) if rest_tokens < 0: full_inputs = self.summarize_messages(intermediate_steps, **kwargs) diff --git a/api/core/agent/agent_executor.py b/api/core/agent/agent_executor.py index da36533fd..f345e631d 100644 --- a/api/core/agent/agent_executor.py +++ b/api/core/agent/agent_executor.py @@ -3,7 +3,6 @@ import logging from typing import Union, Optional from langchain.agents import BaseSingleActionAgent, BaseMultiActionAgent -from langchain.base_language import BaseLanguageModel from langchain.callbacks.manager import Callbacks from langchain.memory.chat_memory import BaseChatMemory from langchain.tools import BaseTool @@ -13,14 +12,17 @@ from core.agent.agent.multi_dataset_router_agent import MultiDatasetRouterAgent from core.agent.agent.openai_function_call import AutoSummarizingOpenAIFunctionCallAgent from core.agent.agent.openai_multi_function_call import AutoSummarizingOpenMultiAIFunctionCallAgent from core.agent.agent.output_parser.structured_chat import StructuredChatOutputParser +from core.agent.agent.structed_multi_dataset_router_agent import StructuredMultiDatasetRouterAgent from core.agent.agent.structured_chat import AutoSummarizingStructuredChatAgent from langchain.agents import AgentExecutor as LCAgentExecutor +from core.model_providers.models.llm.base import BaseLLM from core.tool.dataset_retriever_tool import DatasetRetrieverTool class PlanningStrategy(str, enum.Enum): ROUTER = 'router' + REACT_ROUTER = 'react_router' REACT = 'react' FUNCTION_CALL = 'function_call' MULTI_FUNCTION_CALL = 'multi_function_call' @@ -28,10 +30,9 @@ class PlanningStrategy(str, enum.Enum): class AgentConfiguration(BaseModel): strategy: PlanningStrategy - llm: BaseLanguageModel + model_instance: BaseLLM tools: list[BaseTool] - summary_llm: BaseLanguageModel - dataset_llm: BaseLanguageModel + summary_model_instance: BaseLLM memory: Optional[BaseChatMemory] = None callbacks: Callbacks = None max_iterations: int = 6 @@ -60,36 +61,49 @@ class AgentExecutor: def _init_agent(self) -> Union[BaseSingleActionAgent | BaseMultiActionAgent]: if self.configuration.strategy == PlanningStrategy.REACT: agent = AutoSummarizingStructuredChatAgent.from_llm_and_tools( - llm=self.configuration.llm, + model_instance=self.configuration.model_instance, + llm=self.configuration.model_instance.client, tools=self.configuration.tools, output_parser=StructuredChatOutputParser(), - 
summary_llm=self.configuration.summary_llm, + summary_llm=self.configuration.summary_model_instance.client, verbose=True ) elif self.configuration.strategy == PlanningStrategy.FUNCTION_CALL: agent = AutoSummarizingOpenAIFunctionCallAgent.from_llm_and_tools( - llm=self.configuration.llm, + model_instance=self.configuration.model_instance, + llm=self.configuration.model_instance.client, tools=self.configuration.tools, extra_prompt_messages=self.configuration.memory.buffer if self.configuration.memory else None, # used for read chat histories memory - summary_llm=self.configuration.summary_llm, + summary_llm=self.configuration.summary_model_instance.client, verbose=True ) elif self.configuration.strategy == PlanningStrategy.MULTI_FUNCTION_CALL: agent = AutoSummarizingOpenMultiAIFunctionCallAgent.from_llm_and_tools( - llm=self.configuration.llm, + model_instance=self.configuration.model_instance, + llm=self.configuration.model_instance.client, tools=self.configuration.tools, extra_prompt_messages=self.configuration.memory.buffer if self.configuration.memory else None, # used for read chat histories memory - summary_llm=self.configuration.summary_llm, + summary_llm=self.configuration.summary_model_instance.client, verbose=True ) elif self.configuration.strategy == PlanningStrategy.ROUTER: self.configuration.tools = [t for t in self.configuration.tools if isinstance(t, DatasetRetrieverTool)] agent = MultiDatasetRouterAgent.from_llm_and_tools( - llm=self.configuration.dataset_llm, + model_instance=self.configuration.model_instance, + llm=self.configuration.model_instance.client, tools=self.configuration.tools, extra_prompt_messages=self.configuration.memory.buffer if self.configuration.memory else None, verbose=True ) + elif self.configuration.strategy == PlanningStrategy.REACT_ROUTER: + self.configuration.tools = [t for t in self.configuration.tools if isinstance(t, DatasetRetrieverTool)] + agent = StructuredMultiDatasetRouterAgent.from_llm_and_tools( + model_instance=self.configuration.model_instance, + llm=self.configuration.model_instance.client, + tools=self.configuration.tools, + output_parser=StructuredChatOutputParser(), + verbose=True + ) else: raise NotImplementedError(f"Unknown Agent Strategy: {self.configuration.strategy}") diff --git a/api/core/callback_handler/agent_loop_gather_callback_handler.py b/api/core/callback_handler/agent_loop_gather_callback_handler.py index bb81771c4..64fb1bf10 100644 --- a/api/core/callback_handler/agent_loop_gather_callback_handler.py +++ b/api/core/callback_handler/agent_loop_gather_callback_handler.py @@ -10,15 +10,16 @@ from langchain.schema import AgentAction, AgentFinish, LLMResult, ChatGeneration from core.callback_handler.entity.agent_loop import AgentLoop from core.conversation_message_task import ConversationMessageTask +from core.model_providers.models.llm.base import BaseLLM class AgentLoopGatherCallbackHandler(BaseCallbackHandler): """Callback Handler that prints to std out.""" raise_error: bool = True - def __init__(self, model_name, conversation_message_task: ConversationMessageTask) -> None: + def __init__(self, model_instant: BaseLLM, conversation_message_task: ConversationMessageTask) -> None: """Initialize callback handler.""" - self.model_name = model_name + self.model_instant = model_instant self.conversation_message_task = conversation_message_task self._agent_loops = [] self._current_loop = None @@ -152,7 +153,7 @@ class AgentLoopGatherCallbackHandler(BaseCallbackHandler): self._current_loop.latency = 
self._current_loop.completed_at - self._current_loop.started_at self.conversation_message_task.on_agent_end( - self._message_agent_thought, self.model_name, self._current_loop + self._message_agent_thought, self.model_instant, self._current_loop ) self._agent_loops.append(self._current_loop) @@ -183,7 +184,7 @@ class AgentLoopGatherCallbackHandler(BaseCallbackHandler): ) self.conversation_message_task.on_agent_end( - self._message_agent_thought, self.model_name, self._current_loop + self._message_agent_thought, self.model_instant, self._current_loop ) self._agent_loops.append(self._current_loop) diff --git a/api/core/callback_handler/llm_callback_handler.py b/api/core/callback_handler/llm_callback_handler.py index 03f8ba262..89b498c3e 100644 --- a/api/core/callback_handler/llm_callback_handler.py +++ b/api/core/callback_handler/llm_callback_handler.py @@ -3,18 +3,20 @@ import time from typing import Any, Dict, List, Union from langchain.callbacks.base import BaseCallbackHandler -from langchain.schema import LLMResult, BaseMessage, BaseLanguageModel +from langchain.schema import LLMResult, BaseMessage from core.callback_handler.entity.llm_message import LLMMessage from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException +from core.model_providers.models.entity.message import to_prompt_messages, PromptMessage +from core.model_providers.models.llm.base import BaseLLM class LLMCallbackHandler(BaseCallbackHandler): raise_error: bool = True - def __init__(self, llm: BaseLanguageModel, + def __init__(self, model_instance: BaseLLM, conversation_message_task: ConversationMessageTask): - self.llm = llm + self.model_instance = model_instance self.llm_message = LLMMessage() self.start_at = None self.conversation_message_task = conversation_message_task @@ -46,7 +48,7 @@ class LLMCallbackHandler(BaseCallbackHandler): }) self.llm_message.prompt = real_prompts - self.llm_message.prompt_tokens = self.llm.get_num_tokens_from_messages(messages[0]) + self.llm_message.prompt_tokens = self.model_instance.get_num_tokens(to_prompt_messages(messages[0])) def on_llm_start( self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any @@ -58,7 +60,7 @@ class LLMCallbackHandler(BaseCallbackHandler): "text": prompts[0] }] - self.llm_message.prompt_tokens = self.llm.get_num_tokens(prompts[0]) + self.llm_message.prompt_tokens = self.model_instance.get_num_tokens([PromptMessage(content=prompts[0])]) def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None: end_at = time.perf_counter() @@ -68,7 +70,7 @@ class LLMCallbackHandler(BaseCallbackHandler): self.conversation_message_task.append_message_text(response.generations[0][0].text) self.llm_message.completion = response.generations[0][0].text - self.llm_message.completion_tokens = self.llm.get_num_tokens(self.llm_message.completion) + self.llm_message.completion_tokens = self.model_instance.get_num_tokens([PromptMessage(content=self.llm_message.completion)]) self.conversation_message_task.save_message(self.llm_message) @@ -89,7 +91,9 @@ class LLMCallbackHandler(BaseCallbackHandler): if self.conversation_message_task.streaming: end_at = time.perf_counter() self.llm_message.latency = end_at - self.start_at - self.llm_message.completion_tokens = self.llm.get_num_tokens(self.llm_message.completion) + self.llm_message.completion_tokens = self.model_instance.get_num_tokens( + [PromptMessage(content=self.llm_message.completion)] + ) self.conversation_message_task.save_message(llm_message=self.llm_message, 
by_stopped=True) else: logging.error(error) diff --git a/api/core/callback_handler/main_chain_gather_callback_handler.py b/api/core/callback_handler/main_chain_gather_callback_handler.py index e03ecd79f..fc0a65e42 100644 --- a/api/core/callback_handler/main_chain_gather_callback_handler.py +++ b/api/core/callback_handler/main_chain_gather_callback_handler.py @@ -5,9 +5,7 @@ from typing import Any, Dict, Union from langchain.callbacks.base import BaseCallbackHandler -from core.callback_handler.agent_loop_gather_callback_handler import AgentLoopGatherCallbackHandler from core.callback_handler.entity.chain_result import ChainResult -from core.constant import llm_constant from core.conversation_message_task import ConversationMessageTask diff --git a/api/core/completion.py b/api/core/completion.py index 486a3b725..28d0cec8d 100644 --- a/api/core/completion.py +++ b/api/core/completion.py @@ -2,27 +2,19 @@ import logging import re from typing import Optional, List, Union, Tuple -from langchain.base_language import BaseLanguageModel -from langchain.callbacks.base import BaseCallbackHandler -from langchain.chat_models.base import BaseChatModel -from langchain.llms import BaseLLM -from langchain.schema import BaseMessage, HumanMessage +from langchain.schema import BaseMessage from requests.exceptions import ChunkedEncodingError from core.agent.agent_executor import AgentExecuteResult, PlanningStrategy from core.callback_handler.main_chain_gather_callback_handler import MainChainGatherCallbackHandler -from core.constant import llm_constant from core.callback_handler.llm_callback_handler import LLMCallbackHandler -from core.callback_handler.std_out_callback_handler import DifyStreamingStdOutCallbackHandler, \ - DifyStdOutCallbackHandler from core.conversation_message_task import ConversationMessageTask, ConversationTaskStoppedException -from core.llm.error import LLMBadRequestError -from core.llm.fake import FakeLLM -from core.llm.llm_builder import LLMBuilder -from core.llm.streamable_chat_open_ai import StreamableChatOpenAI -from core.llm.streamable_open_ai import StreamableOpenAI +from core.model_providers.error import LLMBadRequestError from core.memory.read_only_conversation_token_db_buffer_shared_memory import \ ReadOnlyConversationTokenDBBufferSharedMemory +from core.model_providers.model_factory import ModelFactory +from core.model_providers.models.entity.message import PromptMessage, to_prompt_messages +from core.model_providers.models.llm.base import BaseLLM from core.orchestrator_rule_parser import OrchestratorRuleParser from core.prompt.prompt_builder import PromptBuilder from core.prompt.prompt_template import JinjaPromptTemplate @@ -51,12 +43,10 @@ class Completion: inputs = conversation.inputs - rest_tokens_for_context_and_memory = cls.get_validate_rest_tokens( - mode=app.mode, + final_model_instance = ModelFactory.get_text_generation_model_from_model_config( tenant_id=app.tenant_id, - app_model_config=app_model_config, - query=query, - inputs=inputs + model_config=app_model_config.model_dict, + streaming=streaming ) conversation_message_task = ConversationMessageTask( @@ -68,10 +58,17 @@ class Completion: is_override=is_override, inputs=inputs, query=query, - streaming=streaming + streaming=streaming, + model_instance=final_model_instance ) - chain_callback = MainChainGatherCallbackHandler(conversation_message_task) + rest_tokens_for_context_and_memory = cls.get_validate_rest_tokens( + mode=app.mode, + model_instance=final_model_instance, + app_model_config=app_model_config, + 
query=query, + inputs=inputs + ) # init orchestrator rule parser orchestrator_rule_parser = OrchestratorRuleParser( @@ -80,6 +77,7 @@ class Completion: ) # parse sensitive_word_avoidance_chain + chain_callback = MainChainGatherCallbackHandler(conversation_message_task) sensitive_word_avoidance_chain = orchestrator_rule_parser.to_sensitive_word_avoidance_chain([chain_callback]) if sensitive_word_avoidance_chain: query = sensitive_word_avoidance_chain.run(query) @@ -102,15 +100,14 @@ class Completion: # run the final llm try: cls.run_final_llm( - tenant_id=app.tenant_id, + model_instance=final_model_instance, mode=app.mode, app_model_config=app_model_config, query=query, inputs=inputs, agent_execute_result=agent_execute_result, conversation_message_task=conversation_message_task, - memory=memory, - streaming=streaming + memory=memory ) except ConversationTaskStoppedException: return @@ -121,31 +118,20 @@ class Completion: return @classmethod - def run_final_llm(cls, tenant_id: str, mode: str, app_model_config: AppModelConfig, query: str, inputs: dict, + def run_final_llm(cls, model_instance: BaseLLM, mode: str, app_model_config: AppModelConfig, query: str, inputs: dict, agent_execute_result: Optional[AgentExecuteResult], conversation_message_task: ConversationMessageTask, - memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory], streaming: bool): + memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory]): # When no extra pre prompt is specified, # the output of the agent can be used directly as the main output content without calling LLM again + fake_response = None if not app_model_config.pre_prompt and agent_execute_result and agent_execute_result.output \ and agent_execute_result.strategy != PlanningStrategy.ROUTER: - final_llm = FakeLLM(response=agent_execute_result.output, - origin_llm=agent_execute_result.configuration.llm, - streaming=streaming) - final_llm.callbacks = cls.get_llm_callbacks(final_llm, streaming, conversation_message_task) - response = final_llm.generate([[HumanMessage(content=query)]]) - return response - - final_llm = LLMBuilder.to_llm_from_model( - tenant_id=tenant_id, - model=app_model_config.model_dict, - streaming=streaming - ) + fake_response = agent_execute_result.output # get llm prompt - prompt, stop_words = cls.get_main_llm_prompt( + prompt_messages, stop_words = cls.get_main_llm_prompt( mode=mode, - llm=final_llm, model=app_model_config.model_dict, pre_prompt=app_model_config.pre_prompt, query=query, @@ -154,25 +140,26 @@ class Completion: memory=memory ) - final_llm.callbacks = cls.get_llm_callbacks(final_llm, streaming, conversation_message_task) - cls.recale_llm_max_tokens( - final_llm=final_llm, - model=app_model_config.model_dict, - prompt=prompt, - mode=mode + model_instance=model_instance, + prompt_messages=prompt_messages, ) - response = final_llm.generate([prompt], stop_words) + response = model_instance.run( + messages=prompt_messages, + stop=stop_words, + callbacks=[LLMCallbackHandler(model_instance, conversation_message_task)], + fake_response=fake_response + ) return response @classmethod - def get_main_llm_prompt(cls, mode: str, llm: BaseLanguageModel, model: dict, + def get_main_llm_prompt(cls, mode: str, model: dict, pre_prompt: str, query: str, inputs: dict, agent_execute_result: Optional[AgentExecuteResult], memory: Optional[ReadOnlyConversationTokenDBBufferSharedMemory]) -> \ - Tuple[Union[str | List[BaseMessage]], Optional[List[str]]]: + Tuple[List[PromptMessage], Optional[List[str]]]: if mode == 'completion': 
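
The hunk above replaces direct LangChain `generate` calls with a single `model_instance.run(...)` call. A minimal sketch of the new call path, using only interfaces introduced in this patch; the tenant id and model config values are illustrative, not taken from the diff:

```python
# Minimal sketch of the new text-generation call path (values illustrative).
from core.completion import Completion
from core.model_providers.model_factory import ModelFactory
from core.model_providers.models.entity.message import PromptMessage

model_config = {
    'provider': 'openai',
    'name': 'gpt-3.5-turbo',
    'completion_params': {'max_tokens': 256, 'temperature': 0},
}

model_instance = ModelFactory.get_text_generation_model_from_model_config(
    tenant_id='tenant-uuid',  # illustrative tenant id
    model_config=model_config,
    streaming=False,
)

prompt_messages = [PromptMessage(content='Summarize the retrieved context.')]

# Clamp max_tokens so prompt + completion fit within the model limit,
# mirroring recale_llm_max_tokens below.
Completion.recale_llm_max_tokens(model_instance=model_instance, prompt_messages=prompt_messages)

response = model_instance.run(messages=prompt_messages, stop=['\nHuman:'])
print(response.content)
```
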
prompt_template = JinjaPromptTemplate.from_template( template=("""Use the following context as your learned knowledge, inside XML tags. @@ -200,11 +187,7 @@ And answer according to the language of the user's question. **prompt_inputs ) - if isinstance(llm, BaseChatModel): - # use chat llm as completion model - return [HumanMessage(content=prompt_content)], None - else: - return prompt_content, None + return [PromptMessage(content=prompt_content)], None else: messages: List[BaseMessage] = [] @@ -249,12 +232,14 @@ And answer according to the language of the user's question. inputs=human_inputs ) - curr_message_tokens = memory.llm.get_num_tokens_from_messages([tmp_human_message]) - model_name = model['name'] - max_tokens = model.get("completion_params").get('max_tokens') - rest_tokens = llm_constant.max_context_token_length[model_name] \ - - max_tokens - curr_message_tokens - rest_tokens = max(rest_tokens, 0) + if memory.model_instance.model_rules.max_tokens.max: + curr_message_tokens = memory.model_instance.get_num_tokens(to_prompt_messages([tmp_human_message])) + max_tokens = model.get("completion_params").get('max_tokens') + rest_tokens = memory.model_instance.model_rules.max_tokens.max - max_tokens - curr_message_tokens + rest_tokens = max(rest_tokens, 0) + else: + rest_tokens = 2000 + histories = cls.get_history_messages_from_memory(memory, rest_tokens) human_message_prompt += "\n\n" if human_message_prompt else "" human_message_prompt += "Here is the chat histories between human and assistant, " \ @@ -274,17 +259,7 @@ And answer according to the language of the user's question. for message in messages: message.content = re.sub(r'<\|.*?\|>', '', message.content) - return messages, ['\nHuman:', ''] - - @classmethod - def get_llm_callbacks(cls, llm: BaseLanguageModel, - streaming: bool, - conversation_message_task: ConversationMessageTask) -> List[BaseCallbackHandler]: - llm_callback_handler = LLMCallbackHandler(llm, conversation_message_task) - if streaming: - return [llm_callback_handler, DifyStreamingStdOutCallbackHandler()] - else: - return [llm_callback_handler, DifyStdOutCallbackHandler()] + return to_prompt_messages(messages), ['\nHuman:', ''] @classmethod def get_history_messages_from_memory(cls, memory: ReadOnlyConversationTokenDBBufferSharedMemory, @@ -300,15 +275,15 @@ And answer according to the language of the user's question. conversation: Conversation, **kwargs) -> ReadOnlyConversationTokenDBBufferSharedMemory: # only for calc token in memory - memory_llm = LLMBuilder.to_llm_from_model( + memory_model_instance = ModelFactory.get_text_generation_model_from_model_config( tenant_id=tenant_id, - model=app_model_config.model_dict + model_config=app_model_config.model_dict ) # use llm config from conversation memory = ReadOnlyConversationTokenDBBufferSharedMemory( conversation=conversation, - llm=memory_llm, + model_instance=memory_model_instance, max_token_limit=kwargs.get("max_token_limit", 2048), memory_key=kwargs.get("memory_key", "chat_history"), return_messages=kwargs.get("return_messages", True), @@ -320,21 +295,20 @@ And answer according to the language of the user's question. 
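
The hunk that follows rewrites `get_validate_rest_tokens` to derive the context budget from the model's own rules instead of the deleted `llm_constant` table. The arithmetic, as a standalone sketch against the `BaseLLM` interface from this patch:

```python
# Sketch of the rest-token budget used by get_validate_rest_tokens
# (model_rules / get_model_kwargs / get_num_tokens are interfaces from this patch).
def remaining_context_tokens(model_instance, prompt_messages) -> int:
    model_limited_tokens = model_instance.model_rules.max_tokens.max
    if model_limited_tokens is None:
        return -1  # provider exposes no hard context limit

    max_tokens = model_instance.get_model_kwargs().max_tokens or 0
    prompt_tokens = model_instance.get_num_tokens(prompt_messages)

    # What is left for context and chat history once the prompt and the
    # completion budget are reserved; negative means the prompt is too long.
    return model_limited_tokens - max_tokens - prompt_tokens
```
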
return memory @classmethod - def get_validate_rest_tokens(cls, mode: str, tenant_id: str, app_model_config: AppModelConfig, + def get_validate_rest_tokens(cls, mode: str, model_instance: BaseLLM, app_model_config: AppModelConfig, query: str, inputs: dict) -> int: - llm = LLMBuilder.to_llm_from_model( - tenant_id=tenant_id, - model=app_model_config.model_dict - ) + model_limited_tokens = model_instance.model_rules.max_tokens.max + max_tokens = model_instance.get_model_kwargs().max_tokens - model_name = app_model_config.model_dict.get("name") - model_limited_tokens = llm_constant.max_context_token_length[model_name] - max_tokens = app_model_config.model_dict.get("completion_params").get('max_tokens') + if model_limited_tokens is None: + return -1 + + if max_tokens is None: + max_tokens = 0 # get prompt without memory and context - prompt, _ = cls.get_main_llm_prompt( + prompt_messages, _ = cls.get_main_llm_prompt( mode=mode, - llm=llm, model=app_model_config.model_dict, pre_prompt=app_model_config.pre_prompt, query=query, @@ -343,9 +317,7 @@ And answer according to the language of the user's question. memory=None ) - prompt_tokens = llm.get_num_tokens(prompt) if isinstance(prompt, str) \ - else llm.get_num_tokens_from_messages(prompt) - + prompt_tokens = model_instance.get_num_tokens(prompt_messages) rest_tokens = model_limited_tokens - max_tokens - prompt_tokens if rest_tokens < 0: raise LLMBadRequestError("Query or prefix prompt is too long, you can reduce the prefix prompt, " @@ -354,36 +326,40 @@ And answer according to the language of the user's question. return rest_tokens @classmethod - def recale_llm_max_tokens(cls, final_llm: BaseLanguageModel, model: dict, - prompt: Union[str, List[BaseMessage]], mode: str): + def recale_llm_max_tokens(cls, model_instance: BaseLLM, prompt_messages: List[PromptMessage]): # recalculate max_tokens if prompt_tokens + max_tokens exceeds the model token limit - model_name = model.get("name") - model_limited_tokens = llm_constant.max_context_token_length[model_name] - max_tokens = model.get("completion_params").get('max_tokens') + model_limited_tokens = model_instance.model_rules.max_tokens.max + max_tokens = model_instance.get_model_kwargs().max_tokens - if mode == 'completion' and isinstance(final_llm, BaseLLM): - prompt_tokens = final_llm.get_num_tokens(prompt) - else: - prompt_tokens = final_llm.get_num_tokens_from_messages(prompt) + if model_limited_tokens is None: + return + + if max_tokens is None: + max_tokens = 0 + + prompt_tokens = model_instance.get_num_tokens(prompt_messages) if prompt_tokens + max_tokens > model_limited_tokens: max_tokens = max(model_limited_tokens - prompt_tokens, 16) - final_llm.max_tokens = max_tokens + + # update model instance max tokens + model_kwargs = model_instance.get_model_kwargs() + model_kwargs.max_tokens = max_tokens + model_instance.set_model_kwargs(model_kwargs) @classmethod def generate_more_like_this(cls, task_id: str, app: App, message: Message, pre_prompt: str, app_model_config: AppModelConfig, user: Account, streaming: bool): - llm = LLMBuilder.to_llm_from_model( + final_model_instance = ModelFactory.get_text_generation_model_from_model_config( tenant_id=app.tenant_id, - model=app_model_config.model_dict, + model_config=app_model_config.model_dict, streaming=streaming ) # get llm prompt - original_prompt, _ = cls.get_main_llm_prompt( + old_prompt_messages, _ = cls.get_main_llm_prompt( mode="completion", - llm=llm, model=app_model_config.model_dict, pre_prompt=pre_prompt, query=message.query, @@ -395,10 +371,9 @@
And answer according to the language of the user's question. original_completion = message.answer.strip() prompt = MORE_LIKE_THIS_GENERATE_PROMPT - prompt = prompt.format(prompt=original_prompt, original_completion=original_completion) + prompt = prompt.format(prompt=old_prompt_messages[0].content, original_completion=original_completion) - if isinstance(llm, BaseChatModel): - prompt = [HumanMessage(content=prompt)] + prompt_messages = [PromptMessage(content=prompt)] conversation_message_task = ConversationMessageTask( task_id=task_id, @@ -408,16 +383,16 @@ And answer according to the language of the user's question. inputs=message.inputs, query=message.query, is_override=True if message.override_model_configs else False, - streaming=streaming + streaming=streaming, + model_instance=final_model_instance ) - llm.callbacks = cls.get_llm_callbacks(llm, streaming, conversation_message_task) - cls.recale_llm_max_tokens( - final_llm=llm, - model=app_model_config.model_dict, - prompt=prompt, - mode='completion' + model_instance=final_model_instance, + prompt_messages=prompt_messages ) - llm.generate([prompt]) + final_model_instance.run( + messages=prompt_messages, + callbacks=[LLMCallbackHandler(final_model_instance, conversation_message_task)] + ) diff --git a/api/core/constant/llm_constant.py b/api/core/constant/llm_constant.py deleted file mode 100644 index 3a02abc90..000000000 --- a/api/core/constant/llm_constant.py +++ /dev/null @@ -1,109 +0,0 @@ -from _decimal import Decimal - -models = { - 'claude-instant-1': 'anthropic', # 100,000 tokens - 'claude-2': 'anthropic', # 100,000 tokens - 'gpt-4': 'openai', # 8,192 tokens - 'gpt-4-32k': 'openai', # 32,768 tokens - 'gpt-3.5-turbo': 'openai', # 4,096 tokens - 'gpt-3.5-turbo-16k': 'openai', # 16384 tokens - 'text-davinci-003': 'openai', # 4,097 tokens - 'text-davinci-002': 'openai', # 4,097 tokens - 'text-curie-001': 'openai', # 2,049 tokens - 'text-babbage-001': 'openai', # 2,049 tokens - 'text-ada-001': 'openai', # 2,049 tokens - 'text-embedding-ada-002': 'openai', # 8191 tokens, 1536 dimensions - 'whisper-1': 'openai' -} - -max_context_token_length = { - 'claude-instant-1': 100000, - 'claude-2': 100000, - 'gpt-4': 8192, - 'gpt-4-32k': 32768, - 'gpt-3.5-turbo': 4096, - 'gpt-3.5-turbo-16k': 16384, - 'text-davinci-003': 4097, - 'text-davinci-002': 4097, - 'text-curie-001': 2049, - 'text-babbage-001': 2049, - 'text-ada-001': 2049, - 'text-embedding-ada-002': 8191, -} - -models_by_mode = { - 'chat': [ - 'claude-instant-1', # 100,000 tokens - 'claude-2', # 100,000 tokens - 'gpt-4', # 8,192 tokens - 'gpt-4-32k', # 32,768 tokens - 'gpt-3.5-turbo', # 4,096 tokens - 'gpt-3.5-turbo-16k', # 16,384 tokens - ], - 'completion': [ - 'claude-instant-1', # 100,000 tokens - 'claude-2', # 100,000 tokens - 'gpt-4', # 8,192 tokens - 'gpt-4-32k', # 32,768 tokens - 'gpt-3.5-turbo', # 4,096 tokens - 'gpt-3.5-turbo-16k', # 16,384 tokens - 'text-davinci-003', # 4,097 tokens - 'text-davinci-002' # 4,097 tokens - 'text-curie-001', # 2,049 tokens - 'text-babbage-001', # 2,049 tokens - 'text-ada-001' # 2,049 tokens - ], - 'embedding': [ - 'text-embedding-ada-002' # 8191 tokens, 1536 dimensions - ] -} - -model_currency = 'USD' - -model_prices = { - 'claude-instant-1': { - 'prompt': Decimal('0.00163'), - 'completion': Decimal('0.00551'), - }, - 'claude-2': { - 'prompt': Decimal('0.01102'), - 'completion': Decimal('0.03268'), - }, - 'gpt-4': { - 'prompt': Decimal('0.03'), - 'completion': Decimal('0.06'), - }, - 'gpt-4-32k': { - 'prompt': Decimal('0.06'), - 'completion': 
Decimal('0.12') - }, - 'gpt-3.5-turbo': { - 'prompt': Decimal('0.0015'), - 'completion': Decimal('0.002') - }, - 'gpt-3.5-turbo-16k': { - 'prompt': Decimal('0.003'), - 'completion': Decimal('0.004') - }, - 'text-davinci-003': { - 'prompt': Decimal('0.02'), - 'completion': Decimal('0.02') - }, - 'text-curie-001': { - 'prompt': Decimal('0.002'), - 'completion': Decimal('0.002') - }, - 'text-babbage-001': { - 'prompt': Decimal('0.0005'), - 'completion': Decimal('0.0005') - }, - 'text-ada-001': { - 'prompt': Decimal('0.0004'), - 'completion': Decimal('0.0004') - }, - 'text-embedding-ada-002': { - 'usage': Decimal('0.0001'), - } -} - -agent_model_name = 'text-davinci-003' diff --git a/api/core/conversation_message_task.py b/api/core/conversation_message_task.py index 51c2b6902..e9d9f3ec8 100644 --- a/api/core/conversation_message_task.py +++ b/api/core/conversation_message_task.py @@ -6,9 +6,9 @@ from core.callback_handler.entity.agent_loop import AgentLoop from core.callback_handler.entity.dataset_query import DatasetQueryObj from core.callback_handler.entity.llm_message import LLMMessage from core.callback_handler.entity.chain_result import ChainResult -from core.constant import llm_constant -from core.llm.llm_builder import LLMBuilder -from core.llm.provider.llm_provider_service import LLMProviderService +from core.model_providers.model_factory import ModelFactory +from core.model_providers.models.entity.message import to_prompt_messages, MessageType +from core.model_providers.models.llm.base import BaseLLM from core.prompt.prompt_builder import PromptBuilder from core.prompt.prompt_template import JinjaPromptTemplate from events.message_event import message_was_created @@ -16,12 +16,11 @@ from extensions.ext_database import db from extensions.ext_redis import redis_client from models.dataset import DatasetQuery from models.model import AppModelConfig, Conversation, Account, Message, EndUser, App, MessageAgentThought, MessageChain -from models.provider import ProviderType, Provider class ConversationMessageTask: def __init__(self, task_id: str, app: App, app_model_config: AppModelConfig, user: Account, - inputs: dict, query: str, streaming: bool, + inputs: dict, query: str, streaming: bool, model_instance: BaseLLM, conversation: Optional[Conversation] = None, is_override: bool = False): self.task_id = task_id @@ -38,9 +37,12 @@ class ConversationMessageTask: self.conversation = conversation self.is_new_conversation = False + self.model_instance = model_instance + self.message = None self.model_dict = self.app_model_config.model_dict + self.provider_name = self.model_dict.get('provider') self.model_name = self.model_dict.get('name') self.mode = app.mode @@ -56,9 +58,6 @@ class ConversationMessageTask: ) def init(self): - provider_name = LLMBuilder.get_default_provider(self.app.tenant_id, self.model_name) - self.model_dict['provider'] = provider_name - override_model_configs = None if self.is_override: override_model_configs = { @@ -89,15 +88,19 @@ class ConversationMessageTask: if self.app_model_config.pre_prompt: system_message = PromptBuilder.to_system_message(self.app_model_config.pre_prompt, self.inputs) system_instruction = system_message.content - llm = LLMBuilder.to_llm(self.tenant_id, self.model_name) - system_instruction_tokens = llm.get_num_tokens_from_messages([system_message]) + model_instance = ModelFactory.get_text_generation_model( + tenant_id=self.tenant_id, + model_provider_name=self.provider_name, + model_name=self.model_name + ) + system_instruction_tokens = 
model_instance.get_num_tokens(to_prompt_messages([system_message])) if not self.conversation: self.is_new_conversation = True self.conversation = Conversation( app_id=self.app_model_config.app_id, app_model_config_id=self.app_model_config.id, - model_provider=self.model_dict.get('provider'), + model_provider=self.provider_name, model_id=self.model_name, override_model_configs=json.dumps(override_model_configs) if override_model_configs else None, mode=self.mode, @@ -117,7 +120,7 @@ class ConversationMessageTask: self.message = Message( app_id=self.app_model_config.app_id, - model_provider=self.model_dict.get('provider'), + model_provider=self.provider_name, model_id=self.model_name, override_model_configs=json.dumps(override_model_configs) if override_model_configs else None, conversation_id=self.conversation.id, @@ -131,7 +134,7 @@ class ConversationMessageTask: answer_unit_price=0, provider_response_latency=0, total_price=0, - currency=llm_constant.model_currency, + currency=self.model_instance.get_currency(), from_source=('console' if isinstance(self.user, Account) else 'api'), from_end_user_id=(self.user.id if isinstance(self.user, EndUser) else None), from_account_id=(self.user.id if isinstance(self.user, Account) else None), @@ -145,12 +148,10 @@ class ConversationMessageTask: self._pub_handler.pub_text(text) def save_message(self, llm_message: LLMMessage, by_stopped: bool = False): - model_name = self.app_model_config.model_dict.get('name') - message_tokens = llm_message.prompt_tokens answer_tokens = llm_message.completion_tokens - message_unit_price = llm_constant.model_prices[model_name]['prompt'] - answer_unit_price = llm_constant.model_prices[model_name]['completion'] + message_unit_price = self.model_instance.get_token_price(1, MessageType.HUMAN) + answer_unit_price = self.model_instance.get_token_price(1, MessageType.ASSISTANT) total_price = self.calc_total_price(message_tokens, message_unit_price, answer_tokens, answer_unit_price) @@ -163,8 +164,6 @@ class ConversationMessageTask: self.message.provider_response_latency = llm_message.latency self.message.total_price = total_price - self.update_provider_quota() - db.session.commit() message_was_created.send( @@ -176,20 +175,6 @@ class ConversationMessageTask: if not by_stopped: self.end() - def update_provider_quota(self): - llm_provider_service = LLMProviderService( - tenant_id=self.app.tenant_id, - provider_name=self.message.model_provider, - ) - - provider = llm_provider_service.get_provider_db_record() - if provider and provider.provider_type == ProviderType.SYSTEM.value: - db.session.query(Provider).filter( - Provider.tenant_id == self.app.tenant_id, - Provider.provider_name == provider.provider_name, - Provider.quota_limit > Provider.quota_used - ).update({'quota_used': Provider.quota_used + 1}) - def init_chain(self, chain_result: ChainResult): message_chain = MessageChain( message_id=self.message.id, @@ -229,10 +214,10 @@ class ConversationMessageTask: return message_agent_thought - def on_agent_end(self, message_agent_thought: MessageAgentThought, agent_model_name: str, + def on_agent_end(self, message_agent_thought: MessageAgentThought, agent_model_instance: BaseLLM, agent_loop: AgentLoop): - agent_message_unit_price = llm_constant.model_prices[agent_model_name]['prompt'] - agent_answer_unit_price = llm_constant.model_prices[agent_model_name]['completion'] + agent_message_unit_price = agent_model_instance.get_token_price(1, MessageType.HUMAN) + agent_answer_unit_price = agent_model_instance.get_token_price(1, MessageType.ASSISTANT) loop_message_tokens = agent_loop.prompt_tokens loop_answer_tokens = agent_loop.completion_tokens @@ -253,7 +238,7 @@ class ConversationMessageTask: message_agent_thought.latency = agent_loop.latency message_agent_thought.tokens = agent_loop.prompt_tokens + agent_loop.completion_tokens message_agent_thought.total_price = loop_total_price - message_agent_thought.currency = llm_constant.model_currency + message_agent_thought.currency = agent_model_instance.get_currency() db.session.flush() def on_dataset_query_end(self, dataset_query_obj: DatasetQueryObj): diff --git a/api/core/docstore/dataset_docstore.py b/api/core/docstore/dataset_docstore.py index 016e71137..786ae4469 100644 --- a/api/core/docstore/dataset_docstore.py +++ b/api/core/docstore/dataset_docstore.py @@ -3,7 +3,7 @@ from typing import Any, Dict, Optional, Sequence from langchain.schema import Document from sqlalchemy import func -from core.llm.token_calculator import TokenCalculator +from core.model_providers.model_factory import ModelFactory from extensions.ext_database import db from models.dataset import Dataset, DocumentSegment @@ -13,12 +13,10 @@ class DatesetDocumentStore: self, dataset: Dataset, user_id: str, - embedding_model_name: str, document_id: Optional[str] = None, ): self._dataset = dataset self._user_id = user_id - self._embedding_model_name = embedding_model_name self._document_id = document_id @classmethod @@ -39,10 +37,6 @@ class DatesetDocumentStore: def user_id(self) -> Any: return self._user_id - @property - def embedding_model_name(self) -> Any: - return self._embedding_model_name - @property def docs(self) -> Dict[str, Document]: document_segments = db.session.query(DocumentSegment).filter( @@ -74,6 +68,10 @@ class DatesetDocumentStore: if max_position is None: max_position = 0 + embedding_model = ModelFactory.get_embedding_model( + tenant_id=self._dataset.tenant_id + ) + for doc in docs: if not isinstance(doc, Document): raise ValueError("doc must be a Document") @@ -88,7 +86,7 @@ class DatesetDocumentStore: ) # calc tokens used for embedding - tokens = TokenCalculator.get_num_tokens(self._embedding_model_name, doc.page_content) + tokens = embedding_model.get_num_tokens(doc.page_content) if not segment_document: max_position += 1 diff --git a/api/core/embedding/cached_embedding.py b/api/core/embedding/cached_embedding.py index 045b13ea3..63bab8cd5 100644 --- a/api/core/embedding/cached_embedding.py +++ b/api/core/embedding/cached_embedding.py @@ -4,14 +4,14 @@ from typing import List from langchain.embeddings.base import Embeddings from sqlalchemy.exc import IntegrityError -from core.llm.wrappers.openai_wrapper import handle_openai_exceptions +from core.model_providers.models.embedding.base import BaseEmbedding from extensions.ext_database import db from libs import helper from models.dataset import Embedding class CacheEmbedding(Embeddings): - def __init__(self, embeddings: Embeddings): + def __init__(self, embeddings: BaseEmbedding): self._embeddings = embeddings def embed_documents(self, texts: List[str]) -> List[List[float]]: @@ -21,48 +21,54 @@ class CacheEmbedding(Embeddings): embedding_queue_texts = [] for text in texts: hash = helper.generate_text_hash(text) - embedding = db.session.query(Embedding).filter_by(hash=hash).first() + embedding = db.session.query(Embedding).filter_by(model_name=self._embeddings.name, hash=hash).first() if embedding: text_embeddings.append(embedding.get_embedding()) else: embedding_queue_texts.append(text) - embedding_results =
self._embeddings.embed_documents(embedding_queue_texts) - - i = 0 - for text in embedding_queue_texts: - hash = helper.generate_text_hash(text) - + if embedding_queue_texts: try: - embedding = Embedding(hash=hash) - embedding.set_embedding(embedding_results[i]) - db.session.add(embedding) - db.session.commit() - except IntegrityError: - db.session.rollback() - continue - except: - logging.exception('Failed to add embedding to db') - continue - finally: - i += 1 + embedding_results = self._embeddings.client.embed_documents(embedding_queue_texts) + except Exception as ex: + raise self._embeddings.handle_exceptions(ex) - text_embeddings.extend(embedding_results) + i = 0 + for text in embedding_queue_texts: + hash = helper.generate_text_hash(text) + + try: + embedding = Embedding(model_name=self._embeddings.name, hash=hash) + embedding.set_embedding(embedding_results[i]) + db.session.add(embedding) + db.session.commit() + except IntegrityError: + db.session.rollback() + continue + except: + logging.exception('Failed to add embedding to db') + continue + finally: + i += 1 + + text_embeddings.extend(embedding_results) return text_embeddings - @handle_openai_exceptions def embed_query(self, text: str) -> List[float]: """Embed query text.""" # use doc embedding cache or store if not exists hash = helper.generate_text_hash(text) - embedding = db.session.query(Embedding).filter_by(hash=hash).first() + embedding = db.session.query(Embedding).filter_by(model_name=self._embeddings.name, hash=hash).first() if embedding: return embedding.get_embedding() - embedding_results = self._embeddings.embed_query(text) + try: + embedding_results = self._embeddings.client.embed_query(text) + except Exception as ex: + raise self._embeddings.handle_exceptions(ex) try: - embedding = Embedding(hash=hash) + embedding = Embedding(model_name=self._embeddings.name, hash=hash) embedding.set_embedding(embedding_results) db.session.add(embedding) db.session.commit() @@ -72,3 +78,5 @@ class CacheEmbedding(Embeddings): logging.exception('Failed to add embedding to db') return embedding_results + + diff --git a/api/core/generator/llm_generator.py b/api/core/generator/llm_generator.py index a5294add2..77cf8a234 100644 --- a/api/core/generator/llm_generator.py +++ b/api/core/generator/llm_generator.py @@ -1,13 +1,10 @@ import logging -from langchain import PromptTemplate -from langchain.chat_models.base import BaseChatModel -from langchain.schema import HumanMessage, OutputParserException, BaseMessage, SystemMessage +from langchain.schema import OutputParserException -from core.constant import llm_constant -from core.llm.llm_builder import LLMBuilder -from core.llm.streamable_open_ai import StreamableOpenAI -from core.llm.token_calculator import TokenCalculator +from core.model_providers.model_factory import ModelFactory +from core.model_providers.models.entity.message import PromptMessage, MessageType +from core.model_providers.models.entity.model_params import ModelKwargs from core.prompt.output_parser.rule_config_generator import RuleConfigGeneratorOutputParser from core.prompt.output_parser.suggested_questions_after_answer import SuggestedQuestionsAfterAnswerOutputParser @@ -15,9 +12,6 @@ from core.prompt.prompt_template import JinjaPromptTemplate, OutLinePromptTempla from core.prompt.prompts import CONVERSATION_TITLE_PROMPT, CONVERSATION_SUMMARY_PROMPT, INTRODUCTION_GENERATE_PROMPT, \ GENERATOR_QA_PROMPT -# gpt-3.5-turbo works not well -generate_base_model = 'text-davinci-003' - class LLMGenerator: @classmethod @@ -28,29 
+22,35 @@ class LLMGenerator: query = query[:300] + "...[TRUNCATED]..." + query[-300:] prompt = prompt.format(query=query) - llm: StreamableOpenAI = LLMBuilder.to_llm( + + model_instance = ModelFactory.get_text_generation_model( tenant_id=tenant_id, - model_name='gpt-3.5-turbo', - max_tokens=50, - timeout=600 + model_kwargs=ModelKwargs( + max_tokens=50 + ) ) - if isinstance(llm, BaseChatModel): - prompt = [HumanMessage(content=prompt)] - - response = llm.generate([prompt]) - answer = response.generations[0][0].text + prompts = [PromptMessage(content=prompt)] + response = model_instance.run(prompts) + answer = response.content return answer.strip() @classmethod def generate_conversation_summary(cls, tenant_id: str, messages): max_tokens = 200 - model = 'gpt-3.5-turbo' + + model_instance = ModelFactory.get_text_generation_model( + tenant_id=tenant_id, + model_kwargs=ModelKwargs( + max_tokens=max_tokens + ) + ) prompt = CONVERSATION_SUMMARY_PROMPT prompt_with_empty_context = prompt.format(context='') - prompt_tokens = TokenCalculator.get_num_tokens(model, prompt_with_empty_context) - rest_tokens = llm_constant.max_context_token_length[model] - prompt_tokens - max_tokens - 1 + prompt_tokens = model_instance.get_num_tokens([PromptMessage(content=prompt_with_empty_context)]) + max_context_token_length = model_instance.model_rules.max_tokens.max + rest_tokens = max_context_token_length - prompt_tokens - max_tokens - 1 context = '' for message in messages: @@ -68,25 +68,16 @@ class LLMGenerator: answer = message.answer message_qa_text = "\n\nHuman:" + query + "\n\nAssistant:" + answer - if rest_tokens - TokenCalculator.get_num_tokens(model, context + message_qa_text) > 0: + if rest_tokens - model_instance.get_num_tokens([PromptMessage(content=context + message_qa_text)]) > 0: context += message_qa_text if not context: return '[message too long, no summary]' prompt = prompt.format(context=context) - - llm: StreamableOpenAI = LLMBuilder.to_llm( - tenant_id=tenant_id, - model_name=model, - max_tokens=max_tokens - ) - - if isinstance(llm, BaseChatModel): - prompt = [HumanMessage(content=prompt)] - - response = llm.generate([prompt]) - answer = response.generations[0][0].text + prompts = [PromptMessage(content=prompt)] + response = model_instance.run(prompts) + answer = response.content return answer.strip() @classmethod @@ -94,16 +85,13 @@ class LLMGenerator: prompt = INTRODUCTION_GENERATE_PROMPT prompt = prompt.format(prompt=pre_prompt) - llm: StreamableOpenAI = LLMBuilder.to_llm( - tenant_id=tenant_id, - model_name=generate_base_model, + model_instance = ModelFactory.get_text_generation_model( + tenant_id=tenant_id ) - if isinstance(llm, BaseChatModel): - prompt = [HumanMessage(content=prompt)] - - response = llm.generate([prompt]) - answer = response.generations[0][0].text + prompts = [PromptMessage(content=prompt)] + response = model_instance.run(prompts) + answer = response.content return answer.strip() @classmethod @@ -119,23 +107,19 @@ class LLMGenerator: _input = prompt.format_prompt(histories=histories) - llm: StreamableOpenAI = LLMBuilder.to_llm( + model_instance = ModelFactory.get_text_generation_model( tenant_id=tenant_id, - model_name='gpt-3.5-turbo', - temperature=0, - max_tokens=256 + model_kwargs=ModelKwargs( + max_tokens=256, + temperature=0 + ) ) - if isinstance(llm, BaseChatModel): - query = [HumanMessage(content=_input.to_string())] - else: - query = _input.to_string() + prompts = [PromptMessage(content=_input.to_string())] try: - output = llm(query) - if isinstance(output, 
BaseMessage): - output = output.content - questions = output_parser.parse(output) + output = model_instance.run(prompts) + questions = output_parser.parse(output.content) except Exception: logging.exception("Error generating suggested questions after answer") questions = [] @@ -160,21 +144,19 @@ class LLMGenerator: _input = prompt.format_prompt(audiences=audiences, hoping_to_solve=hoping_to_solve) - llm: StreamableOpenAI = LLMBuilder.to_llm( + model_instance = ModelFactory.get_text_generation_model( tenant_id=tenant_id, - model_name=generate_base_model, - temperature=0, - max_tokens=512 + model_kwargs=ModelKwargs( + max_tokens=512, + temperature=0 + ) ) - if isinstance(llm, BaseChatModel): - query = [HumanMessage(content=_input.to_string())] - else: - query = _input.to_string() + prompts = [PromptMessage(content=_input.to_string())] try: - output = llm(query) - rule_config = output_parser.parse(output) + output = model_instance.run(prompts) + rule_config = output_parser.parse(output.content) except OutputParserException: raise ValueError('Please give a valid input for intended audience or hoping to solve problems.') except Exception: @@ -188,25 +170,21 @@ class LLMGenerator: return rule_config @classmethod - async def generate_qa_document(cls, llm: StreamableOpenAI, query): + def generate_qa_document(cls, tenant_id: str, query): prompt = GENERATOR_QA_PROMPT + model_instance = ModelFactory.get_text_generation_model( + tenant_id=tenant_id, + model_kwargs=ModelKwargs( + max_tokens=2000 + ) + ) - if isinstance(llm, BaseChatModel): - prompt = [SystemMessage(content=prompt), HumanMessage(content=query)] + prompts = [ + PromptMessage(content=prompt, type=MessageType.SYSTEM), + PromptMessage(content=query) + ] - response = llm.generate([prompt]) - answer = response.generations[0][0].text - return answer.strip() - - @classmethod - def generate_qa_document_sync(cls, llm: StreamableOpenAI, query): - prompt = GENERATOR_QA_PROMPT - - - if isinstance(llm, BaseChatModel): - prompt = [SystemMessage(content=prompt), HumanMessage(content=query)] - - response = llm.generate([prompt]) - answer = response.generations[0][0].text + response = model_instance.run(prompts) + answer = response.content return answer.strip() diff --git a/api/tests/test_helpers/__init__.py b/api/core/helper/__init__.py similarity index 100% rename from api/tests/test_helpers/__init__.py rename to api/core/helper/__init__.py diff --git a/api/core/helper/encrypter.py b/api/core/helper/encrypter.py new file mode 100644 index 000000000..fa94867ba --- /dev/null +++ b/api/core/helper/encrypter.py @@ -0,0 +1,20 @@ +import base64 + +from extensions.ext_database import db +from libs import rsa + +from models.account import Tenant + + +def obfuscated_token(token: str): + return token[:6] + '*' * (len(token) - 8) + token[-2:] + + +def encrypt_token(tenant_id: str, token: str): + tenant = db.session.query(Tenant).filter(Tenant.id == tenant_id).first() + encrypted_token = rsa.encrypt(token, tenant.encrypt_public_key) + return base64.b64encode(encrypted_token).decode() + + +def decrypt_token(tenant_id: str, token: str): + return rsa.decrypt(base64.b64decode(token), tenant_id) diff --git a/api/core/index/index.py b/api/core/index/index.py index 657ad221e..316b60456 100644 --- a/api/core/index/index.py +++ b/api/core/index/index.py @@ -1,10 +1,9 @@ from flask import current_app -from langchain.embeddings import OpenAIEmbeddings from core.embedding.cached_embedding import CacheEmbedding from core.index.keyword_table_index.keyword_table_index import 
KeywordTableIndex, KeywordTableConfig from core.index.vector_index.vector_index import VectorIndex -from core.llm.llm_builder import LLMBuilder +from core.model_providers.model_factory import ModelFactory from models.dataset import Dataset @@ -15,16 +14,11 @@ class IndexBuilder: if not ignore_high_quality_check and dataset.indexing_technique != 'high_quality': return None - model_credentials = LLMBuilder.get_model_credentials( - tenant_id=dataset.tenant_id, - model_provider=LLMBuilder.get_default_provider(dataset.tenant_id, 'text-embedding-ada-002'), - model_name='text-embedding-ada-002' + embedding_model = ModelFactory.get_embedding_model( + tenant_id=dataset.tenant_id ) - embeddings = CacheEmbedding(OpenAIEmbeddings( - max_retries=1, - **model_credentials - )) + embeddings = CacheEmbedding(embedding_model) return VectorIndex( dataset=dataset, diff --git a/api/core/indexing_runner.py b/api/core/indexing_runner.py index 56df8f231..57e1e8fab 100644 --- a/api/core/indexing_runner.py +++ b/api/core/indexing_runner.py @@ -1,4 +1,3 @@ -import concurrent import datetime import json import logging @@ -6,7 +5,6 @@ import re import threading import time import uuid -from concurrent.futures import ThreadPoolExecutor from typing import Optional, List, cast from flask_login import current_user @@ -18,11 +16,10 @@ from core.data_loader.loader.notion import NotionLoader from core.docstore.dataset_docstore import DatesetDocumentStore from core.generator.llm_generator import LLMGenerator from core.index.index import IndexBuilder -from core.llm.error import ProviderTokenNotInitError -from core.llm.llm_builder import LLMBuilder -from core.llm.streamable_open_ai import StreamableOpenAI +from core.model_providers.error import ProviderTokenNotInitError +from core.model_providers.model_factory import ModelFactory +from core.model_providers.models.entity.message import MessageType from core.spiltter.fixed_text_splitter import FixedRecursiveCharacterTextSplitter -from core.llm.token_calculator import TokenCalculator from extensions.ext_database import db from extensions.ext_redis import redis_client from extensions.ext_storage import storage @@ -35,9 +32,8 @@ from models.source import DataSourceBinding class IndexingRunner: - def __init__(self, embedding_model_name: str = "text-embedding-ada-002"): + def __init__(self): self.storage = storage - self.embedding_model_name = embedding_model_name def run(self, dataset_documents: List[DatasetDocument]): """Run the indexing process.""" @@ -227,11 +223,15 @@ class IndexingRunner: dataset_document.stopped_at = datetime.datetime.utcnow() db.session.commit() - def file_indexing_estimate(self, file_details: List[UploadFile], tmp_processing_rule: dict, + def file_indexing_estimate(self, tenant_id: str, file_details: List[UploadFile], tmp_processing_rule: dict, doc_form: str = None) -> dict: """ Estimate the indexing for the document. 
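
The estimate below prices a dataset against the tenant's embedding model instead of the removed `TokenCalculator`. The core arithmetic, as a self-contained sketch built only from interfaces this patch introduces:

```python
# Sketch of the indexing cost estimate (interfaces from this patch).
from typing import List

from core.model_providers.model_factory import ModelFactory

def estimate_embedding_cost(tenant_id: str, segments: List[str]) -> dict:
    embedding_model = ModelFactory.get_embedding_model(tenant_id=tenant_id)

    # Token count is summed per segment with the embedding model's own tokenizer.
    tokens = sum(embedding_model.get_num_tokens(text) for text in segments)

    return {
        'total_segments': len(segments),
        'tokens': tokens,
        'total_price': '{:f}'.format(embedding_model.get_token_price(tokens)),
        'currency': embedding_model.get_currency(),
    }
```
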
""" + embedding_model = ModelFactory.get_embedding_model( + tenant_id=tenant_id + ) + tokens = 0 preview_texts = [] total_segments = 0 @@ -253,44 +253,49 @@ class IndexingRunner: splitter=splitter, processing_rule=processing_rule ) + total_segments += len(documents) + for document in documents: if len(preview_texts) < 5: preview_texts.append(document.page_content) - tokens += TokenCalculator.get_num_tokens(self.embedding_model_name, - self.filter_string(document.page_content)) + tokens += embedding_model.get_num_tokens(self.filter_string(document.page_content)) + + text_generation_model = ModelFactory.get_text_generation_model( + tenant_id=tenant_id + ) + if doc_form and doc_form == 'qa_model': if len(preview_texts) > 0: # qa model document - llm: StreamableOpenAI = LLMBuilder.to_llm( - tenant_id=current_user.current_tenant_id, - model_name='gpt-3.5-turbo', - max_tokens=2000 - ) - response = LLMGenerator.generate_qa_document_sync(llm, preview_texts[0]) + response = LLMGenerator.generate_qa_document(current_user.current_tenant_id, preview_texts[0]) document_qa_list = self.format_split_text(response) return { "total_segments": total_segments * 20, "tokens": total_segments * 2000, "total_price": '{:f}'.format( - TokenCalculator.get_token_price('gpt-3.5-turbo', total_segments * 2000, 'completion')), - "currency": TokenCalculator.get_currency(self.embedding_model_name), + text_generation_model.get_token_price(total_segments * 2000, MessageType.HUMAN)), + "currency": embedding_model.get_currency(), "qa_preview": document_qa_list, "preview": preview_texts } return { "total_segments": total_segments, "tokens": tokens, - "total_price": '{:f}'.format(TokenCalculator.get_token_price(self.embedding_model_name, tokens)), - "currency": TokenCalculator.get_currency(self.embedding_model_name), + "total_price": '{:f}'.format(embedding_model.get_token_price(tokens)), + "currency": embedding_model.get_currency(), "preview": preview_texts } - def notion_indexing_estimate(self, notion_info_list: list, tmp_processing_rule: dict, doc_form: str = None) -> dict: + def notion_indexing_estimate(self, tenant_id: str, notion_info_list: list, tmp_processing_rule: dict, doc_form: str = None) -> dict: """ Estimate the indexing for the document. 
""" + embedding_model = ModelFactory.get_embedding_model( + tenant_id=tenant_id + ) + # load data from notion tokens = 0 preview_texts = [] @@ -336,31 +341,31 @@ class IndexingRunner: if len(preview_texts) < 5: preview_texts.append(document.page_content) - tokens += TokenCalculator.get_num_tokens(self.embedding_model_name, document.page_content) + tokens += embedding_model.get_num_tokens(document.page_content) + + text_generation_model = ModelFactory.get_text_generation_model( + tenant_id=tenant_id + ) + if doc_form and doc_form == 'qa_model': if len(preview_texts) > 0: # qa model document - llm: StreamableOpenAI = LLMBuilder.to_llm( - tenant_id=current_user.current_tenant_id, - model_name='gpt-3.5-turbo', - max_tokens=2000 - ) - response = LLMGenerator.generate_qa_document_sync(llm, preview_texts[0]) + response = LLMGenerator.generate_qa_document(current_user.current_tenant_id, preview_texts[0]) document_qa_list = self.format_split_text(response) return { "total_segments": total_segments * 20, "tokens": total_segments * 2000, "total_price": '{:f}'.format( - TokenCalculator.get_token_price('gpt-3.5-turbo', total_segments * 2000, 'completion')), - "currency": TokenCalculator.get_currency(self.embedding_model_name), + text_generation_model.get_token_price(total_segments * 2000, MessageType.HUMAN)), + "currency": embedding_model.get_currency(), "qa_preview": document_qa_list, "preview": preview_texts } return { "total_segments": total_segments, "tokens": tokens, - "total_price": '{:f}'.format(TokenCalculator.get_token_price(self.embedding_model_name, tokens)), - "currency": TokenCalculator.get_currency(self.embedding_model_name), + "total_price": '{:f}'.format(embedding_model.get_token_price(tokens)), + "currency": embedding_model.get_currency(), "preview": preview_texts } @@ -459,7 +464,6 @@ class IndexingRunner: doc_store = DatesetDocumentStore( dataset=dataset, user_id=dataset_document.created_by, - embedding_model_name=self.embedding_model_name, document_id=dataset_document.id ) @@ -513,17 +517,12 @@ class IndexingRunner: all_documents.extend(split_documents) # processing qa document if document_form == 'qa_model': - llm: StreamableOpenAI = LLMBuilder.to_llm( - tenant_id=tenant_id, - model_name='gpt-3.5-turbo', - max_tokens=2000 - ) for i in range(0, len(all_documents), 10): threads = [] sub_documents = all_documents[i:i + 10] for doc in sub_documents: document_format_thread = threading.Thread(target=self.format_qa_document, kwargs={ - 'llm': llm, 'document_node': doc, 'all_qa_documents': all_qa_documents}) + 'tenant_id': tenant_id, 'document_node': doc, 'all_qa_documents': all_qa_documents}) threads.append(document_format_thread) document_format_thread.start() for thread in threads: @@ -531,13 +530,13 @@ class IndexingRunner: return all_qa_documents return all_documents - def format_qa_document(self, llm: StreamableOpenAI, document_node, all_qa_documents): + def format_qa_document(self, tenant_id: str, document_node, all_qa_documents): format_documents = [] if document_node.page_content is None or not document_node.page_content.strip(): return try: # qa model document - response = LLMGenerator.generate_qa_document_sync(llm, document_node.page_content) + response = LLMGenerator.generate_qa_document(tenant_id, document_node.page_content) document_qa_list = self.format_split_text(response) qa_documents = [] for result in document_qa_list: @@ -638,6 +637,10 @@ class IndexingRunner: vector_index = IndexBuilder.get_index(dataset, 'high_quality') keyword_table_index = 
IndexBuilder.get_index(dataset, 'economy') + embedding_model = ModelFactory.get_embedding_model( + tenant_id=dataset.tenant_id + ) + # chunk nodes by chunk size indexing_start_at = time.perf_counter() tokens = 0 @@ -648,7 +651,7 @@ class IndexingRunner: chunk_documents = documents[i:i + chunk_size] tokens += sum( - TokenCalculator.get_num_tokens(self.embedding_model_name, document.page_content) + embedding_model.get_num_tokens(document.page_content) for document in chunk_documents ) diff --git a/api/core/llm/llm_builder.py b/api/core/llm/llm_builder.py deleted file mode 100644 index f054939fd..000000000 --- a/api/core/llm/llm_builder.py +++ /dev/null @@ -1,148 +0,0 @@ -from typing import Union, Optional, List - -from langchain.callbacks.base import BaseCallbackHandler - -from core.constant import llm_constant -from core.llm.error import ProviderTokenNotInitError -from core.llm.provider.base import BaseProvider -from core.llm.provider.llm_provider_service import LLMProviderService -from core.llm.streamable_azure_chat_open_ai import StreamableAzureChatOpenAI -from core.llm.streamable_azure_open_ai import StreamableAzureOpenAI -from core.llm.streamable_chat_anthropic import StreamableChatAnthropic -from core.llm.streamable_chat_open_ai import StreamableChatOpenAI -from core.llm.streamable_open_ai import StreamableOpenAI -from models.provider import ProviderType, ProviderName - - -class LLMBuilder: - """ - This class handles the following logic: - 1. For providers with the name 'OpenAI', the OPENAI_API_KEY value is stored directly in encrypted_config. - 2. For providers with the name 'Azure OpenAI', encrypted_config stores the serialized values of four fields, as shown below: - OPENAI_API_TYPE=azure - OPENAI_API_VERSION=2022-12-01 - OPENAI_API_BASE=https://your-resource-name.openai.azure.com - OPENAI_API_KEY= - 3. For providers with the name 'Anthropic', the ANTHROPIC_API_KEY value is stored directly in encrypted_config. - 4. For providers with the name 'Cohere', the COHERE_API_KEY value is stored directly in encrypted_config. - 5. For providers with the name 'HUGGINGFACEHUB', the HUGGINGFACEHUB_API_KEY value is stored directly in encrypted_config. - 6. Providers with the provider_type 'CUSTOM' can be created through the admin interface, while 'System' providers cannot be created through the admin interface. - 7. If both CUSTOM and System providers exist in the records, the CUSTOM provider is preferred by default, but this preference can be changed via an input parameter. - 8. For providers with the provider_type 'System', the quota_used must not exceed quota_limit. If the quota is exceeded, the provider cannot be used. Currently, only the TRIAL quota_type is supported, which is permanently non-resetting. 
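
For contrast with the builder being deleted here: callers now resolve provider, credentials, and model class through `ModelFactory`, following the usage shown in `llm_generator.py` above. A sketch of the replacement call; the tenant id and kwargs are illustrative:

```python
# Old: LLMBuilder.to_llm(tenant_id, 'gpt-3.5-turbo', max_tokens=50)
# New (sketch): ModelFactory hides provider selection and credential lookup.
from core.model_providers.model_factory import ModelFactory
from core.model_providers.models.entity.message import PromptMessage
from core.model_providers.models.entity.model_params import ModelKwargs

model_instance = ModelFactory.get_text_generation_model(
    tenant_id='tenant-uuid',  # illustrative tenant id
    model_kwargs=ModelKwargs(max_tokens=50, temperature=0),
)
response = model_instance.run([PromptMessage(content='ping')])
print(response.content)
```
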
- """ - - @classmethod - def to_llm(cls, tenant_id: str, model_name: str, **kwargs) -> Union[StreamableOpenAI, StreamableChatOpenAI]: - provider = cls.get_default_provider(tenant_id, model_name) - - model_credentials = cls.get_model_credentials(tenant_id, provider, model_name) - - llm_cls = None - mode = cls.get_mode_by_model(model_name) - if mode == 'chat': - if provider == ProviderName.OPENAI.value: - llm_cls = StreamableChatOpenAI - elif provider == ProviderName.AZURE_OPENAI.value: - llm_cls = StreamableAzureChatOpenAI - elif provider == ProviderName.ANTHROPIC.value: - llm_cls = StreamableChatAnthropic - elif mode == 'completion': - if provider == ProviderName.OPENAI.value: - llm_cls = StreamableOpenAI - elif provider == ProviderName.AZURE_OPENAI.value: - llm_cls = StreamableAzureOpenAI - - if not llm_cls: - raise ValueError(f"model name {model_name} is not supported.") - - model_kwargs = { - 'model_name': model_name, - 'temperature': kwargs.get('temperature', 0), - 'max_tokens': kwargs.get('max_tokens', 256), - 'top_p': kwargs.get('top_p', 1), - 'frequency_penalty': kwargs.get('frequency_penalty', 0), - 'presence_penalty': kwargs.get('presence_penalty', 0), - 'callbacks': kwargs.get('callbacks', None), - 'streaming': kwargs.get('streaming', False), - } - - model_kwargs.update(model_credentials) - model_kwargs = llm_cls.get_kwargs_from_model_params(model_kwargs) - - return llm_cls(**model_kwargs) - - @classmethod - def to_llm_from_model(cls, tenant_id: str, model: dict, streaming: bool = False, - callbacks: Optional[List[BaseCallbackHandler]] = None) -> Union[StreamableOpenAI, StreamableChatOpenAI]: - model_name = model.get("name") - completion_params = model.get("completion_params", {}) - - return cls.to_llm( - tenant_id=tenant_id, - model_name=model_name, - temperature=completion_params.get('temperature', 0), - max_tokens=completion_params.get('max_tokens', 256), - top_p=completion_params.get('top_p', 0), - frequency_penalty=completion_params.get('frequency_penalty', 0.1), - presence_penalty=completion_params.get('presence_penalty', 0.1), - streaming=streaming, - callbacks=callbacks - ) - - @classmethod - def get_mode_by_model(cls, model_name: str) -> str: - if not model_name: - raise ValueError(f"empty model name is not supported.") - - if model_name in llm_constant.models_by_mode['chat']: - return "chat" - elif model_name in llm_constant.models_by_mode['completion']: - return "completion" - else: - raise ValueError(f"model name {model_name} is not supported.") - - @classmethod - def get_model_credentials(cls, tenant_id: str, model_provider: str, model_name: str) -> dict: - """ - Returns the API credentials for the given tenant_id and model_name, based on the model's provider. - Raises an exception if the model_name is not found or if the provider is not found. 
- """ - if not model_name: - raise Exception('model name not found') - # - # if model_name not in llm_constant.models: - # raise Exception('model {} not found'.format(model_name)) - - # model_provider = llm_constant.models[model_name] - - provider_service = LLMProviderService(tenant_id=tenant_id, provider_name=model_provider) - return provider_service.get_credentials(model_name) - - @classmethod - def get_default_provider(cls, tenant_id: str, model_name: str) -> str: - provider_name = llm_constant.models[model_name] - - if provider_name == 'openai': - # get the default provider (openai / azure_openai) for the tenant - openai_provider = BaseProvider.get_valid_provider(tenant_id, ProviderName.OPENAI.value) - azure_openai_provider = BaseProvider.get_valid_provider(tenant_id, ProviderName.AZURE_OPENAI.value) - - provider = None - if openai_provider and openai_provider.provider_type == ProviderType.CUSTOM.value: - provider = openai_provider - elif azure_openai_provider and azure_openai_provider.provider_type == ProviderType.CUSTOM.value: - provider = azure_openai_provider - elif openai_provider and openai_provider.provider_type == ProviderType.SYSTEM.value: - provider = openai_provider - elif azure_openai_provider and azure_openai_provider.provider_type == ProviderType.SYSTEM.value: - provider = azure_openai_provider - - if not provider: - raise ProviderTokenNotInitError( - f"No valid {provider_name} model provider credentials found. " - f"Please go to Settings -> Model Provider to complete your provider credentials." - ) - - provider_name = provider.provider_name - - return provider_name diff --git a/api/core/llm/moderation.py b/api/core/llm/moderation.py deleted file mode 100644 index d18d6fc5c..000000000 --- a/api/core/llm/moderation.py +++ /dev/null @@ -1,15 +0,0 @@ -import openai -from models.provider import ProviderName - - -class Moderation: - - def __init__(self, provider: str, api_key: str): - self.provider = provider - self.api_key = api_key - - if self.provider == ProviderName.OPENAI.value: - self.client = openai.Moderation - - def moderate(self, text): - return self.client.create(input=text, api_key=self.api_key) diff --git a/api/core/llm/provider/anthropic_provider.py b/api/core/llm/provider/anthropic_provider.py deleted file mode 100644 index d6165d032..000000000 --- a/api/core/llm/provider/anthropic_provider.py +++ /dev/null @@ -1,138 +0,0 @@ -import json -import logging -from typing import Optional, Union - -import anthropic -from langchain.chat_models import ChatAnthropic -from langchain.schema import HumanMessage - -from core import hosted_llm_credentials -from core.llm.error import ProviderTokenNotInitError -from core.llm.provider.base import BaseProvider -from core.llm.provider.errors import ValidateFailedError -from models.provider import ProviderName, ProviderType - - -class AnthropicProvider(BaseProvider): - def get_models(self, model_id: Optional[str] = None) -> list[dict]: - return [ - { - 'id': 'claude-instant-1', - 'name': 'claude-instant-1', - }, - { - 'id': 'claude-2', - 'name': 'claude-2', - }, - ] - - def get_credentials(self, model_id: Optional[str] = None) -> dict: - return self.get_provider_api_key(model_id=model_id) - - def get_provider_name(self): - return ProviderName.ANTHROPIC - - def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str | dict]: - """ - Returns the provider configs. 
- """ - try: - config = self.get_provider_api_key(only_custom=only_custom) - except: - config = { - 'anthropic_api_key': '' - } - - if obfuscated: - if not config.get('anthropic_api_key'): - config = { - 'anthropic_api_key': '' - } - - config['anthropic_api_key'] = self.obfuscated_token(config.get('anthropic_api_key')) - return config - - return config - - def get_encrypted_token(self, config: Union[dict | str]): - """ - Returns the encrypted token. - """ - return json.dumps({ - 'anthropic_api_key': self.encrypt_token(config['anthropic_api_key']) - }) - - def get_decrypted_token(self, token: str): - """ - Returns the decrypted token. - """ - config = json.loads(token) - config['anthropic_api_key'] = self.decrypt_token(config['anthropic_api_key']) - return config - - def get_token_type(self): - return dict - - def config_validate(self, config: Union[dict | str]): - """ - Validates the given config. - """ - # check OpenAI / Azure OpenAI credential is valid - openai_provider = BaseProvider.get_valid_provider(self.tenant_id, ProviderName.OPENAI.value) - azure_openai_provider = BaseProvider.get_valid_provider(self.tenant_id, ProviderName.AZURE_OPENAI.value) - - provider = None - if openai_provider: - provider = openai_provider - elif azure_openai_provider: - provider = azure_openai_provider - - if not provider: - raise ValidateFailedError(f"OpenAI or Azure OpenAI provider must be configured first.") - - if provider.provider_type == ProviderType.SYSTEM.value: - quota_used = provider.quota_used if provider.quota_used is not None else 0 - quota_limit = provider.quota_limit if provider.quota_limit is not None else 0 - if quota_used >= quota_limit: - raise ValidateFailedError(f"Your quota for Dify Hosted OpenAI has been exhausted, " - f"please configure OpenAI or Azure OpenAI provider first.") - - try: - if not isinstance(config, dict): - raise ValueError('Config must be a object.') - - if 'anthropic_api_key' not in config: - raise ValueError('anthropic_api_key must be provided.') - - chat_llm = ChatAnthropic( - model='claude-instant-1', - anthropic_api_key=config['anthropic_api_key'], - max_tokens_to_sample=10, - temperature=0, - default_request_timeout=60 - ) - - messages = [ - HumanMessage( - content="ping" - ) - ] - - chat_llm(messages) - except anthropic.APIConnectionError as ex: - raise ValidateFailedError(f"Anthropic: Connection error, cause: {ex.__cause__}") - except (anthropic.APIStatusError, anthropic.RateLimitError) as ex: - raise ValidateFailedError(f"Anthropic: Error code: {ex.status_code} - " - f"{ex.body['error']['type']}: {ex.body['error']['message']}") - except Exception as ex: - logging.exception('Anthropic config validation failed') - raise ex - - def get_hosted_credentials(self) -> Union[str | dict]: - if not hosted_llm_credentials.anthropic or not hosted_llm_credentials.anthropic.api_key: - raise ProviderTokenNotInitError( - f"No valid {self.get_provider_name().value} model provider credentials found. " - f"Please go to Settings -> Model Provider to complete your provider credentials." 
- ) - - return {'anthropic_api_key': hosted_llm_credentials.anthropic.api_key} diff --git a/api/core/llm/provider/azure_provider.py b/api/core/llm/provider/azure_provider.py deleted file mode 100644 index 8d6345062..000000000 --- a/api/core/llm/provider/azure_provider.py +++ /dev/null @@ -1,145 +0,0 @@ -import json -import logging -from typing import Optional, Union - -import openai -import requests - -from core.llm.provider.base import BaseProvider -from core.llm.provider.errors import ValidateFailedError -from models.provider import ProviderName - - -AZURE_OPENAI_API_VERSION = '2023-07-01-preview' - - -class AzureProvider(BaseProvider): - def get_models(self, model_id: Optional[str] = None, credentials: Optional[dict] = None) -> list[dict]: - return [] - - def check_embedding_model(self, credentials: Optional[dict] = None): - credentials = self.get_credentials('text-embedding-ada-002') if not credentials else credentials - try: - result = openai.Embedding.create(input=['test'], - engine='text-embedding-ada-002', - timeout=60, - api_key=str(credentials.get('openai_api_key')), - api_base=str(credentials.get('openai_api_base')), - api_type='azure', - api_version=str(credentials.get('openai_api_version')))["data"][0][ - "embedding"] - except openai.error.AuthenticationError as e: - raise AzureAuthenticationError(str(e)) - except openai.error.APIConnectionError as e: - raise AzureRequestFailedError( - 'Failed to request Azure OpenAI, please check your API Base Endpoint, The format is `https://xxx.openai.azure.com/`') - except openai.error.InvalidRequestError as e: - if e.http_status == 404: - raise AzureRequestFailedError("Please check your 'gpt-3.5-turbo' or 'text-embedding-ada-002' " - "deployment name is exists in Azure AI") - else: - raise AzureRequestFailedError( - 'Failed to request Azure OpenAI. cause: {}'.format(str(e))) - except openai.error.OpenAIError as e: - raise AzureRequestFailedError( - 'Failed to request Azure OpenAI. cause: {}'.format(str(e))) - - if not isinstance(result, list): - raise AzureRequestFailedError('Failed to request Azure OpenAI.') - - def get_credentials(self, model_id: Optional[str] = None) -> dict: - """ - Returns the API credentials for Azure OpenAI as a dictionary. - """ - config = self.get_provider_api_key(model_id=model_id) - config['openai_api_type'] = 'azure' - config['openai_api_version'] = AZURE_OPENAI_API_VERSION - if model_id == 'text-embedding-ada-002': - config['deployment'] = model_id.replace('.', '') if model_id else None - config['chunk_size'] = 16 - else: - config['deployment_name'] = model_id.replace('.', '') if model_id else None - return config - - def get_provider_name(self): - return ProviderName.AZURE_OPENAI - - def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str | dict]: - """ - Returns the provider configs. - """ - try: - config = self.get_provider_api_key(only_custom=only_custom) - except: - config = { - 'openai_api_type': 'azure', - 'openai_api_version': AZURE_OPENAI_API_VERSION, - 'openai_api_base': '', - 'openai_api_key': '' - } - - if obfuscated: - if not config.get('openai_api_key'): - config = { - 'openai_api_type': 'azure', - 'openai_api_version': AZURE_OPENAI_API_VERSION, - 'openai_api_base': '', - 'openai_api_key': '' - } - - config['openai_api_key'] = self.obfuscated_token(config.get('openai_api_key')) - return config - - return config - - def get_token_type(self): - return dict - - def config_validate(self, config: Union[dict | str]): - """ - Validates the given config. 
- """ - try: - if not isinstance(config, dict): - raise ValueError('Config must be a object.') - - if 'openai_api_version' not in config: - config['openai_api_version'] = AZURE_OPENAI_API_VERSION - - self.check_embedding_model(credentials=config) - except ValidateFailedError as e: - raise e - except AzureAuthenticationError: - raise ValidateFailedError('Validation failed, please check your API Key.') - except AzureRequestFailedError as ex: - raise ValidateFailedError('Validation failed, error: {}.'.format(str(ex))) - except Exception as ex: - logging.exception('Azure OpenAI Credentials validation failed') - raise ValidateFailedError('Validation failed, error: {}.'.format(str(ex))) - - def get_encrypted_token(self, config: Union[dict | str]): - """ - Returns the encrypted token. - """ - return json.dumps({ - 'openai_api_type': 'azure', - 'openai_api_version': AZURE_OPENAI_API_VERSION, - 'openai_api_base': config['openai_api_base'], - 'openai_api_key': self.encrypt_token(config['openai_api_key']) - }) - - def get_decrypted_token(self, token: str): - """ - Returns the decrypted token. - """ - config = json.loads(token) - config['openai_api_key'] = self.decrypt_token(config['openai_api_key']) - return config - - -class AzureAuthenticationError(Exception): - pass - - -class AzureRequestFailedError(Exception): - pass diff --git a/api/core/llm/provider/base.py b/api/core/llm/provider/base.py deleted file mode 100644 index c3ff5cf23..000000000 --- a/api/core/llm/provider/base.py +++ /dev/null @@ -1,132 +0,0 @@ -import base64 -from abc import ABC, abstractmethod -from typing import Optional, Union - -from core.constant import llm_constant -from core.llm.error import QuotaExceededError, ModelCurrentlyNotSupportError, ProviderTokenNotInitError -from extensions.ext_database import db -from libs import rsa -from models.account import Tenant -from models.provider import Provider, ProviderType, ProviderName - - -class BaseProvider(ABC): - def __init__(self, tenant_id: str): - self.tenant_id = tenant_id - - def get_provider_api_key(self, model_id: Optional[str] = None, only_custom: bool = False) -> Union[str | dict]: - """ - Returns the decrypted API key for the given tenant_id and provider_name. - If the provider is of type SYSTEM and the quota is exceeded, raises a QuotaExceededError. - If the provider is not found or not valid, raises a ProviderTokenNotInitError. - """ - provider = self.get_provider(only_custom) - if not provider: - raise ProviderTokenNotInitError( - f"No valid {llm_constant.models[model_id]} model provider credentials found. " - f"Please go to Settings -> Model Provider to complete your provider credentials." - ) - - if provider.provider_type == ProviderType.SYSTEM.value: - quota_used = provider.quota_used if provider.quota_used is not None else 0 - quota_limit = provider.quota_limit if provider.quota_limit is not None else 0 - - if model_id and model_id == 'gpt-4': - raise ModelCurrentlyNotSupportError() - - if quota_used >= quota_limit: - raise QuotaExceededError() - - return self.get_hosted_credentials() - else: - return self.get_decrypted_token(provider.encrypted_config) - - def get_provider(self, only_custom: bool = False) -> Optional[Provider]: - """ - Returns the Provider instance for the given tenant_id and provider_name. - If both CUSTOM and System providers exist, the preferred provider will be returned based on the prefer_custom flag. 
- """ - return BaseProvider.get_valid_provider(self.tenant_id, self.get_provider_name().value, only_custom) - - @classmethod - def get_valid_provider(cls, tenant_id: str, provider_name: str = None, only_custom: bool = False) -> Optional[ - Provider]: - """ - Returns the Provider instance for the given tenant_id and provider_name. - If both CUSTOM and System providers exist. - """ - query = db.session.query(Provider).filter( - Provider.tenant_id == tenant_id - ) - - if provider_name: - query = query.filter(Provider.provider_name == provider_name) - - if only_custom: - query = query.filter(Provider.provider_type == ProviderType.CUSTOM.value) - - providers = query.order_by(Provider.provider_type.asc()).all() - - for provider in providers: - if provider.provider_type == ProviderType.CUSTOM.value and provider.is_valid and provider.encrypted_config: - return provider - elif provider.provider_type == ProviderType.SYSTEM.value and provider.is_valid: - return provider - - return None - - def get_hosted_credentials(self) -> Union[str | dict]: - raise ProviderTokenNotInitError( - f"No valid {self.get_provider_name().value} model provider credentials found. " - f"Please go to Settings -> Model Provider to complete your provider credentials." - ) - - def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str | dict]: - """ - Returns the provider configs. - """ - try: - config = self.get_provider_api_key(only_custom=only_custom) - except: - config = '' - - if obfuscated: - return self.obfuscated_token(config) - - return config - - def obfuscated_token(self, token: str): - return token[:6] + '*' * (len(token) - 8) + token[-2:] - - def get_token_type(self): - return str - - def get_encrypted_token(self, config: Union[dict | str]): - return self.encrypt_token(config) - - def get_decrypted_token(self, token: str): - return self.decrypt_token(token) - - def encrypt_token(self, token): - tenant = db.session.query(Tenant).filter(Tenant.id == self.tenant_id).first() - encrypted_token = rsa.encrypt(token, tenant.encrypt_public_key) - return base64.b64encode(encrypted_token).decode() - - def decrypt_token(self, token): - return rsa.decrypt(base64.b64decode(token), self.tenant_id) - - @abstractmethod - def get_provider_name(self): - raise NotImplementedError - - @abstractmethod - def get_credentials(self, model_id: Optional[str] = None) -> dict: - raise NotImplementedError - - @abstractmethod - def get_models(self, model_id: Optional[str] = None) -> list[dict]: - raise NotImplementedError - - @abstractmethod - def config_validate(self, config: str): - raise NotImplementedError diff --git a/api/core/llm/provider/errors.py b/api/core/llm/provider/errors.py deleted file mode 100644 index 407b7f790..000000000 --- a/api/core/llm/provider/errors.py +++ /dev/null @@ -1,2 +0,0 @@ -class ValidateFailedError(Exception): - description = "Provider Validate failed" diff --git a/api/core/llm/provider/huggingface_provider.py b/api/core/llm/provider/huggingface_provider.py deleted file mode 100644 index b3dd3ed57..000000000 --- a/api/core/llm/provider/huggingface_provider.py +++ /dev/null @@ -1,22 +0,0 @@ -from typing import Optional - -from core.llm.provider.base import BaseProvider -from models.provider import ProviderName - - -class HuggingfaceProvider(BaseProvider): - def get_models(self, model_id: Optional[str] = None) -> list[dict]: - credentials = self.get_credentials(model_id) - # todo - return [] - - def get_credentials(self, model_id: Optional[str] = None) -> dict: - """ - Returns 
the API credentials for Huggingface as a dictionary, for the given tenant_id. - """ - return { - 'huggingface_api_key': self.get_provider_api_key(model_id=model_id) - } - - def get_provider_name(self): - return ProviderName.HUGGINGFACEHUB \ No newline at end of file diff --git a/api/core/llm/provider/llm_provider_service.py b/api/core/llm/provider/llm_provider_service.py deleted file mode 100644 index a520e3d6b..000000000 --- a/api/core/llm/provider/llm_provider_service.py +++ /dev/null @@ -1,53 +0,0 @@ -from typing import Optional, Union - -from core.llm.provider.anthropic_provider import AnthropicProvider -from core.llm.provider.azure_provider import AzureProvider -from core.llm.provider.base import BaseProvider -from core.llm.provider.huggingface_provider import HuggingfaceProvider -from core.llm.provider.openai_provider import OpenAIProvider -from models.provider import Provider - - -class LLMProviderService: - - def __init__(self, tenant_id: str, provider_name: str): - self.provider = self.init_provider(tenant_id, provider_name) - - def init_provider(self, tenant_id: str, provider_name: str) -> BaseProvider: - if provider_name == 'openai': - return OpenAIProvider(tenant_id) - elif provider_name == 'azure_openai': - return AzureProvider(tenant_id) - elif provider_name == 'anthropic': - return AnthropicProvider(tenant_id) - elif provider_name == 'huggingface': - return HuggingfaceProvider(tenant_id) - else: - raise Exception('provider {} not found'.format(provider_name)) - - def get_models(self, model_id: Optional[str] = None) -> list[dict]: - return self.provider.get_models(model_id) - - def get_credentials(self, model_id: Optional[str] = None) -> dict: - return self.provider.get_credentials(model_id) - - def get_provider_configs(self, obfuscated: bool = False, only_custom: bool = False) -> Union[str | dict]: - return self.provider.get_provider_configs(obfuscated=obfuscated, only_custom=only_custom) - - def get_provider_db_record(self) -> Optional[Provider]: - return self.provider.get_provider() - - def config_validate(self, config: Union[dict | str]): - """ - Validates the given config. - - :param config: - :raises: ValidateFailedError - """ - return self.provider.config_validate(config) - - def get_token_type(self): - return self.provider.get_token_type() - - def get_encrypted_token(self, config: Union[dict | str]): - return self.provider.get_encrypted_token(config) diff --git a/api/core/llm/provider/openai_provider.py b/api/core/llm/provider/openai_provider.py deleted file mode 100644 index b24e98e5d..000000000 --- a/api/core/llm/provider/openai_provider.py +++ /dev/null @@ -1,55 +0,0 @@ -import logging -from typing import Optional, Union - -import openai -from openai.error import AuthenticationError, OpenAIError - -from core import hosted_llm_credentials -from core.llm.error import ProviderTokenNotInitError -from core.llm.moderation import Moderation -from core.llm.provider.base import BaseProvider -from core.llm.provider.errors import ValidateFailedError -from models.provider import ProviderName - - -class OpenAIProvider(BaseProvider): - def get_models(self, model_id: Optional[str] = None) -> list[dict]: - credentials = self.get_credentials(model_id) - response = openai.Model.list(**credentials) - - return [{ - 'id': model['id'], - 'name': model['id'], - } for model in response['data']] - - def get_credentials(self, model_id: Optional[str] = None) -> dict: - """ - Returns the credentials for the given tenant_id and provider_name. 
- """ - return { - 'openai_api_key': self.get_provider_api_key(model_id=model_id) - } - - def get_provider_name(self): - return ProviderName.OPENAI - - def config_validate(self, config: Union[dict | str]): - """ - Validates the given config. - """ - try: - Moderation(self.get_provider_name().value, config).moderate('test') - except (AuthenticationError, OpenAIError) as ex: - raise ValidateFailedError(str(ex)) - except Exception as ex: - logging.exception('OpenAI config validation failed') - raise ex - - def get_hosted_credentials(self) -> Union[str | dict]: - if not hosted_llm_credentials.openai or not hosted_llm_credentials.openai.api_key: - raise ProviderTokenNotInitError( - f"No valid {self.get_provider_name().value} model provider credentials found. " - f"Please go to Settings -> Model Provider to complete your provider credentials." - ) - - return hosted_llm_credentials.openai.api_key diff --git a/api/core/llm/streamable_chat_anthropic.py b/api/core/llm/streamable_chat_anthropic.py deleted file mode 100644 index 9b9422791..000000000 --- a/api/core/llm/streamable_chat_anthropic.py +++ /dev/null @@ -1,62 +0,0 @@ -from typing import List, Optional, Any, Dict - -from httpx import Timeout -from langchain.callbacks.manager import Callbacks -from langchain.chat_models import ChatAnthropic -from langchain.schema import BaseMessage, LLMResult, SystemMessage, AIMessage, HumanMessage, ChatMessage -from pydantic import root_validator - -from core.llm.wrappers.anthropic_wrapper import handle_anthropic_exceptions - - -class StreamableChatAnthropic(ChatAnthropic): - """ - Wrapper around Anthropic's large language model. - """ - - default_request_timeout: Optional[float] = Timeout(timeout=300.0, connect=5.0) - - @root_validator() - def prepare_params(cls, values: Dict) -> Dict: - values['model_name'] = values.get('model') - values['max_tokens'] = values.get('max_tokens_to_sample') - return values - - @handle_anthropic_exceptions - def generate( - self, - messages: List[List[BaseMessage]], - stop: Optional[List[str]] = None, - callbacks: Callbacks = None, - *, - tags: Optional[List[str]] = None, - metadata: Optional[Dict[str, Any]] = None, - **kwargs: Any, - ) -> LLMResult: - return super().generate(messages, stop, callbacks, tags=tags, metadata=metadata, **kwargs) - - @classmethod - def get_kwargs_from_model_params(cls, params: dict): - params['model'] = params.get('model_name') - del params['model_name'] - - params['max_tokens_to_sample'] = params.get('max_tokens') - del params['max_tokens'] - - del params['frequency_penalty'] - del params['presence_penalty'] - - return params - - def _convert_one_message_to_text(self, message: BaseMessage) -> str: - if isinstance(message, ChatMessage): - message_text = f"\n\n{message.role.capitalize()}: {message.content}" - elif isinstance(message, HumanMessage): - message_text = f"{self.HUMAN_PROMPT} {message.content}" - elif isinstance(message, AIMessage): - message_text = f"{self.AI_PROMPT} {message.content}" - elif isinstance(message, SystemMessage): - message_text = f"{message.content}" - else: - raise ValueError(f"Got unknown type {message}") - return message_text \ No newline at end of file diff --git a/api/core/llm/token_calculator.py b/api/core/llm/token_calculator.py deleted file mode 100644 index e45f2b4d6..000000000 --- a/api/core/llm/token_calculator.py +++ /dev/null @@ -1,41 +0,0 @@ -import decimal -from typing import Optional - -import tiktoken - -from core.constant import llm_constant - - -class TokenCalculator: - @classmethod - def 
get_num_tokens(cls, model_name: str, text: str): - if len(text) == 0: - return 0 - - enc = tiktoken.encoding_for_model(model_name) - - tokenized_text = enc.encode(text) - - # calculate the number of tokens in the encoded text - return len(tokenized_text) - - @classmethod - def get_token_price(cls, model_name: str, tokens: int, text_type: Optional[str] = None) -> decimal.Decimal: - if model_name in llm_constant.models_by_mode['embedding']: - unit_price = llm_constant.model_prices[model_name]['usage'] - elif text_type == 'prompt': - unit_price = llm_constant.model_prices[model_name]['prompt'] - elif text_type == 'completion': - unit_price = llm_constant.model_prices[model_name]['completion'] - else: - raise Exception('Invalid text type') - - tokens_per_1k = (decimal.Decimal(tokens) / 1000).quantize(decimal.Decimal('0.001'), - rounding=decimal.ROUND_HALF_UP) - - total_price = tokens_per_1k * unit_price - return total_price.quantize(decimal.Decimal('0.0000001'), rounding=decimal.ROUND_HALF_UP) - - @classmethod - def get_currency(cls, model_name: str): - return llm_constant.model_currency diff --git a/api/core/llm/whisper.py b/api/core/llm/whisper.py deleted file mode 100644 index 7f3bf3d79..000000000 --- a/api/core/llm/whisper.py +++ /dev/null @@ -1,26 +0,0 @@ -import openai - -from core.llm.wrappers.openai_wrapper import handle_openai_exceptions -from models.provider import ProviderName -from core.llm.provider.base import BaseProvider - - -class Whisper: - - def __init__(self, provider: BaseProvider): - self.provider = provider - - if self.provider.get_provider_name() == ProviderName.OPENAI: - self.client = openai.Audio - self.credentials = provider.get_credentials() - - @handle_openai_exceptions - def transcribe(self, file): - return self.client.transcribe( - model='whisper-1', - file=file, - api_key=self.credentials.get('openai_api_key'), - api_base=self.credentials.get('openai_api_base'), - api_type=self.credentials.get('openai_api_type'), - api_version=self.credentials.get('openai_api_version'), - ) diff --git a/api/core/llm/wrappers/anthropic_wrapper.py b/api/core/llm/wrappers/anthropic_wrapper.py deleted file mode 100644 index 7fddc277d..000000000 --- a/api/core/llm/wrappers/anthropic_wrapper.py +++ /dev/null @@ -1,27 +0,0 @@ -import logging -from functools import wraps - -import anthropic - -from core.llm.error import LLMAPIConnectionError, LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError, \ - LLMBadRequestError - - -def handle_anthropic_exceptions(func): - @wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except anthropic.APIConnectionError as e: - logging.exception("Failed to connect to Anthropic API.") - raise LLMAPIConnectionError(f"Anthropic: The server could not be reached, cause: {e.__cause__}") - except anthropic.RateLimitError: - raise LLMRateLimitError("Anthropic: A 429 status code was received; we should back off a bit.") - except anthropic.AuthenticationError as e: - raise LLMAuthorizationError(f"Anthropic: {e.message}") - except anthropic.BadRequestError as e: - raise LLMBadRequestError(f"Anthropic: {e.message}") - except anthropic.APIStatusError as e: - raise LLMAPIUnavailableError(f"Anthropic: code: {e.status_code}, cause: {e.message}") - - return wrapper diff --git a/api/core/llm/wrappers/openai_wrapper.py b/api/core/llm/wrappers/openai_wrapper.py deleted file mode 100644 index 7f96e75ed..000000000 --- a/api/core/llm/wrappers/openai_wrapper.py +++ /dev/null @@ -1,31 +0,0 @@ -import logging -from functools import wraps - 
-import openai - -from core.llm.error import LLMAPIConnectionError, LLMAPIUnavailableError, LLMRateLimitError, LLMAuthorizationError, \ - LLMBadRequestError - - -def handle_openai_exceptions(func): - @wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except openai.error.InvalidRequestError as e: - logging.exception("Invalid request to OpenAI API.") - raise LLMBadRequestError(str(e)) - except openai.error.APIConnectionError as e: - logging.exception("Failed to connect to OpenAI API.") - raise LLMAPIConnectionError(e.__class__.__name__ + ":" + str(e)) - except (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout) as e: - logging.exception("OpenAI service unavailable.") - raise LLMAPIUnavailableError(e.__class__.__name__ + ":" + str(e)) - except openai.error.RateLimitError as e: - raise LLMRateLimitError(str(e)) - except openai.error.AuthenticationError as e: - raise LLMAuthorizationError(str(e)) - except openai.error.OpenAIError as e: - raise LLMBadRequestError(e.__class__.__name__ + ":" + str(e)) - - return wrapper diff --git a/api/core/memory/read_only_conversation_token_db_buffer_shared_memory.py b/api/core/memory/read_only_conversation_token_db_buffer_shared_memory.py index d96187ece..55d70d38a 100644 --- a/api/core/memory/read_only_conversation_token_db_buffer_shared_memory.py +++ b/api/core/memory/read_only_conversation_token_db_buffer_shared_memory.py @@ -1,10 +1,10 @@ -from typing import Any, List, Dict, Union +from typing import Any, List, Dict from langchain.memory.chat_memory import BaseChatMemory -from langchain.schema import get_buffer_string, BaseMessage, HumanMessage, AIMessage, BaseLanguageModel +from langchain.schema import get_buffer_string, BaseMessage -from core.llm.streamable_chat_open_ai import StreamableChatOpenAI -from core.llm.streamable_open_ai import StreamableOpenAI +from core.model_providers.models.entity.message import PromptMessage, MessageType, to_lc_messages +from core.model_providers.models.llm.base import BaseLLM from extensions.ext_database import db from models.model import Conversation, Message @@ -13,7 +13,7 @@ class ReadOnlyConversationTokenDBBufferSharedMemory(BaseChatMemory): conversation: Conversation human_prefix: str = "Human" ai_prefix: str = "Assistant" - llm: BaseLanguageModel + model_instance: BaseLLM memory_key: str = "chat_history" max_token_limit: int = 2000 message_limit: int = 10 @@ -29,23 +29,23 @@ class ReadOnlyConversationTokenDBBufferSharedMemory(BaseChatMemory): messages = list(reversed(messages)) - chat_messages: List[BaseMessage] = [] + chat_messages: List[PromptMessage] = [] for message in messages: - chat_messages.append(HumanMessage(content=message.query)) - chat_messages.append(AIMessage(content=message.answer)) + chat_messages.append(PromptMessage(content=message.query, type=MessageType.HUMAN)) + chat_messages.append(PromptMessage(content=message.answer, type=MessageType.ASSISTANT)) if not chat_messages: - return chat_messages + return [] # prune the chat message if it exceeds the max token limit - curr_buffer_length = self.llm.get_num_tokens_from_messages(chat_messages) + curr_buffer_length = self.model_instance.get_num_tokens(chat_messages) if curr_buffer_length > self.max_token_limit: pruned_memory = [] while curr_buffer_length > self.max_token_limit and chat_messages: pruned_memory.append(chat_messages.pop(0)) - curr_buffer_length = self.llm.get_num_tokens_from_messages(chat_messages) + curr_buffer_length = self.model_instance.get_num_tokens(chat_messages) - 
return chat_messages + return to_lc_messages(chat_messages) @property def memory_variables(self) -> List[str]: diff --git a/api/core/llm/error.py b/api/core/model_providers/error.py similarity index 100% rename from api/core/llm/error.py rename to api/core/model_providers/error.py diff --git a/api/core/model_providers/model_factory.py b/api/core/model_providers/model_factory.py new file mode 100644 index 000000000..b76a64025 --- /dev/null +++ b/api/core/model_providers/model_factory.py @@ -0,0 +1,293 @@ +from typing import Optional + +from langchain.callbacks.base import Callbacks + +from core.model_providers.error import ProviderTokenNotInitError, LLMBadRequestError +from core.model_providers.model_provider_factory import ModelProviderFactory, DEFAULT_MODELS +from core.model_providers.models.base import BaseProviderModel +from core.model_providers.models.embedding.base import BaseEmbedding +from core.model_providers.models.entity.model_params import ModelKwargs, ModelType +from core.model_providers.models.llm.base import BaseLLM +from core.model_providers.models.speech2text.base import BaseSpeech2Text +from extensions.ext_database import db +from models.provider import TenantDefaultModel + + +class ModelFactory: + + @classmethod + def get_text_generation_model_from_model_config(cls, tenant_id: str, + model_config: dict, + streaming: bool = False, + callbacks: Callbacks = None) -> Optional[BaseLLM]: + provider_name = model_config.get("provider") + model_name = model_config.get("name") + completion_params = model_config.get("completion_params", {}) + + return cls.get_text_generation_model( + tenant_id=tenant_id, + model_provider_name=provider_name, + model_name=model_name, + model_kwargs=ModelKwargs( + temperature=completion_params.get('temperature', 0), + max_tokens=completion_params.get('max_tokens', 256), + top_p=completion_params.get('top_p', 0), + frequency_penalty=completion_params.get('frequency_penalty', 0.1), + presence_penalty=completion_params.get('presence_penalty', 0.1) + ), + streaming=streaming, + callbacks=callbacks + ) + + @classmethod + def get_text_generation_model(cls, + tenant_id: str, + model_provider_name: Optional[str] = None, + model_name: Optional[str] = None, + model_kwargs: Optional[ModelKwargs] = None, + streaming: bool = False, + callbacks: Callbacks = None) -> Optional[BaseLLM]: + """ + get text generation model. + + :param tenant_id: a string representing the ID of the tenant. + :param model_provider_name: + :param model_name: + :param model_kwargs: + :param streaming: + :param callbacks: + :return: + """ + is_default_model = False + if model_provider_name is None and model_name is None: + default_model = cls.get_default_model(tenant_id, ModelType.TEXT_GENERATION) + + if not default_model: + raise LLMBadRequestError(f"Default model is not available. 
" + f"Please configure a Default System Reasoning Model " + f"in the Settings -> Model Provider.") + + model_provider_name = default_model.provider_name + model_name = default_model.model_name + is_default_model = True + + # get model provider + model_provider = ModelProviderFactory.get_preferred_model_provider(tenant_id, model_provider_name) + + if not model_provider: + raise ProviderTokenNotInitError(f"Model {model_name} provider credentials is not initialized.") + + # init text generation model + model_class = model_provider.get_model_class(model_type=ModelType.TEXT_GENERATION) + + try: + model_instance = model_class( + model_provider=model_provider, + name=model_name, + model_kwargs=model_kwargs, + streaming=streaming, + callbacks=callbacks + ) + except LLMBadRequestError as e: + if is_default_model: + raise LLMBadRequestError(f"Default model {model_name} is not available. " + f"Please check your model provider credentials.") + else: + raise e + + if is_default_model: + model_instance.deduct_quota = False + + return model_instance + + @classmethod + def get_embedding_model(cls, + tenant_id: str, + model_provider_name: Optional[str] = None, + model_name: Optional[str] = None) -> Optional[BaseEmbedding]: + """ + get embedding model. + + :param tenant_id: a string representing the ID of the tenant. + :param model_provider_name: + :param model_name: + :return: + """ + if model_provider_name is None and model_name is None: + default_model = cls.get_default_model(tenant_id, ModelType.EMBEDDINGS) + + if not default_model: + raise LLMBadRequestError(f"Default model is not available. " + f"Please configure a Default Embedding Model " + f"in the Settings -> Model Provider.") + + model_provider_name = default_model.provider_name + model_name = default_model.model_name + + # get model provider + model_provider = ModelProviderFactory.get_preferred_model_provider(tenant_id, model_provider_name) + + if not model_provider: + raise ProviderTokenNotInitError(f"Model {model_name} provider credentials is not initialized.") + + # init embedding model + model_class = model_provider.get_model_class(model_type=ModelType.EMBEDDINGS) + return model_class( + model_provider=model_provider, + name=model_name + ) + + @classmethod + def get_speech2text_model(cls, + tenant_id: str, + model_provider_name: Optional[str] = None, + model_name: Optional[str] = None) -> Optional[BaseSpeech2Text]: + """ + get speech to text model. + + :param tenant_id: a string representing the ID of the tenant. + :param model_provider_name: + :param model_name: + :return: + """ + if model_provider_name is None and model_name is None: + default_model = cls.get_default_model(tenant_id, ModelType.SPEECH_TO_TEXT) + + if not default_model: + raise LLMBadRequestError(f"Default model is not available. 
" + f"Please configure a Default Speech-to-Text Model " + f"in the Settings -> Model Provider.") + + model_provider_name = default_model.provider_name + model_name = default_model.model_name + + # get model provider + model_provider = ModelProviderFactory.get_preferred_model_provider(tenant_id, model_provider_name) + + if not model_provider: + raise ProviderTokenNotInitError(f"Model {model_name} provider credentials is not initialized.") + + # init speech to text model + model_class = model_provider.get_model_class(model_type=ModelType.SPEECH_TO_TEXT) + return model_class( + model_provider=model_provider, + name=model_name + ) + + @classmethod + def get_moderation_model(cls, + tenant_id: str, + model_provider_name: str, + model_name: str) -> Optional[BaseProviderModel]: + """ + get moderation model. + + :param tenant_id: a string representing the ID of the tenant. + :param model_provider_name: + :param model_name: + :return: + """ + # get model provider + model_provider = ModelProviderFactory.get_preferred_model_provider(tenant_id, model_provider_name) + + if not model_provider: + raise ProviderTokenNotInitError(f"Model {model_name} provider credentials is not initialized.") + + # init moderation model + model_class = model_provider.get_model_class(model_type=ModelType.MODERATION) + return model_class( + model_provider=model_provider, + name=model_name + ) + + @classmethod + def get_default_model(cls, tenant_id: str, model_type: ModelType) -> TenantDefaultModel: + """ + get default model of model type. + + :param tenant_id: + :param model_type: + :return: + """ + # get default model + default_model = db.session.query(TenantDefaultModel) \ + .filter( + TenantDefaultModel.tenant_id == tenant_id, + TenantDefaultModel.model_type == model_type.value + ).first() + + if not default_model: + model_provider_rules = ModelProviderFactory.get_provider_rules() + for model_provider_name, model_provider_rule in model_provider_rules.items(): + model_provider = ModelProviderFactory.get_preferred_model_provider(tenant_id, model_provider_name) + if not model_provider: + continue + + model_list = model_provider.get_supported_model_list(model_type) + if model_list: + model_info = model_list[0] + default_model = TenantDefaultModel( + tenant_id=tenant_id, + model_type=model_type.value, + provider_name=model_provider_name, + model_name=model_info['id'] + ) + db.session.add(default_model) + db.session.commit() + break + + return default_model + + @classmethod + def update_default_model(cls, + tenant_id: str, + model_type: ModelType, + provider_name: str, + model_name: str) -> TenantDefaultModel: + """ + update default model of model type. 
+ + :param tenant_id: + :param model_type: + :param provider_name: + :param model_name: + :return: + """ + model_provider_name = ModelProviderFactory.get_provider_names() + if provider_name not in model_provider_name: + raise ValueError(f'Invalid provider name: {provider_name}') + + model_provider = ModelProviderFactory.get_preferred_model_provider(tenant_id, provider_name) + + if not model_provider: + raise ProviderTokenNotInitError(f"Model {model_name} provider credentials is not initialized.") + + model_list = model_provider.get_supported_model_list(model_type) + model_ids = [model['id'] for model in model_list] + if model_name not in model_ids: + raise ValueError(f'Invalid model name: {model_name}') + + # get default model + default_model = db.session.query(TenantDefaultModel) \ + .filter( + TenantDefaultModel.tenant_id == tenant_id, + TenantDefaultModel.model_type == model_type.value + ).first() + + if default_model: + # update default model + default_model.provider_name = provider_name + default_model.model_name = model_name + db.session.commit() + else: + # create default model + default_model = TenantDefaultModel( + tenant_id=tenant_id, + model_type=model_type.value, + provider_name=provider_name, + model_name=model_name, + ) + db.session.add(default_model) + db.session.commit() + + return default_model diff --git a/api/core/model_providers/model_provider_factory.py b/api/core/model_providers/model_provider_factory.py new file mode 100644 index 000000000..e2d8b4360 --- /dev/null +++ b/api/core/model_providers/model_provider_factory.py @@ -0,0 +1,228 @@ +from typing import Type + +from sqlalchemy.exc import IntegrityError + +from core.model_providers.models.entity.model_params import ModelType +from core.model_providers.providers.base import BaseModelProvider +from core.model_providers.rules import provider_rules +from extensions.ext_database import db +from models.provider import TenantPreferredModelProvider, ProviderType, Provider, ProviderQuotaType + +DEFAULT_MODELS = { + ModelType.TEXT_GENERATION.value: { + 'provider_name': 'openai', + 'model_name': 'gpt-3.5-turbo', + }, + ModelType.EMBEDDINGS.value: { + 'provider_name': 'openai', + 'model_name': 'text-embedding-ada-002', + }, + ModelType.SPEECH_TO_TEXT.value: { + 'provider_name': 'openai', + 'model_name': 'whisper-1', + } +} + + +class ModelProviderFactory: + @classmethod + def get_model_provider_class(cls, provider_name: str) -> Type[BaseModelProvider]: + if provider_name == 'openai': + from core.model_providers.providers.openai_provider import OpenAIProvider + return OpenAIProvider + elif provider_name == 'anthropic': + from core.model_providers.providers.anthropic_provider import AnthropicProvider + return AnthropicProvider + elif provider_name == 'minimax': + from core.model_providers.providers.minimax_provider import MinimaxProvider + return MinimaxProvider + elif provider_name == 'spark': + from core.model_providers.providers.spark_provider import SparkProvider + return SparkProvider + elif provider_name == 'tongyi': + from core.model_providers.providers.tongyi_provider import TongyiProvider + return TongyiProvider + elif provider_name == 'wenxin': + from core.model_providers.providers.wenxin_provider import WenxinProvider + return WenxinProvider + elif provider_name == 'chatglm': + from core.model_providers.providers.chatglm_provider import ChatGLMProvider + return ChatGLMProvider + elif provider_name == 'azure_openai': + from core.model_providers.providers.azure_openai_provider import AzureOpenAIProvider + return 
AzureOpenAIProvider + elif provider_name == 'replicate': + from core.model_providers.providers.replicate_provider import ReplicateProvider + return ReplicateProvider + elif provider_name == 'huggingface_hub': + from core.model_providers.providers.huggingface_hub_provider import HuggingfaceHubProvider + return HuggingfaceHubProvider + else: + raise NotImplementedError + + @classmethod + def get_provider_names(cls): + """ + Returns a list of provider names. + """ + return list(provider_rules.keys()) + + @classmethod + def get_provider_rules(cls): + """ + Returns a list of provider rules. + + :return: + """ + return provider_rules + + @classmethod + def get_provider_rule(cls, provider_name: str): + """ + Returns provider rule. + """ + return provider_rules[provider_name] + + @classmethod + def get_preferred_model_provider(cls, tenant_id: str, model_provider_name: str): + """ + get preferred model provider. + + :param tenant_id: a string representing the ID of the tenant. + :param model_provider_name: + :return: + """ + # get preferred provider + preferred_provider = cls._get_preferred_provider(tenant_id, model_provider_name) + if not preferred_provider or not preferred_provider.is_valid: + return None + + # init model provider + model_provider_class = ModelProviderFactory.get_model_provider_class(model_provider_name) + return model_provider_class(provider=preferred_provider) + + @classmethod + def get_preferred_type_by_preferred_model_provider(cls, + tenant_id: str, + model_provider_name: str, + preferred_model_provider: TenantPreferredModelProvider): + """ + get preferred provider type by preferred model provider. + + :param model_provider_name: + :param preferred_model_provider: + :return: + """ + if not preferred_model_provider: + model_provider_rules = ModelProviderFactory.get_provider_rule(model_provider_name) + support_provider_types = model_provider_rules['support_provider_types'] + + if ProviderType.CUSTOM.value in support_provider_types: + custom_provider = db.session.query(Provider) \ + .filter( + Provider.tenant_id == tenant_id, + Provider.provider_name == model_provider_name, + Provider.provider_type == ProviderType.CUSTOM.value, + Provider.is_valid == True + ).first() + + if custom_provider: + return ProviderType.CUSTOM.value + + model_provider = cls.get_model_provider_class(model_provider_name) + + if ProviderType.SYSTEM.value in support_provider_types \ + and model_provider.is_provider_type_system_supported(): + return ProviderType.SYSTEM.value + elif ProviderType.CUSTOM.value in support_provider_types: + return ProviderType.CUSTOM.value + else: + return preferred_model_provider.preferred_provider_type + + @classmethod + def _get_preferred_provider(cls, tenant_id: str, model_provider_name: str): + """ + get preferred provider of tenant. 
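+ When the preferred type is SYSTEM, valid hosted providers are checked in quota-type order and skipped once their quota is exhausted; otherwise the tenant's CUSTOM provider record is returned, creating an empty invalid one if none exists yet.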
+ + :param tenant_id: + :param model_provider_name: + :return: + """ + # get preferred provider type + preferred_provider_type = cls._get_preferred_provider_type(tenant_id, model_provider_name) + + # get providers by preferred provider type + providers = db.session.query(Provider) \ + .filter( + Provider.tenant_id == tenant_id, + Provider.provider_name == model_provider_name, + Provider.provider_type == preferred_provider_type + ).all() + + no_system_provider = False + if preferred_provider_type == ProviderType.SYSTEM.value: + quota_type_to_provider_dict = {} + for provider in providers: + quota_type_to_provider_dict[provider.quota_type] = provider + + model_provider_rules = ModelProviderFactory.get_provider_rule(model_provider_name) + for quota_type_enum in ProviderQuotaType: + quota_type = quota_type_enum.value + if quota_type in model_provider_rules['system_config']['supported_quota_types'] \ + and quota_type in quota_type_to_provider_dict.keys(): + provider = quota_type_to_provider_dict[quota_type] + if provider.is_valid and provider.quota_limit > provider.quota_used: + return provider + + no_system_provider = True + + if no_system_provider: + providers = db.session.query(Provider) \ + .filter( + Provider.tenant_id == tenant_id, + Provider.provider_name == model_provider_name, + Provider.provider_type == ProviderType.CUSTOM.value + ).all() + + if preferred_provider_type == ProviderType.CUSTOM.value or no_system_provider: + if providers: + return providers[0] + else: + try: + provider = Provider( + tenant_id=tenant_id, + provider_name=model_provider_name, + provider_type=ProviderType.CUSTOM.value, + is_valid=False + ) + db.session.add(provider) + db.session.commit() + except IntegrityError: + db.session.rollback() + provider = db.session.query(Provider) \ + .filter( + Provider.tenant_id == tenant_id, + Provider.provider_name == model_provider_name, + Provider.provider_type == ProviderType.CUSTOM.value + ).first() + + return provider + + return None + + @classmethod + def _get_preferred_provider_type(cls, tenant_id: str, model_provider_name: str): + """ + get preferred provider type of tenant. 
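+ Looks up the tenant's TenantPreferredModelProvider record and, when none is stored, derives the effective type from the provider's supported provider types.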
+ + :param tenant_id: + :param model_provider_name: + :return: + """ + preferred_model_provider = db.session.query(TenantPreferredModelProvider) \ + .filter( + TenantPreferredModelProvider.tenant_id == tenant_id, + TenantPreferredModelProvider.provider_name == model_provider_name + ).first() + + return cls.get_preferred_type_by_preferred_model_provider(tenant_id, model_provider_name, preferred_model_provider) diff --git a/api/tests/test_libs/__init__.py b/api/core/model_providers/models/__init__.py similarity index 100% rename from api/tests/test_libs/__init__.py rename to api/core/model_providers/models/__init__.py diff --git a/api/core/model_providers/models/base.py b/api/core/model_providers/models/base.py new file mode 100644 index 000000000..01f83efa8 --- /dev/null +++ b/api/core/model_providers/models/base.py @@ -0,0 +1,22 @@ +from abc import ABC +from typing import Any + +from core.model_providers.providers.base import BaseModelProvider + + +class BaseProviderModel(ABC): + _client: Any + _model_provider: BaseModelProvider + + def __init__(self, model_provider: BaseModelProvider, client: Any): + self._model_provider = model_provider + self._client = client + + @property + def client(self): + return self._client + + @property + def model_provider(self): + return self._model_provider + diff --git a/api/tests/test_models/__init__.py b/api/core/model_providers/models/embedding/__init__.py similarity index 100% rename from api/tests/test_models/__init__.py rename to api/core/model_providers/models/embedding/__init__.py diff --git a/api/core/model_providers/models/embedding/azure_openai_embedding.py b/api/core/model_providers/models/embedding/azure_openai_embedding.py new file mode 100644 index 000000000..81f08784b --- /dev/null +++ b/api/core/model_providers/models/embedding/azure_openai_embedding.py @@ -0,0 +1,78 @@ +import decimal +import logging + +import openai +import tiktoken +from langchain.embeddings import OpenAIEmbeddings + +from core.model_providers.error import LLMBadRequestError, LLMAuthorizationError, LLMRateLimitError, \ + LLMAPIUnavailableError, LLMAPIConnectionError +from core.model_providers.models.embedding.base import BaseEmbedding +from core.model_providers.providers.base import BaseModelProvider + +AZURE_OPENAI_API_VERSION = '2023-07-01-preview' + + +class AzureOpenAIEmbedding(BaseEmbedding): + def __init__(self, model_provider: BaseModelProvider, name: str): + self.credentials = model_provider.get_model_credentials( + model_name=name, + model_type=self.type + ) + + client = OpenAIEmbeddings( + deployment=name, + openai_api_type='azure', + openai_api_version=AZURE_OPENAI_API_VERSION, + chunk_size=16, + max_retries=1, + **self.credentials + ) + + super().__init__(model_provider, client, name) + + def get_num_tokens(self, text: str) -> int: + """ + get num tokens of text. 
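+ Counts tokens with the tiktoken encoding of the configured base model; empty input short-circuits to 0.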
+ + :param text: + :return: + """ + if len(text) == 0: + return 0 + + enc = tiktoken.encoding_for_model(self.credentials.get('base_model_name')) + + tokenized_text = enc.encode(text) + + # calculate the number of tokens in the encoded text + return len(tokenized_text) + + def get_token_price(self, tokens: int): + tokens_per_1k = (decimal.Decimal(tokens) / 1000).quantize(decimal.Decimal('0.001'), + rounding=decimal.ROUND_HALF_UP) + + total_price = tokens_per_1k * decimal.Decimal('0.0001') + return total_price.quantize(decimal.Decimal('0.0000001'), rounding=decimal.ROUND_HALF_UP) + + def get_currency(self): + return 'USD' + + def handle_exceptions(self, ex: Exception) -> Exception: + if isinstance(ex, openai.error.InvalidRequestError): + logging.warning("Invalid request to Azure OpenAI API.") + return LLMBadRequestError(str(ex)) + elif isinstance(ex, openai.error.APIConnectionError): + logging.warning("Failed to connect to Azure OpenAI API.") + return LLMAPIConnectionError(ex.__class__.__name__ + ":" + str(ex)) + elif isinstance(ex, (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout)): + logging.warning("Azure OpenAI service unavailable.") + return LLMAPIUnavailableError(ex.__class__.__name__ + ":" + str(ex)) + elif isinstance(ex, openai.error.RateLimitError): + return LLMRateLimitError('Azure ' + str(ex)) + elif isinstance(ex, openai.error.AuthenticationError): + raise LLMAuthorizationError('Azure ' + str(ex)) + elif isinstance(ex, openai.error.OpenAIError): + return LLMBadRequestError('Azure ' + ex.__class__.__name__ + ":" + str(ex)) + else: + return ex diff --git a/api/core/model_providers/models/embedding/base.py b/api/core/model_providers/models/embedding/base.py new file mode 100644 index 000000000..fc42d88bc --- /dev/null +++ b/api/core/model_providers/models/embedding/base.py @@ -0,0 +1,40 @@ +from abc import abstractmethod +from typing import Any + +import tiktoken +from langchain.schema.language_model import _get_token_ids_default_method + +from core.model_providers.models.base import BaseProviderModel +from core.model_providers.models.entity.model_params import ModelType +from core.model_providers.providers.base import BaseModelProvider + + +class BaseEmbedding(BaseProviderModel): + name: str + type: ModelType = ModelType.EMBEDDINGS + + def __init__(self, model_provider: BaseModelProvider, client: Any, name: str): + super().__init__(model_provider, client) + self.name = name + + def get_num_tokens(self, text: str) -> int: + """ + get num tokens of text. 
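+ Approximates the count with langchain's default token-id method; provider subclasses override this with an exact tokenizer where one is available.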
+ + :param text: + :return: + """ + if len(text) == 0: + return 0 + + return len(_get_token_ids_default_method(text)) + + def get_token_price(self, tokens: int): + return 0 + + def get_currency(self): + return 'USD' + + @abstractmethod + def handle_exceptions(self, ex: Exception) -> Exception: + raise NotImplementedError diff --git a/api/core/model_providers/models/embedding/minimax_embedding.py b/api/core/model_providers/models/embedding/minimax_embedding.py new file mode 100644 index 000000000..d8cb22f34 --- /dev/null +++ b/api/core/model_providers/models/embedding/minimax_embedding.py @@ -0,0 +1,35 @@ +import decimal +import logging + +from langchain.embeddings import MiniMaxEmbeddings + +from core.model_providers.error import LLMBadRequestError +from core.model_providers.models.embedding.base import BaseEmbedding +from core.model_providers.providers.base import BaseModelProvider + + +class MinimaxEmbedding(BaseEmbedding): + def __init__(self, model_provider: BaseModelProvider, name: str): + credentials = model_provider.get_model_credentials( + model_name=name, + model_type=self.type + ) + + client = MiniMaxEmbeddings( + model=name, + **credentials + ) + + super().__init__(model_provider, client, name) + + def get_token_price(self, tokens: int): + return decimal.Decimal('0') + + def get_currency(self): + return 'RMB' + + def handle_exceptions(self, ex: Exception) -> Exception: + if isinstance(ex, ValueError): + return LLMBadRequestError(f"Minimax: {str(ex)}") + else: + return ex diff --git a/api/core/model_providers/models/embedding/openai_embedding.py b/api/core/model_providers/models/embedding/openai_embedding.py new file mode 100644 index 000000000..1d7af94fd --- /dev/null +++ b/api/core/model_providers/models/embedding/openai_embedding.py @@ -0,0 +1,72 @@ +import decimal +import logging + +import openai +import tiktoken +from langchain.embeddings import OpenAIEmbeddings + +from core.model_providers.error import LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, \ + LLMRateLimitError, LLMAuthorizationError +from core.model_providers.models.embedding.base import BaseEmbedding +from core.model_providers.providers.base import BaseModelProvider + + +class OpenAIEmbedding(BaseEmbedding): + def __init__(self, model_provider: BaseModelProvider, name: str): + credentials = model_provider.get_model_credentials( + model_name=name, + model_type=self.type + ) + + client = OpenAIEmbeddings( + max_retries=1, + **credentials + ) + + super().__init__(model_provider, client, name) + + def get_num_tokens(self, text: str) -> int: + """ + get num tokens of text. 
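+ Counts tokens with the tiktoken encoding registered for this model name; empty input short-circuits to 0.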
+ + :param text: + :return: + """ + if len(text) == 0: + return 0 + + enc = tiktoken.encoding_for_model(self.name) + + tokenized_text = enc.encode(text) + + # calculate the number of tokens in the encoded text + return len(tokenized_text) + + def get_token_price(self, tokens: int): + tokens_per_1k = (decimal.Decimal(tokens) / 1000).quantize(decimal.Decimal('0.001'), + rounding=decimal.ROUND_HALF_UP) + + total_price = tokens_per_1k * decimal.Decimal('0.0001') + return total_price.quantize(decimal.Decimal('0.0000001'), rounding=decimal.ROUND_HALF_UP) + + def get_currency(self): + return 'USD' + + def handle_exceptions(self, ex: Exception) -> Exception: + if isinstance(ex, openai.error.InvalidRequestError): + logging.warning("Invalid request to OpenAI API.") + return LLMBadRequestError(str(ex)) + elif isinstance(ex, openai.error.APIConnectionError): + logging.warning("Failed to connect to OpenAI API.") + return LLMAPIConnectionError(ex.__class__.__name__ + ":" + str(ex)) + elif isinstance(ex, (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout)): + logging.warning("OpenAI service unavailable.") + return LLMAPIUnavailableError(ex.__class__.__name__ + ":" + str(ex)) + elif isinstance(ex, openai.error.RateLimitError): + return LLMRateLimitError(str(ex)) + elif isinstance(ex, openai.error.AuthenticationError): + raise LLMAuthorizationError(str(ex)) + elif isinstance(ex, openai.error.OpenAIError): + return LLMBadRequestError(ex.__class__.__name__ + ":" + str(ex)) + else: + return ex diff --git a/api/core/model_providers/models/embedding/replicate_embedding.py b/api/core/model_providers/models/embedding/replicate_embedding.py new file mode 100644 index 000000000..3f7ef2851 --- /dev/null +++ b/api/core/model_providers/models/embedding/replicate_embedding.py @@ -0,0 +1,36 @@ +import decimal + +from replicate.exceptions import ModelError, ReplicateError + +from core.model_providers.error import LLMBadRequestError +from core.model_providers.providers.base import BaseModelProvider +from core.third_party.langchain.embeddings.replicate_embedding import ReplicateEmbeddings +from core.model_providers.models.embedding.base import BaseEmbedding + + +class ReplicateEmbedding(BaseEmbedding): + def __init__(self, model_provider: BaseModelProvider, name: str): + credentials = model_provider.get_model_credentials( + model_name=name, + model_type=self.type + ) + + client = ReplicateEmbeddings( + model=name + ':' + credentials.get('model_version'), + replicate_api_token=credentials.get('replicate_api_token') + ) + + super().__init__(model_provider, client, name) + + def get_token_price(self, tokens: int): + # replicate only pay for prediction seconds + return decimal.Decimal('0') + + def get_currency(self): + return 'USD' + + def handle_exceptions(self, ex: Exception) -> Exception: + if isinstance(ex, (ModelError, ReplicateError)): + return LLMBadRequestError(f"Replicate: {str(ex)}") + else: + return ex diff --git a/api/tests/test_services/__init__.py b/api/core/model_providers/models/entity/__init__.py similarity index 100% rename from api/tests/test_services/__init__.py rename to api/core/model_providers/models/entity/__init__.py diff --git a/api/core/model_providers/models/entity/message.py b/api/core/model_providers/models/entity/message.py new file mode 100644 index 000000000..f2fab9c4b --- /dev/null +++ b/api/core/model_providers/models/entity/message.py @@ -0,0 +1,53 @@ +import enum + +from langchain.schema import HumanMessage, AIMessage, SystemMessage, BaseMessage +from 
pydantic import BaseModel + + +class LLMRunResult(BaseModel): + content: str + prompt_tokens: int + completion_tokens: int + + +class MessageType(enum.Enum): + HUMAN = 'human' + ASSISTANT = 'assistant' + SYSTEM = 'system' + + +class PromptMessage(BaseModel): + type: MessageType = MessageType.HUMAN + content: str = '' + + +def to_lc_messages(messages: list[PromptMessage]): + lc_messages = [] + for message in messages: + if message.type == MessageType.HUMAN: + lc_messages.append(HumanMessage(content=message.content)) + elif message.type == MessageType.ASSISTANT: + lc_messages.append(AIMessage(content=message.content)) + elif message.type == MessageType.SYSTEM: + lc_messages.append(SystemMessage(content=message.content)) + + return lc_messages + + +def to_prompt_messages(messages: list[BaseMessage]): + prompt_messages = [] + for message in messages: + if isinstance(message, HumanMessage): + prompt_messages.append(PromptMessage(content=message.content, type=MessageType.HUMAN)) + elif isinstance(message, AIMessage): + prompt_messages.append(PromptMessage(content=message.content, type=MessageType.ASSISTANT)) + elif isinstance(message, SystemMessage): + prompt_messages.append(PromptMessage(content=message.content, type=MessageType.SYSTEM)) + return prompt_messages + + +def str_to_prompt_messages(texts: list[str]): + prompt_messages = [] + for text in texts: + prompt_messages.append(PromptMessage(content=text)) + return prompt_messages diff --git a/api/core/model_providers/models/entity/model_params.py b/api/core/model_providers/models/entity/model_params.py new file mode 100644 index 000000000..2a6a1bc51 --- /dev/null +++ b/api/core/model_providers/models/entity/model_params.py @@ -0,0 +1,59 @@ +import enum +from typing import Optional, TypeVar, Generic + +from langchain.load.serializable import Serializable +from pydantic import BaseModel + + +class ModelMode(enum.Enum): + COMPLETION = 'completion' + CHAT = 'chat' + + +class ModelType(enum.Enum): + TEXT_GENERATION = 'text-generation' + EMBEDDINGS = 'embeddings' + SPEECH_TO_TEXT = 'speech2text' + IMAGE = 'image' + VIDEO = 'video' + MODERATION = 'moderation' + + @staticmethod + def value_of(value): + for member in ModelType: + if member.value == value: + return member + raise ValueError(f"No matching enum found for value '{value}'") + + +class ModelKwargs(BaseModel): + max_tokens: Optional[int] + temperature: Optional[float] + top_p: Optional[float] + presence_penalty: Optional[float] + frequency_penalty: Optional[float] + + +class KwargRuleType(enum.Enum): + STRING = 'string' + INTEGER = 'integer' + FLOAT = 'float' + + +T = TypeVar('T') + + +class KwargRule(Generic[T], BaseModel): + enabled: bool = True + min: Optional[T] = None + max: Optional[T] = None + default: Optional[T] = None + alias: Optional[str] = None + + +class ModelKwargsRules(BaseModel): + max_tokens: KwargRule = KwargRule[int](enabled=False) + temperature: KwargRule = KwargRule[float](enabled=False) + top_p: KwargRule = KwargRule[float](enabled=False) + presence_penalty: KwargRule = KwargRule[float](enabled=False) + frequency_penalty: KwargRule = KwargRule[float](enabled=False) diff --git a/api/core/model_providers/models/entity/provider.py b/api/core/model_providers/models/entity/provider.py new file mode 100644 index 000000000..07249eb37 --- /dev/null +++ b/api/core/model_providers/models/entity/provider.py @@ -0,0 +1,10 @@ +from enum import Enum + + +class ProviderQuotaUnit(Enum): + TIMES = 'times' + TOKENS = 'tokens' + + +class ModelFeature(Enum): + AGENT_THOUGHT = 
'agent_thought' diff --git a/api/core/model_providers/models/llm/__init__.py b/api/core/model_providers/models/llm/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/api/core/model_providers/models/llm/anthropic_model.py b/api/core/model_providers/models/llm/anthropic_model.py new file mode 100644 index 000000000..69dd76611 --- /dev/null +++ b/api/core/model_providers/models/llm/anthropic_model.py @@ -0,0 +1,107 @@ +import decimal +import logging +from functools import wraps +from typing import List, Optional, Any + +import anthropic +from langchain.callbacks.manager import Callbacks +from langchain.chat_models import ChatAnthropic +from langchain.schema import LLMResult + +from core.model_providers.error import LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, \ + LLMRateLimitError, LLMAuthorizationError +from core.model_providers.models.llm.base import BaseLLM +from core.model_providers.models.entity.message import PromptMessage, MessageType +from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs + + +class AnthropicModel(BaseLLM): + model_mode: ModelMode = ModelMode.CHAT + + def _init_client(self) -> Any: + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs) + return ChatAnthropic( + model=self.name, + streaming=self.streaming, + callbacks=self.callbacks, + default_request_timeout=60, + **self.credentials, + **provider_model_kwargs + ) + + def _run(self, messages: List[PromptMessage], + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs) -> LLMResult: + """ + run predict by prompt messages and stop words. + + :param messages: + :param stop: + :param callbacks: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + return self._client.generate([prompts], stop, callbacks) + + def get_num_tokens(self, messages: List[PromptMessage]) -> int: + """ + get num tokens of prompt messages. 
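+ Converts the messages to langchain format and subtracts one token per message from the client's count, clamped at zero.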
+ + :param messages: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + return max(self._client.get_num_tokens_from_messages(prompts) - len(prompts), 0) + + def get_token_price(self, tokens: int, message_type: MessageType): + model_unit_prices = { + 'claude-instant-1': { + 'prompt': decimal.Decimal('1.63'), + 'completion': decimal.Decimal('5.51'), + }, + 'claude-2': { + 'prompt': decimal.Decimal('11.02'), + 'completion': decimal.Decimal('32.68'), + }, + } + + if message_type == MessageType.HUMAN or message_type == MessageType.SYSTEM: + unit_price = model_unit_prices[self.name]['prompt'] + else: + unit_price = model_unit_prices[self.name]['completion'] + + tokens_per_1m = (decimal.Decimal(tokens) / 1000000).quantize(decimal.Decimal('0.000001'), + rounding=decimal.ROUND_HALF_UP) + + total_price = tokens_per_1m * unit_price + return total_price.quantize(decimal.Decimal('0.00000001'), rounding=decimal.ROUND_HALF_UP) + + def get_currency(self): + return 'USD' + + def _set_model_kwargs(self, model_kwargs: ModelKwargs): + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs) + for k, v in provider_model_kwargs.items(): + if hasattr(self.client, k): + setattr(self.client, k, v) + + def handle_exceptions(self, ex: Exception) -> Exception: + if isinstance(ex, anthropic.APIConnectionError): + logging.warning("Failed to connect to Anthropic API.") + return LLMAPIConnectionError(f"Anthropic: The server could not be reached, cause: {ex.__cause__}") + elif isinstance(ex, anthropic.RateLimitError): + return LLMRateLimitError("Anthropic: A 429 status code was received; we should back off a bit.") + elif isinstance(ex, anthropic.AuthenticationError): + return LLMAuthorizationError(f"Anthropic: {ex.message}") + elif isinstance(ex, anthropic.BadRequestError): + return LLMBadRequestError(f"Anthropic: {ex.message}") + elif isinstance(ex, anthropic.APIStatusError): + return LLMAPIUnavailableError(f"Anthropic: code: {ex.status_code}, cause: {ex.message}") + else: + return ex + + @classmethod + def support_streaming(cls): + return True + diff --git a/api/core/model_providers/models/llm/azure_openai_model.py b/api/core/model_providers/models/llm/azure_openai_model.py new file mode 100644 index 000000000..b2f6159b4 --- /dev/null +++ b/api/core/model_providers/models/llm/azure_openai_model.py @@ -0,0 +1,177 @@ +import decimal +import logging +from functools import wraps +from typing import List, Optional, Any + +import openai +from langchain.callbacks.manager import Callbacks +from langchain.schema import LLMResult + +from core.model_providers.providers.base import BaseModelProvider +from core.third_party.langchain.llms.azure_chat_open_ai import EnhanceAzureChatOpenAI +from core.third_party.langchain.llms.azure_open_ai import EnhanceAzureOpenAI +from core.model_providers.error import LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, \ + LLMRateLimitError, LLMAuthorizationError +from core.model_providers.models.llm.base import BaseLLM +from core.model_providers.models.entity.message import PromptMessage, MessageType +from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs + +AZURE_OPENAI_API_VERSION = '2023-07-01-preview' + + +class AzureOpenAIModel(BaseLLM): + def __init__(self, model_provider: BaseModelProvider, + name: str, + model_kwargs: ModelKwargs, + streaming: bool = False, + callbacks: Callbacks = None): + if name == 'text-davinci-003': + self.model_mode = ModelMode.COMPLETION + else: + self.model_mode = 
ModelMode.CHAT + + super().__init__(model_provider, name, model_kwargs, streaming, callbacks) + + def _init_client(self) -> Any: + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs) + if self.name == 'text-davinci-003': + client = EnhanceAzureOpenAI( + deployment_name=self.name, + streaming=self.streaming, + request_timeout=60, + openai_api_type='azure', + openai_api_version=AZURE_OPENAI_API_VERSION, + openai_api_key=self.credentials.get('openai_api_key'), + openai_api_base=self.credentials.get('openai_api_base'), + callbacks=self.callbacks, + **provider_model_kwargs + ) + else: + extra_model_kwargs = { + 'top_p': provider_model_kwargs.get('top_p'), + 'frequency_penalty': provider_model_kwargs.get('frequency_penalty'), + 'presence_penalty': provider_model_kwargs.get('presence_penalty'), + } + + client = EnhanceAzureChatOpenAI( + deployment_name=self.name, + temperature=provider_model_kwargs.get('temperature'), + max_tokens=provider_model_kwargs.get('max_tokens'), + model_kwargs=extra_model_kwargs, + streaming=self.streaming, + request_timeout=60, + openai_api_type='azure', + openai_api_version=AZURE_OPENAI_API_VERSION, + openai_api_key=self.credentials.get('openai_api_key'), + openai_api_base=self.credentials.get('openai_api_base'), + callbacks=self.callbacks, + ) + + return client + + def _run(self, messages: List[PromptMessage], + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs) -> LLMResult: + """ + run predict by prompt messages and stop words. + + :param messages: + :param stop: + :param callbacks: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + return self._client.generate([prompts], stop, callbacks) + + def get_num_tokens(self, messages: List[PromptMessage]) -> int: + """ + get num tokens of prompt messages. 
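+
+        Example (illustrative sketch; assumes a configured deployment)::
+
+            from core.model_providers.models.entity.message import PromptMessage
+
+            # a 'text-davinci-003' deployment receives a plain string prompt
+            # and takes the get_num_tokens(str) path; chat deployments are
+            # counted via get_num_tokens_from_messages instead
+            count = model.get_num_tokens([PromptMessage(content='hello')])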
+ + :param messages: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + if isinstance(prompts, str): + return self._client.get_num_tokens(prompts) + else: + return max(self._client.get_num_tokens_from_messages(prompts) - len(prompts), 0) + + def get_token_price(self, tokens: int, message_type: MessageType): + model_unit_prices = { + 'gpt-4': { + 'prompt': decimal.Decimal('0.03'), + 'completion': decimal.Decimal('0.06'), + }, + 'gpt-4-32k': { + 'prompt': decimal.Decimal('0.06'), + 'completion': decimal.Decimal('0.12') + }, + 'gpt-35-turbo': { + 'prompt': decimal.Decimal('0.0015'), + 'completion': decimal.Decimal('0.002') + }, + 'gpt-35-turbo-16k': { + 'prompt': decimal.Decimal('0.003'), + 'completion': decimal.Decimal('0.004') + }, + 'text-davinci-003': { + 'prompt': decimal.Decimal('0.02'), + 'completion': decimal.Decimal('0.02') + }, + } + + base_model_name = self.credentials.get("base_model_name") + if message_type == MessageType.HUMAN or message_type == MessageType.SYSTEM: + unit_price = model_unit_prices[base_model_name]['prompt'] + else: + unit_price = model_unit_prices[base_model_name]['completion'] + + tokens_per_1k = (decimal.Decimal(tokens) / 1000).quantize(decimal.Decimal('0.001'), + rounding=decimal.ROUND_HALF_UP) + + total_price = tokens_per_1k * unit_price + return total_price.quantize(decimal.Decimal('0.0000001'), rounding=decimal.ROUND_HALF_UP) + + def get_currency(self): + return 'USD' + + def _set_model_kwargs(self, model_kwargs: ModelKwargs): + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs) + if self.name == 'text-davinci-003': + for k, v in provider_model_kwargs.items(): + if hasattr(self.client, k): + setattr(self.client, k, v) + else: + extra_model_kwargs = { + 'top_p': provider_model_kwargs.get('top_p'), + 'frequency_penalty': provider_model_kwargs.get('frequency_penalty'), + 'presence_penalty': provider_model_kwargs.get('presence_penalty'), + } + + self.client.temperature = provider_model_kwargs.get('temperature') + self.client.max_tokens = provider_model_kwargs.get('max_tokens') + self.client.model_kwargs = extra_model_kwargs + + def handle_exceptions(self, ex: Exception) -> Exception: + if isinstance(ex, openai.error.InvalidRequestError): + logging.warning("Invalid request to Azure OpenAI API.") + return LLMBadRequestError(str(ex)) + elif isinstance(ex, openai.error.APIConnectionError): + logging.warning("Failed to connect to Azure OpenAI API.") + return LLMAPIConnectionError(ex.__class__.__name__ + ":" + str(ex)) + elif isinstance(ex, (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout)): + logging.warning("Azure OpenAI service unavailable.") + return LLMAPIUnavailableError(ex.__class__.__name__ + ":" + str(ex)) + elif isinstance(ex, openai.error.RateLimitError): + return LLMRateLimitError('Azure ' + str(ex)) + elif isinstance(ex, openai.error.AuthenticationError): + raise LLMAuthorizationError('Azure ' + str(ex)) + elif isinstance(ex, openai.error.OpenAIError): + return LLMBadRequestError('Azure ' + ex.__class__.__name__ + ":" + str(ex)) + else: + return ex + + @classmethod + def support_streaming(cls): + return True \ No newline at end of file diff --git a/api/core/model_providers/models/llm/base.py b/api/core/model_providers/models/llm/base.py new file mode 100644 index 000000000..31573dd58 --- /dev/null +++ b/api/core/model_providers/models/llm/base.py @@ -0,0 +1,269 @@ +from abc import abstractmethod +from typing import List, Optional, Any, Union + +from 
langchain.callbacks.manager import Callbacks +from langchain.schema import LLMResult, SystemMessage, AIMessage, HumanMessage, BaseMessage, ChatGeneration + +from core.callback_handler.std_out_callback_handler import DifyStreamingStdOutCallbackHandler, DifyStdOutCallbackHandler +from core.model_providers.models.base import BaseProviderModel +from core.model_providers.models.entity.message import PromptMessage, MessageType, LLMRunResult +from core.model_providers.models.entity.model_params import ModelType, ModelKwargs, ModelMode, ModelKwargsRules +from core.model_providers.providers.base import BaseModelProvider +from core.third_party.langchain.llms.fake import FakeLLM + + +class BaseLLM(BaseProviderModel): + model_mode: ModelMode = ModelMode.COMPLETION + name: str + model_kwargs: ModelKwargs + credentials: dict + streaming: bool = False + type: ModelType = ModelType.TEXT_GENERATION + deduct_quota: bool = True + + def __init__(self, model_provider: BaseModelProvider, + name: str, + model_kwargs: ModelKwargs, + streaming: bool = False, + callbacks: Callbacks = None): + self.name = name + self.model_rules = model_provider.get_model_parameter_rules(name, self.type) + self.model_kwargs = model_kwargs if model_kwargs else ModelKwargs( + max_tokens=None, + temperature=None, + top_p=None, + presence_penalty=None, + frequency_penalty=None + ) + self.credentials = model_provider.get_model_credentials( + model_name=name, + model_type=self.type + ) + self.streaming = streaming + + if streaming: + default_callback = DifyStreamingStdOutCallbackHandler() + else: + default_callback = DifyStdOutCallbackHandler() + + if not callbacks: + callbacks = [default_callback] + else: + callbacks.append(default_callback) + + self.callbacks = callbacks + + client = self._init_client() + super().__init__(model_provider, client) + + @abstractmethod + def _init_client(self) -> Any: + raise NotImplementedError + + def run(self, messages: List[PromptMessage], + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs) -> LLMRunResult: + """ + run predict by prompt messages and stop words. 
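+
+        Example (illustrative sketch; model stands for any concrete
+        subclass instance, and the stop sequence is a placeholder)::
+
+            from core.model_providers.models.entity.message import PromptMessage
+
+            result = model.run(
+                messages=[PromptMessage(content='Say hi.')],
+                stop=['\n\n']
+            )
+            # LLMRunResult carries the completion text and both token counts
+            print(result.content, result.prompt_tokens, result.completion_tokens)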
+ + :param messages: + :param stop: + :param callbacks: + :return: + """ + if self.deduct_quota: + self.model_provider.check_quota_over_limit() + + if not callbacks: + callbacks = self.callbacks + else: + callbacks.extend(self.callbacks) + + if 'fake_response' in kwargs and kwargs['fake_response']: + prompts = self._get_prompt_from_messages(messages, ModelMode.CHAT) + fake_llm = FakeLLM( + response=kwargs['fake_response'], + num_token_func=self.get_num_tokens, + streaming=self.streaming, + callbacks=callbacks + ) + result = fake_llm.generate([prompts]) + else: + try: + result = self._run( + messages=messages, + stop=stop, + callbacks=callbacks if not (self.streaming and not self.support_streaming()) else None, + **kwargs + ) + except Exception as ex: + raise self.handle_exceptions(ex) + + if isinstance(result.generations[0][0], ChatGeneration): + completion_content = result.generations[0][0].message.content + else: + completion_content = result.generations[0][0].text + + if self.streaming and not self.support_streaming(): + # use FakeLLM to simulate streaming when current model not support streaming but streaming is True + prompts = self._get_prompt_from_messages(messages, ModelMode.CHAT) + fake_llm = FakeLLM( + response=completion_content, + num_token_func=self.get_num_tokens, + streaming=self.streaming, + callbacks=callbacks + ) + fake_llm.generate([prompts]) + + if result.llm_output and result.llm_output['token_usage']: + prompt_tokens = result.llm_output['token_usage']['prompt_tokens'] + completion_tokens = result.llm_output['token_usage']['completion_tokens'] + total_tokens = result.llm_output['token_usage']['total_tokens'] + else: + prompt_tokens = self.get_num_tokens(messages) + completion_tokens = self.get_num_tokens([PromptMessage(content=completion_content, type=MessageType.ASSISTANT)]) + total_tokens = prompt_tokens + completion_tokens + + if self.deduct_quota: + self.model_provider.deduct_quota(total_tokens) + + return LLMRunResult( + content=completion_content, + prompt_tokens=prompt_tokens, + completion_tokens=completion_tokens + ) + + @abstractmethod + def _run(self, messages: List[PromptMessage], + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs) -> LLMResult: + """ + run predict by prompt messages and stop words. + + :param messages: + :param stop: + :param callbacks: + :return: + """ + raise NotImplementedError + + @abstractmethod + def get_num_tokens(self, messages: List[PromptMessage]) -> int: + """ + get num tokens of prompt messages. + + :param messages: + :return: + """ + raise NotImplementedError + + @abstractmethod + def get_token_price(self, tokens: int, message_type: MessageType): + """ + get token price. + + :param tokens: + :param message_type: + :return: + """ + raise NotImplementedError + + @abstractmethod + def get_currency(self): + """ + get token currency. + + :return: + """ + raise NotImplementedError + + def get_model_kwargs(self): + return self.model_kwargs + + def set_model_kwargs(self, model_kwargs: ModelKwargs): + self.model_kwargs = model_kwargs + self._set_model_kwargs(model_kwargs) + + @abstractmethod + def _set_model_kwargs(self, model_kwargs: ModelKwargs): + raise NotImplementedError + + @abstractmethod + def handle_exceptions(self, ex: Exception) -> Exception: + """ + Handle llm run exceptions. + + :param ex: + :return: + """ + raise NotImplementedError + + def add_callbacks(self, callbacks: Callbacks): + """ + Add callbacks to client. 
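+
+        Example (illustrative sketch using a stock langchain handler)::
+
+            from langchain.callbacks import StdOutCallbackHandler
+
+            model.add_callbacks([StdOutCallbackHandler()])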
+
+        :param callbacks:
+        :return:
+        """
+        if not self.client.callbacks:
+            self.client.callbacks = callbacks
+        else:
+            self.client.callbacks.extend(callbacks)
+
+    @classmethod
+    def support_streaming(cls):
+        return False
+
+    def _get_prompt_from_messages(self, messages: List[PromptMessage],
+                                  model_mode: Optional[ModelMode] = None) -> Union[str, List[BaseMessage]]:
+        if len(messages) == 0:
+            raise ValueError("prompt must not be empty.")
+
+        if not model_mode:
+            model_mode = self.model_mode
+
+        if model_mode == ModelMode.COMPLETION:
+            return messages[0].content
+        else:
+            chat_messages = []
+            for message in messages:
+                if message.type == MessageType.HUMAN:
+                    chat_messages.append(HumanMessage(content=message.content))
+                elif message.type == MessageType.ASSISTANT:
+                    chat_messages.append(AIMessage(content=message.content))
+                elif message.type == MessageType.SYSTEM:
+                    chat_messages.append(SystemMessage(content=message.content))
+
+            return chat_messages
+
+    def _to_model_kwargs_input(self, model_rules: ModelKwargsRules, model_kwargs: ModelKwargs) -> dict:
+        """
+        convert model kwargs to provider model kwargs.
+
+        :param model_rules:
+        :param model_kwargs:
+        :return:
+        """
+        model_kwargs_input = {}
+        for key, value in model_kwargs.dict().items():
+            rule = getattr(model_rules, key)
+            if not rule.enabled:
+                continue
+
+            if rule.alias:
+                key = rule.alias
+
+            if rule.default is not None and value is None:
+                value = rule.default
+
+            if rule.min is not None:
+                value = max(value, rule.min)
+
+            if rule.max is not None:
+                value = min(value, rule.max)
+
+            model_kwargs_input[key] = value
+
+        return model_kwargs_input
diff --git a/api/core/model_providers/models/llm/chatglm_model.py b/api/core/model_providers/models/llm/chatglm_model.py
new file mode 100644
index 000000000..42036dbfd
--- /dev/null
+++ b/api/core/model_providers/models/llm/chatglm_model.py
@@ -0,0 +1,70 @@
+import decimal
+from typing import List, Optional, Any
+
+from langchain.callbacks.manager import Callbacks
+from langchain.llms import ChatGLM
+from langchain.schema import LLMResult
+
+from core.model_providers.error import LLMBadRequestError
+from core.model_providers.models.llm.base import BaseLLM
+from core.model_providers.models.entity.message import PromptMessage, MessageType
+from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs
+
+
+class ChatGLMModel(BaseLLM):
+    model_mode: ModelMode = ModelMode.COMPLETION
+
+    def _init_client(self) -> Any:
+        provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs)
+        return ChatGLM(
+            callbacks=self.callbacks,
+            endpoint_url=self.credentials.get('api_base'),
+            **provider_model_kwargs
+        )
+
+    def _run(self, messages: List[PromptMessage],
+             stop: Optional[List[str]] = None,
+             callbacks: Callbacks = None,
+             **kwargs) -> LLMResult:
+        """
+        run predict by prompt messages and stop words.
+
+        :param messages:
+        :param stop:
+        :param callbacks:
+        :return:
+        """
+        prompts = self._get_prompt_from_messages(messages)
+        return self._client.generate([prompts], stop, callbacks)
+
+    def get_num_tokens(self, messages: List[PromptMessage]) -> int:
+        """
+        get num tokens of prompt messages.
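+
+        Example (illustrative sketch; the endpoint URL is a placeholder)::
+
+            # the client wraps langchain's ChatGLM pointed at a self-hosted
+            # endpoint, e.g. credentials = {'api_base': 'http://127.0.0.1:8000'}
+            count = model.get_num_tokens([PromptMessage(content='你好')])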
+ + :param messages: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + return max(self._client.get_num_tokens(prompts), 0) + + def get_token_price(self, tokens: int, message_type: MessageType): + return decimal.Decimal('0') + + def get_currency(self): + return 'RMB' + + def _set_model_kwargs(self, model_kwargs: ModelKwargs): + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs) + for k, v in provider_model_kwargs.items(): + if hasattr(self.client, k): + setattr(self.client, k, v) + + def handle_exceptions(self, ex: Exception) -> Exception: + if isinstance(ex, ValueError): + return LLMBadRequestError(f"ChatGLM: {str(ex)}") + else: + return ex + + @classmethod + def support_streaming(cls): + return False diff --git a/api/core/model_providers/models/llm/huggingface_hub_model.py b/api/core/model_providers/models/llm/huggingface_hub_model.py new file mode 100644 index 000000000..f5deded51 --- /dev/null +++ b/api/core/model_providers/models/llm/huggingface_hub_model.py @@ -0,0 +1,82 @@ +import decimal +from functools import wraps +from typing import List, Optional, Any + +from langchain import HuggingFaceHub +from langchain.callbacks.manager import Callbacks +from langchain.llms import HuggingFaceEndpoint +from langchain.schema import LLMResult + +from core.model_providers.error import LLMBadRequestError +from core.model_providers.models.llm.base import BaseLLM +from core.model_providers.models.entity.message import PromptMessage, MessageType +from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs + + +class HuggingfaceHubModel(BaseLLM): + model_mode: ModelMode = ModelMode.COMPLETION + + def _init_client(self) -> Any: + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs) + if self.credentials['huggingfacehub_api_type'] == 'inference_endpoints': + client = HuggingFaceEndpoint( + endpoint_url=self.credentials['huggingfacehub_endpoint_url'], + task='text2text-generation', + model_kwargs=provider_model_kwargs, + huggingfacehub_api_token=self.credentials['huggingfacehub_api_token'], + callbacks=self.callbacks, + ) + else: + client = HuggingFaceHub( + repo_id=self.name, + task=self.credentials['task_type'], + model_kwargs=provider_model_kwargs, + huggingfacehub_api_token=self.credentials['huggingfacehub_api_token'], + callbacks=self.callbacks, + ) + + return client + + def _run(self, messages: List[PromptMessage], + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs) -> LLMResult: + """ + run predict by prompt messages and stop words. + + :param messages: + :param stop: + :param callbacks: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + return self._client.generate([prompts], stop, callbacks) + + def get_num_tokens(self, messages: List[PromptMessage]) -> int: + """ + get num tokens of prompt messages. 
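+
+        Example (illustrative sketch; the endpoint URL and token are
+        placeholders)::
+
+            # inference-endpoint credentials as consumed by _init_client:
+            # {'huggingfacehub_api_type': 'inference_endpoints',
+            #  'huggingfacehub_endpoint_url': 'https://xxx.endpoints.huggingface.cloud',
+            #  'huggingfacehub_api_token': 'hf_...'}
+            count = model.get_num_tokens([PromptMessage(content='hello')])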
+ + :param messages: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + return self._client.get_num_tokens(prompts) + + def get_token_price(self, tokens: int, message_type: MessageType): + # not support calc price + return decimal.Decimal('0') + + def get_currency(self): + return 'USD' + + def _set_model_kwargs(self, model_kwargs: ModelKwargs): + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs) + self.client.model_kwargs = provider_model_kwargs + + def handle_exceptions(self, ex: Exception) -> Exception: + return LLMBadRequestError(f"Huggingface Hub: {str(ex)}") + + @classmethod + def support_streaming(cls): + return False + diff --git a/api/core/model_providers/models/llm/minimax_model.py b/api/core/model_providers/models/llm/minimax_model.py new file mode 100644 index 000000000..b7e38462f --- /dev/null +++ b/api/core/model_providers/models/llm/minimax_model.py @@ -0,0 +1,70 @@ +import decimal +from typing import List, Optional, Any + +from langchain.callbacks.manager import Callbacks +from langchain.llms import Minimax +from langchain.schema import LLMResult + +from core.model_providers.error import LLMBadRequestError +from core.model_providers.models.llm.base import BaseLLM +from core.model_providers.models.entity.message import PromptMessage, MessageType +from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs + + +class MinimaxModel(BaseLLM): + model_mode: ModelMode = ModelMode.COMPLETION + + def _init_client(self) -> Any: + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs) + return Minimax( + model=self.name, + model_kwargs={ + 'stream': False + }, + callbacks=self.callbacks, + **self.credentials, + **provider_model_kwargs + ) + + def _run(self, messages: List[PromptMessage], + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs) -> LLMResult: + """ + run predict by prompt messages and stop words. + + :param messages: + :param stop: + :param callbacks: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + return self._client.generate([prompts], stop, callbacks) + + def get_num_tokens(self, messages: List[PromptMessage]) -> int: + """ + get num tokens of prompt messages. 
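+
+        Example (illustrative sketch)::
+
+            # the underlying Minimax client is created with
+            # model_kwargs={'stream': False}; when streaming is requested,
+            # BaseLLM.run simulates it through FakeLLM instead
+            count = model.get_num_tokens([PromptMessage(content='你好')])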
+ + :param messages: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + return max(self._client.get_num_tokens(prompts), 0) + + def get_token_price(self, tokens: int, message_type: MessageType): + return decimal.Decimal('0') + + def get_currency(self): + return 'RMB' + + def _set_model_kwargs(self, model_kwargs: ModelKwargs): + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs) + for k, v in provider_model_kwargs.items(): + if hasattr(self.client, k): + setattr(self.client, k, v) + + def handle_exceptions(self, ex: Exception) -> Exception: + if isinstance(ex, ValueError): + return LLMBadRequestError(f"Minimax: {str(ex)}") + else: + return ex diff --git a/api/core/model_providers/models/llm/openai_model.py b/api/core/model_providers/models/llm/openai_model.py new file mode 100644 index 000000000..e3dab3e9d --- /dev/null +++ b/api/core/model_providers/models/llm/openai_model.py @@ -0,0 +1,219 @@ +import decimal +import logging +from typing import List, Optional, Any + +import openai +from langchain.callbacks.manager import Callbacks +from langchain.schema import LLMResult + +from core.model_providers.providers.base import BaseModelProvider +from core.third_party.langchain.llms.chat_open_ai import EnhanceChatOpenAI +from core.model_providers.error import LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, \ + LLMRateLimitError, LLMAuthorizationError, ModelCurrentlyNotSupportError +from core.third_party.langchain.llms.open_ai import EnhanceOpenAI +from core.model_providers.models.llm.base import BaseLLM +from core.model_providers.models.entity.message import PromptMessage, MessageType +from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs +from models.provider import ProviderType, ProviderQuotaType + +COMPLETION_MODELS = [ + 'text-davinci-003', # 4,097 tokens +] + +CHAT_MODELS = [ + 'gpt-4', # 8,192 tokens + 'gpt-4-32k', # 32,768 tokens + 'gpt-3.5-turbo', # 4,096 tokens + 'gpt-3.5-turbo-16k', # 16,384 tokens +] + +MODEL_MAX_TOKENS = { + 'gpt-4': 8192, + 'gpt-4-32k': 32768, + 'gpt-3.5-turbo': 4096, + 'gpt-3.5-turbo-16k': 16384, + 'text-davinci-003': 4097, +} + + +class OpenAIModel(BaseLLM): + def __init__(self, model_provider: BaseModelProvider, + name: str, + model_kwargs: ModelKwargs, + streaming: bool = False, + callbacks: Callbacks = None): + if name in COMPLETION_MODELS: + self.model_mode = ModelMode.COMPLETION + else: + self.model_mode = ModelMode.CHAT + + super().__init__(model_provider, name, model_kwargs, streaming, callbacks) + + def _init_client(self) -> Any: + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs) + if self.name in COMPLETION_MODELS: + client = EnhanceOpenAI( + model_name=self.name, + streaming=self.streaming, + callbacks=self.callbacks, + request_timeout=60, + **self.credentials, + **provider_model_kwargs + ) + else: + # Fine-tuning is currently only available for the following base models: + # davinci, curie, babbage, and ada. + # This means that except for the fixed `completion` model, + # all other fine-tuned models are `completion` models. 
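+            # temperature and max_tokens are first-class ChatOpenAI fields,
+            # while the remaining sampling penalties must ride in
+            # model_kwargs, which langchain forwards verbatim to the
+            # OpenAI completion call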
+            extra_model_kwargs = {
+                'top_p': provider_model_kwargs.get('top_p'),
+                'frequency_penalty': provider_model_kwargs.get('frequency_penalty'),
+                'presence_penalty': provider_model_kwargs.get('presence_penalty'),
+            }
+
+            client = EnhanceChatOpenAI(
+                model_name=self.name,
+                temperature=provider_model_kwargs.get('temperature'),
+                max_tokens=provider_model_kwargs.get('max_tokens'),
+                model_kwargs=extra_model_kwargs,
+                streaming=self.streaming,
+                callbacks=self.callbacks,
+                request_timeout=60,
+                **self.credentials
+            )
+
+        return client
+
+    def _run(self, messages: List[PromptMessage],
+             stop: Optional[List[str]] = None,
+             callbacks: Callbacks = None,
+             **kwargs) -> LLMResult:
+        """
+        run predict by prompt messages and stop words.
+
+        :param messages:
+        :param stop:
+        :param callbacks:
+        :return:
+        """
+        if self.name == 'gpt-4' \
+                and self.model_provider.provider.provider_type == ProviderType.SYSTEM.value \
+                and self.model_provider.provider.quota_type == ProviderQuotaType.TRIAL.value:
+            raise ModelCurrentlyNotSupportError("Dify-hosted OpenAI GPT-4 is currently not supported.")
+
+        prompts = self._get_prompt_from_messages(messages)
+        return self._client.generate([prompts], stop, callbacks)
+
+    def get_num_tokens(self, messages: List[PromptMessage]) -> int:
+        """
+        get num tokens of prompt messages.
+
+        :param messages:
+        :return:
+        """
+        prompts = self._get_prompt_from_messages(messages)
+        if isinstance(prompts, str):
+            return self._client.get_num_tokens(prompts)
+        else:
+            return max(self._client.get_num_tokens_from_messages(prompts) - len(prompts), 0)
+
+    def get_token_price(self, tokens: int, message_type: MessageType):
+        model_unit_prices = {
+            'gpt-4': {
+                'prompt': decimal.Decimal('0.03'),
+                'completion': decimal.Decimal('0.06'),
+            },
+            'gpt-4-32k': {
+                'prompt': decimal.Decimal('0.06'),
+                'completion': decimal.Decimal('0.12')
+            },
+            'gpt-3.5-turbo': {
+                'prompt': decimal.Decimal('0.0015'),
+                'completion': decimal.Decimal('0.002')
+            },
+            'gpt-3.5-turbo-16k': {
+                'prompt': decimal.Decimal('0.003'),
+                'completion': decimal.Decimal('0.004')
+            },
+            'text-davinci-003': {
+                'prompt': decimal.Decimal('0.02'),
+                'completion': decimal.Decimal('0.02')
+            },
+        }
+
+        if message_type == MessageType.HUMAN or message_type == MessageType.SYSTEM:
+            unit_price = model_unit_prices[self.name]['prompt']
+        else:
+            unit_price = model_unit_prices[self.name]['completion']
+
+        tokens_per_1k = (decimal.Decimal(tokens) / 1000).quantize(decimal.Decimal('0.001'),
+                                                                  rounding=decimal.ROUND_HALF_UP)
+
+        total_price = tokens_per_1k * unit_price
+        return total_price.quantize(decimal.Decimal('0.0000001'), rounding=decimal.ROUND_HALF_UP)
+
+    def get_currency(self):
+        return 'USD'
+
+    def _set_model_kwargs(self, model_kwargs: ModelKwargs):
+        provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs)
+        if self.name in COMPLETION_MODELS:
+            for k, v in provider_model_kwargs.items():
+                if hasattr(self.client, k):
+                    setattr(self.client, k, v)
+        else:
+            extra_model_kwargs = {
+                'top_p': provider_model_kwargs.get('top_p'),
+                'frequency_penalty': provider_model_kwargs.get('frequency_penalty'),
+                'presence_penalty': provider_model_kwargs.get('presence_penalty'),
+            }
+
+            self.client.temperature = provider_model_kwargs.get('temperature')
+            self.client.max_tokens = provider_model_kwargs.get('max_tokens')
+            self.client.model_kwargs = extra_model_kwargs
+
+    def handle_exceptions(self, ex: Exception) -> Exception:
+        if isinstance(ex, openai.error.InvalidRequestError):
+            logging.warning("Invalid request to OpenAI API.")
return LLMBadRequestError(str(ex)) + elif isinstance(ex, openai.error.APIConnectionError): + logging.warning("Failed to connect to OpenAI API.") + return LLMAPIConnectionError(ex.__class__.__name__ + ":" + str(ex)) + elif isinstance(ex, (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout)): + logging.warning("OpenAI service unavailable.") + return LLMAPIUnavailableError(ex.__class__.__name__ + ":" + str(ex)) + elif isinstance(ex, openai.error.RateLimitError): + return LLMRateLimitError(str(ex)) + elif isinstance(ex, openai.error.AuthenticationError): + raise LLMAuthorizationError(str(ex)) + elif isinstance(ex, openai.error.OpenAIError): + return LLMBadRequestError(ex.__class__.__name__ + ":" + str(ex)) + else: + return ex + + @classmethod + def support_streaming(cls): + return True + + # def is_model_valid_or_raise(self): + # """ + # check is a valid model. + # + # :return: + # """ + # credentials = self._model_provider.get_credentials() + # + # try: + # result = openai.Model.retrieve( + # id=self.name, + # api_key=credentials.get('openai_api_key'), + # request_timeout=60 + # ) + # + # if 'id' not in result or result['id'] != self.name: + # raise LLMNotExistsError(f"OpenAI Model {self.name} not exists.") + # except openai.error.OpenAIError as e: + # raise LLMNotExistsError(f"OpenAI Model {self.name} not exists, cause: {e.__class__.__name__}:{str(e)}") + # except Exception as e: + # logging.exception("OpenAI Model retrieve failed.") + # raise e diff --git a/api/core/model_providers/models/llm/replicate_model.py b/api/core/model_providers/models/llm/replicate_model.py new file mode 100644 index 000000000..7dd7eb853 --- /dev/null +++ b/api/core/model_providers/models/llm/replicate_model.py @@ -0,0 +1,103 @@ +import decimal +from functools import wraps +from typing import List, Optional, Any + +from langchain.callbacks.manager import Callbacks +from langchain.schema import LLMResult, get_buffer_string +from replicate.exceptions import ReplicateError, ModelError + +from core.model_providers.providers.base import BaseModelProvider +from core.model_providers.error import LLMBadRequestError +from core.third_party.langchain.llms.replicate_llm import EnhanceReplicate +from core.model_providers.models.llm.base import BaseLLM +from core.model_providers.models.entity.message import PromptMessage, MessageType +from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs + + +class ReplicateModel(BaseLLM): + def __init__(self, model_provider: BaseModelProvider, + name: str, + model_kwargs: ModelKwargs, + streaming: bool = False, + callbacks: Callbacks = None): + self.model_mode = ModelMode.CHAT if name.endswith('-chat') else ModelMode.COMPLETION + + super().__init__(model_provider, name, model_kwargs, streaming, callbacks) + + def _init_client(self) -> Any: + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs) + + return EnhanceReplicate( + model=self.name + ':' + self.credentials.get('model_version'), + input=provider_model_kwargs, + streaming=self.streaming, + replicate_api_token=self.credentials.get('replicate_api_token'), + callbacks=self.callbacks, + ) + + def _run(self, messages: List[PromptMessage], + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs) -> LLMResult: + """ + run predict by prompt messages and stop words. 
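+
+        Example (illustrative sketch; the model name and version hash are
+        placeholders)::
+
+            # self.name plus credentials['model_version'] form the client's
+            # '<owner>/<model>:<version>' identifier, e.g.
+            # 'a16z-infra/llama13b-v2-chat:<version-hash>'; system messages
+            # are lifted into Replicate's 'system_prompt' input below
+            result = model.run([PromptMessage(content='ping')])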
+ + :param messages: + :param stop: + :param callbacks: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + extra_kwargs = {} + if isinstance(prompts, list): + system_messages = [message for message in messages if message.type == 'system'] + if system_messages: + system_message = system_messages[0] + extra_kwargs['system_prompt'] = system_message.content + prompts = [message for message in messages if message.type != 'system'] + + prompts = get_buffer_string(prompts) + + # The maximum length the generated tokens can have. + # Corresponds to the length of the input prompt + max_new_tokens. + if 'max_length' in self._client.input: + self._client.input['max_length'] = min( + self._client.input['max_length'] + self.get_num_tokens(messages), + self.model_rules.max_tokens.max + ) + + return self._client.generate([prompts], stop, callbacks, **extra_kwargs) + + def get_num_tokens(self, messages: List[PromptMessage]) -> int: + """ + get num tokens of prompt messages. + + :param messages: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + if isinstance(prompts, list): + prompts = get_buffer_string(prompts) + + return self._client.get_num_tokens(prompts) + + def get_token_price(self, tokens: int, message_type: MessageType): + # replicate only pay for prediction seconds + return decimal.Decimal('0') + + def get_currency(self): + return 'USD' + + def _set_model_kwargs(self, model_kwargs: ModelKwargs): + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs) + self.client.input = provider_model_kwargs + + def handle_exceptions(self, ex: Exception) -> Exception: + if isinstance(ex, (ModelError, ReplicateError)): + return LLMBadRequestError(f"Replicate: {str(ex)}") + else: + return ex + + @classmethod + def support_streaming(cls): + return True \ No newline at end of file diff --git a/api/core/model_providers/models/llm/spark_model.py b/api/core/model_providers/models/llm/spark_model.py new file mode 100644 index 000000000..5d8c97c46 --- /dev/null +++ b/api/core/model_providers/models/llm/spark_model.py @@ -0,0 +1,73 @@ +import decimal +from functools import wraps +from typing import List, Optional, Any + +from langchain.callbacks.manager import Callbacks +from langchain.schema import LLMResult + +from core.model_providers.error import LLMBadRequestError +from core.model_providers.models.llm.base import BaseLLM +from core.model_providers.models.entity.message import PromptMessage, MessageType +from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs +from core.third_party.langchain.llms.spark import ChatSpark +from core.third_party.spark.spark_llm import SparkError + + +class SparkModel(BaseLLM): + model_mode: ModelMode = ModelMode.CHAT + + def _init_client(self) -> Any: + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs) + return ChatSpark( + streaming=self.streaming, + callbacks=self.callbacks, + **self.credentials, + **provider_model_kwargs + ) + + def _run(self, messages: List[PromptMessage], + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs) -> LLMResult: + """ + run predict by prompt messages and stop words. + + :param messages: + :param stop: + :param callbacks: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + return self._client.generate([prompts], stop, callbacks) + + def get_num_tokens(self, messages: List[PromptMessage]) -> int: + """ + get num tokens of prompt messages. 
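+
+        Example (illustrative sketch)::
+
+            # message contents are concatenated and counted as one string,
+            # so no per-message overhead is added
+            count = model.get_num_tokens([PromptMessage(content='ping')])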
+ + :param messages: + :return: + """ + contents = [message.content for message in messages] + return max(self._client.get_num_tokens("".join(contents)), 0) + + def get_token_price(self, tokens: int, message_type: MessageType): + return decimal.Decimal('0') + + def get_currency(self): + return 'RMB' + + def _set_model_kwargs(self, model_kwargs: ModelKwargs): + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs) + for k, v in provider_model_kwargs.items(): + if hasattr(self.client, k): + setattr(self.client, k, v) + + def handle_exceptions(self, ex: Exception) -> Exception: + if isinstance(ex, SparkError): + return LLMBadRequestError(f"Spark: {str(ex)}") + else: + return ex + + @classmethod + def support_streaming(cls): + return True \ No newline at end of file diff --git a/api/core/model_providers/models/llm/tongyi_model.py b/api/core/model_providers/models/llm/tongyi_model.py new file mode 100644 index 000000000..f950275f7 --- /dev/null +++ b/api/core/model_providers/models/llm/tongyi_model.py @@ -0,0 +1,77 @@ +import decimal +from functools import wraps +from typing import List, Optional, Any + +from langchain.callbacks.manager import Callbacks +from langchain.schema import LLMResult +from requests import HTTPError + +from core.model_providers.error import LLMBadRequestError +from core.model_providers.models.llm.base import BaseLLM +from core.model_providers.models.entity.message import PromptMessage, MessageType +from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs +from core.third_party.langchain.llms.tongyi_llm import EnhanceTongyi + + +class TongyiModel(BaseLLM): + model_mode: ModelMode = ModelMode.COMPLETION + + def _init_client(self) -> Any: + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs) + del provider_model_kwargs['max_tokens'] + return EnhanceTongyi( + model_name=self.name, + max_retries=1, + streaming=self.streaming, + callbacks=self.callbacks, + **self.credentials, + **provider_model_kwargs + ) + + def _run(self, messages: List[PromptMessage], + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs) -> LLMResult: + """ + run predict by prompt messages and stop words. + + :param messages: + :param stop: + :param callbacks: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + return self._client.generate([prompts], stop, callbacks) + + def get_num_tokens(self, messages: List[PromptMessage]) -> int: + """ + get num tokens of prompt messages. 
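+
+        Example (illustrative sketch)::
+
+            # note that max_tokens is not a Tongyi parameter, so it is
+            # deleted from the provider kwargs in _init_client and
+            # _set_model_kwargs
+            count = model.get_num_tokens([PromptMessage(content='你好')])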
+ + :param messages: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + return max(self._client.get_num_tokens(prompts), 0) + + def get_token_price(self, tokens: int, message_type: MessageType): + return decimal.Decimal('0') + + def get_currency(self): + return 'RMB' + + def _set_model_kwargs(self, model_kwargs: ModelKwargs): + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs) + del provider_model_kwargs['max_tokens'] + for k, v in provider_model_kwargs.items(): + if hasattr(self.client, k): + setattr(self.client, k, v) + + def handle_exceptions(self, ex: Exception) -> Exception: + if isinstance(ex, (ValueError, HTTPError)): + return LLMBadRequestError(f"Tongyi: {str(ex)}") + else: + return ex + + @classmethod + def support_streaming(cls): + return True diff --git a/api/core/model_providers/models/llm/wenxin_model.py b/api/core/model_providers/models/llm/wenxin_model.py new file mode 100644 index 000000000..2c950679a --- /dev/null +++ b/api/core/model_providers/models/llm/wenxin_model.py @@ -0,0 +1,92 @@ +import decimal +from typing import List, Optional, Any + +from langchain.callbacks.manager import Callbacks +from langchain.schema import LLMResult + +from core.model_providers.error import LLMBadRequestError +from core.model_providers.models.llm.base import BaseLLM +from core.model_providers.models.entity.message import PromptMessage, MessageType +from core.model_providers.models.entity.model_params import ModelMode, ModelKwargs +from core.third_party.langchain.llms.wenxin import Wenxin + + +class WenxinModel(BaseLLM): + model_mode: ModelMode = ModelMode.COMPLETION + + def _init_client(self) -> Any: + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, self.model_kwargs) + return Wenxin( + streaming=self.streaming, + callbacks=self.callbacks, + **self.credentials, + **provider_model_kwargs + ) + + def _run(self, messages: List[PromptMessage], + stop: Optional[List[str]] = None, + callbacks: Callbacks = None, + **kwargs) -> LLMResult: + """ + run predict by prompt messages and stop words. + + :param messages: + :param stop: + :param callbacks: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + return self._client.generate([prompts], stop, callbacks) + + def get_num_tokens(self, messages: List[PromptMessage]) -> int: + """ + get num tokens of prompt messages. 
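+
+        Example (illustrative sketch)::
+
+            count = model.get_num_tokens([PromptMessage(content='你好')])
+            # e.g. 1,000 prompt tokens on ernie-bot price out at
+            # 0.012 RMB through get_token_price below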
+ + :param messages: + :return: + """ + prompts = self._get_prompt_from_messages(messages) + return max(self._client.get_num_tokens(prompts), 0) + + def get_token_price(self, tokens: int, message_type: MessageType): + model_unit_prices = { + 'ernie-bot': { + 'prompt': decimal.Decimal('0.012'), + 'completion': decimal.Decimal('0.012'), + }, + 'ernie-bot-turbo': { + 'prompt': decimal.Decimal('0.008'), + 'completion': decimal.Decimal('0.008') + }, + 'bloomz-7b': { + 'prompt': decimal.Decimal('0.006'), + 'completion': decimal.Decimal('0.006') + } + } + + if message_type == MessageType.HUMAN or message_type == MessageType.SYSTEM: + unit_price = model_unit_prices[self.name]['prompt'] + else: + unit_price = model_unit_prices[self.name]['completion'] + + tokens_per_1k = (decimal.Decimal(tokens) / 1000).quantize(decimal.Decimal('0.001'), + rounding=decimal.ROUND_HALF_UP) + + total_price = tokens_per_1k * unit_price + return total_price.quantize(decimal.Decimal('0.0000001'), rounding=decimal.ROUND_HALF_UP) + + def get_currency(self): + return 'RMB' + + def _set_model_kwargs(self, model_kwargs: ModelKwargs): + provider_model_kwargs = self._to_model_kwargs_input(self.model_rules, model_kwargs) + for k, v in provider_model_kwargs.items(): + if hasattr(self.client, k): + setattr(self.client, k, v) + + def handle_exceptions(self, ex: Exception) -> Exception: + return LLMBadRequestError(f"Wenxin: {str(ex)}") + + @classmethod + def support_streaming(cls): + return False diff --git a/api/core/model_providers/models/moderation/__init__.py b/api/core/model_providers/models/moderation/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/api/core/model_providers/models/moderation/openai_moderation.py b/api/core/model_providers/models/moderation/openai_moderation.py new file mode 100644 index 000000000..c1e792966 --- /dev/null +++ b/api/core/model_providers/models/moderation/openai_moderation.py @@ -0,0 +1,48 @@ +import logging + +import openai + +from core.model_providers.error import LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, \ + LLMRateLimitError, LLMAuthorizationError +from core.model_providers.models.base import BaseProviderModel +from core.model_providers.models.entity.model_params import ModelType +from core.model_providers.providers.base import BaseModelProvider + +DEFAULT_AUDIO_MODEL = 'whisper-1' + + +class OpenAIModeration(BaseProviderModel): + type: ModelType = ModelType.MODERATION + + def __init__(self, model_provider: BaseModelProvider, name: str): + super().__init__(model_provider, openai.Moderation) + + def run(self, text): + credentials = self.model_provider.get_model_credentials( + model_name=DEFAULT_AUDIO_MODEL, + model_type=self.type + ) + + try: + return self._client.create(input=text, api_key=credentials['openai_api_key']) + except Exception as ex: + raise self.handle_exceptions(ex) + + def handle_exceptions(self, ex: Exception) -> Exception: + if isinstance(ex, openai.error.InvalidRequestError): + logging.warning("Invalid request to OpenAI API.") + return LLMBadRequestError(str(ex)) + elif isinstance(ex, openai.error.APIConnectionError): + logging.warning("Failed to connect to OpenAI API.") + return LLMAPIConnectionError(ex.__class__.__name__ + ":" + str(ex)) + elif isinstance(ex, (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout)): + logging.warning("OpenAI service unavailable.") + return LLMAPIUnavailableError(ex.__class__.__name__ + ":" + str(ex)) + elif isinstance(ex, openai.error.RateLimitError): + return 
LLMRateLimitError(str(ex)) + elif isinstance(ex, openai.error.AuthenticationError): + raise LLMAuthorizationError(str(ex)) + elif isinstance(ex, openai.error.OpenAIError): + return LLMBadRequestError(ex.__class__.__name__ + ":" + str(ex)) + else: + return ex diff --git a/api/core/model_providers/models/speech2text/__init__.py b/api/core/model_providers/models/speech2text/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/api/core/model_providers/models/speech2text/base.py b/api/core/model_providers/models/speech2text/base.py new file mode 100644 index 000000000..0b1ec1d55 --- /dev/null +++ b/api/core/model_providers/models/speech2text/base.py @@ -0,0 +1,29 @@ +from abc import abstractmethod +from typing import Any + +from core.model_providers.models.base import BaseProviderModel +from core.model_providers.models.entity.model_params import ModelType +from core.model_providers.providers.base import BaseModelProvider + + +class BaseSpeech2Text(BaseProviderModel): + name: str + type: ModelType = ModelType.SPEECH_TO_TEXT + + def __init__(self, model_provider: BaseModelProvider, client: Any, name: str): + super().__init__(model_provider, client) + self.name = name + + def run(self, file): + try: + return self._run(file) + except Exception as ex: + raise self.handle_exceptions(ex) + + @abstractmethod + def _run(self, file): + raise NotImplementedError + + @abstractmethod + def handle_exceptions(self, ex: Exception) -> Exception: + raise NotImplementedError diff --git a/api/core/model_providers/models/speech2text/openai_whisper.py b/api/core/model_providers/models/speech2text/openai_whisper.py new file mode 100644 index 000000000..8bca2aaa6 --- /dev/null +++ b/api/core/model_providers/models/speech2text/openai_whisper.py @@ -0,0 +1,47 @@ +import logging + +import openai + +from core.model_providers.error import LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, \ + LLMRateLimitError, LLMAuthorizationError +from core.model_providers.models.speech2text.base import BaseSpeech2Text +from core.model_providers.providers.base import BaseModelProvider + + +class OpenAIWhisper(BaseSpeech2Text): + + def __init__(self, model_provider: BaseModelProvider, name: str): + super().__init__(model_provider, openai.Audio, name) + + def _run(self, file): + credentials = self.model_provider.get_model_credentials( + model_name=self.name, + model_type=self.type + ) + + return self._client.transcribe( + model=self.name, + file=file, + api_key=credentials.get('openai_api_key'), + api_base=credentials.get('openai_api_base'), + organization=credentials.get('openai_organization'), + ) + + def handle_exceptions(self, ex: Exception) -> Exception: + if isinstance(ex, openai.error.InvalidRequestError): + logging.warning("Invalid request to OpenAI API.") + return LLMBadRequestError(str(ex)) + elif isinstance(ex, openai.error.APIConnectionError): + logging.warning("Failed to connect to OpenAI API.") + return LLMAPIConnectionError(ex.__class__.__name__ + ":" + str(ex)) + elif isinstance(ex, (openai.error.APIError, openai.error.ServiceUnavailableError, openai.error.Timeout)): + logging.warning("OpenAI service unavailable.") + return LLMAPIUnavailableError(ex.__class__.__name__ + ":" + str(ex)) + elif isinstance(ex, openai.error.RateLimitError): + return LLMRateLimitError(str(ex)) + elif isinstance(ex, openai.error.AuthenticationError): + raise LLMAuthorizationError(str(ex)) + elif isinstance(ex, openai.error.OpenAIError): + return LLMBadRequestError(ex.__class__.__name__ + ":" + str(ex)) + else: 
+ return ex diff --git a/api/core/model_providers/providers/__init__.py b/api/core/model_providers/providers/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/api/core/model_providers/providers/anthropic_provider.py b/api/core/model_providers/providers/anthropic_provider.py new file mode 100644 index 000000000..8daeff44e --- /dev/null +++ b/api/core/model_providers/providers/anthropic_provider.py @@ -0,0 +1,224 @@ +import json +import logging +from json import JSONDecodeError +from typing import Type, Optional + +import anthropic +from flask import current_app +from langchain.chat_models import ChatAnthropic +from langchain.schema import HumanMessage + +from core.helper import encrypter +from core.model_providers.models.base import BaseProviderModel +from core.model_providers.models.entity.model_params import ModelKwargsRules, KwargRule +from core.model_providers.models.entity.provider import ModelFeature +from core.model_providers.models.llm.anthropic_model import AnthropicModel +from core.model_providers.models.llm.base import ModelType +from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError +from core.model_providers.providers.hosted import hosted_model_providers +from models.provider import ProviderType + + +class AnthropicProvider(BaseModelProvider): + + @property + def provider_name(self): + """ + Returns the name of a provider. + """ + return 'anthropic' + + def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]: + if model_type == ModelType.TEXT_GENERATION: + return [ + { + 'id': 'claude-instant-1', + 'name': 'claude-instant-1', + }, + { + 'id': 'claude-2', + 'name': 'claude-2', + 'features': [ + ModelFeature.AGENT_THOUGHT.value + ] + }, + ] + else: + return [] + + def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]: + """ + Returns the model class. + + :param model_type: + :return: + """ + if model_type == ModelType.TEXT_GENERATION: + model_class = AnthropicModel + else: + raise NotImplementedError + + return model_class + + def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules: + """ + get model parameter rules. + + :param model_name: + :param model_type: + :return: + """ + return ModelKwargsRules( + temperature=KwargRule[float](min=0, max=1, default=1), + top_p=KwargRule[float](min=0, max=1, default=0.7), + presence_penalty=KwargRule[float](enabled=False), + frequency_penalty=KwargRule[float](enabled=False), + max_tokens=KwargRule[int](alias="max_tokens_to_sample", min=10, max=100000, default=256), + ) + + @classmethod + def is_provider_credentials_valid_or_raise(cls, credentials: dict): + """ + Validates the given credentials. 
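+
+        Example (illustrative sketch; the key value is a placeholder)::
+
+            AnthropicProvider.is_provider_credentials_valid_or_raise({
+                'anthropic_api_key': 'sk-ant-...'
+            })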
+ """ + if 'anthropic_api_key' not in credentials: + raise CredentialsValidateFailedError('Anthropic API Key must be provided.') + + try: + credential_kwargs = { + 'anthropic_api_key': credentials['anthropic_api_key'] + } + + if 'anthropic_api_url' in credentials: + credential_kwargs['anthropic_api_url'] = credentials['anthropic_api_url'] + + chat_llm = ChatAnthropic( + model='claude-instant-1', + max_tokens_to_sample=10, + temperature=0, + default_request_timeout=60, + **credential_kwargs + ) + + messages = [ + HumanMessage( + content="ping" + ) + ] + + chat_llm(messages) + except anthropic.APIConnectionError as ex: + raise CredentialsValidateFailedError(str(ex)) + except (anthropic.APIStatusError, anthropic.RateLimitError) as ex: + raise CredentialsValidateFailedError(str(ex)) + except Exception as ex: + logging.exception('Anthropic config validation failed') + raise ex + + @classmethod + def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict: + credentials['anthropic_api_key'] = encrypter.encrypt_token(tenant_id, credentials['anthropic_api_key']) + return credentials + + def get_provider_credentials(self, obfuscated: bool = False) -> dict: + if self.provider.provider_type == ProviderType.CUSTOM.value: + try: + credentials = json.loads(self.provider.encrypted_config) + except JSONDecodeError: + credentials = { + 'anthropic_api_url': None, + 'anthropic_api_key': None + } + + if credentials['anthropic_api_key']: + credentials['anthropic_api_key'] = encrypter.decrypt_token( + self.provider.tenant_id, + credentials['anthropic_api_key'] + ) + + if obfuscated: + credentials['anthropic_api_key'] = encrypter.obfuscated_token(credentials['anthropic_api_key']) + + if 'anthropic_api_url' not in credentials: + credentials['anthropic_api_url'] = None + + return credentials + else: + if hosted_model_providers.anthropic: + return { + 'anthropic_api_url': hosted_model_providers.anthropic.api_base, + 'anthropic_api_key': hosted_model_providers.anthropic.api_key, + } + else: + return { + 'anthropic_api_url': None, + 'anthropic_api_key': None + } + + @classmethod + def is_provider_type_system_supported(cls) -> bool: + if current_app.config['EDITION'] != 'CLOUD': + return False + + if hosted_model_providers.anthropic: + return True + + return False + + def should_deduct_quota(self): + if hosted_model_providers.anthropic and \ + hosted_model_providers.anthropic.quota_limit and hosted_model_providers.anthropic.quota_limit > 0: + return True + + return False + + def get_payment_info(self) -> Optional[dict]: + """ + get product info if it payable. + + :return: + """ + if hosted_model_providers.anthropic \ + and hosted_model_providers.anthropic.paid_enabled: + return { + 'product_id': hosted_model_providers.anthropic.paid_stripe_price_id, + 'increase_quota': hosted_model_providers.anthropic.paid_increase_quota, + } + + return None + + @classmethod + def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict): + """ + check model credentials valid. + + :param model_name: + :param model_type: + :param credentials: + """ + return + + @classmethod + def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType, + credentials: dict) -> dict: + """ + encrypt model credentials for save. 
+ + :param tenant_id: + :param model_name: + :param model_type: + :param credentials: + :return: + """ + return {} + + def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict: + """ + get credentials for llm use. + + :param model_name: + :param model_type: + :param obfuscated: + :return: + """ + return self.get_provider_credentials(obfuscated) diff --git a/api/core/model_providers/providers/azure_openai_provider.py b/api/core/model_providers/providers/azure_openai_provider.py new file mode 100644 index 000000000..3dbb78237 --- /dev/null +++ b/api/core/model_providers/providers/azure_openai_provider.py @@ -0,0 +1,387 @@ +import json +import logging +from json import JSONDecodeError +from typing import Type + +import openai +from flask import current_app +from langchain.embeddings import OpenAIEmbeddings +from langchain.schema import HumanMessage + +from core.helper import encrypter +from core.model_providers.models.base import BaseProviderModel +from core.model_providers.models.embedding.azure_openai_embedding import AzureOpenAIEmbedding, \ + AZURE_OPENAI_API_VERSION +from core.model_providers.models.entity.model_params import ModelType, ModelKwargsRules, KwargRule +from core.model_providers.models.entity.provider import ModelFeature +from core.model_providers.models.llm.azure_openai_model import AzureOpenAIModel +from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError +from core.model_providers.providers.hosted import hosted_model_providers +from core.third_party.langchain.llms.azure_chat_open_ai import EnhanceAzureChatOpenAI +from extensions.ext_database import db +from models.provider import ProviderType, ProviderModel, ProviderQuotaType + +BASE_MODELS = [ + 'gpt-4', + 'gpt-4-32k', + 'gpt-35-turbo', + 'gpt-35-turbo-16k', + 'text-davinci-003', + 'text-embedding-ada-002', +] + + +class AzureOpenAIProvider(BaseModelProvider): + + @property + def provider_name(self): + """ + Returns the name of a provider. 
+ """ + return 'azure_openai' + + def get_supported_model_list(self, model_type: ModelType) -> list[dict]: + # convert old provider config to provider models + self._convert_provider_config_to_model_config() + + if self.provider.provider_type == ProviderType.CUSTOM.value: + # get configurable provider models + provider_models = db.session.query(ProviderModel).filter( + ProviderModel.tenant_id == self.provider.tenant_id, + ProviderModel.provider_name == self.provider.provider_name, + ProviderModel.model_type == model_type.value, + ProviderModel.is_valid == True + ).order_by(ProviderModel.created_at.asc()).all() + + model_list = [] + for provider_model in provider_models: + model_dict = { + 'id': provider_model.model_name, + 'name': provider_model.model_name + } + + credentials = json.loads(provider_model.encrypted_config) + if credentials['base_model_name'] in [ + 'gpt-4', + 'gpt-4-32k', + 'gpt-35-turbo', + 'gpt-35-turbo-16k', + ]: + model_dict['features'] = [ + ModelFeature.AGENT_THOUGHT.value + ] + + model_list.append(model_dict) + else: + model_list = self._get_fixed_model_list(model_type) + + return model_list + + def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]: + if model_type == ModelType.TEXT_GENERATION: + models = [ + { + 'id': 'gpt-3.5-turbo', + 'name': 'gpt-3.5-turbo', + 'features': [ + ModelFeature.AGENT_THOUGHT.value + ] + }, + { + 'id': 'gpt-3.5-turbo-16k', + 'name': 'gpt-3.5-turbo-16k', + 'features': [ + ModelFeature.AGENT_THOUGHT.value + ] + }, + { + 'id': 'gpt-4', + 'name': 'gpt-4', + 'features': [ + ModelFeature.AGENT_THOUGHT.value + ] + }, + { + 'id': 'gpt-4-32k', + 'name': 'gpt-4-32k', + 'features': [ + ModelFeature.AGENT_THOUGHT.value + ] + }, + { + 'id': 'text-davinci-003', + 'name': 'text-davinci-003', + } + ] + + if self.provider.provider_type == ProviderType.SYSTEM.value \ + and self.provider.quota_type == ProviderQuotaType.TRIAL.value: + models = [item for item in models if item['id'] not in ['gpt-4', 'gpt-4-32k']] + + return models + elif model_type == ModelType.EMBEDDINGS: + return [ + { + 'id': 'text-embedding-ada-002', + 'name': 'text-embedding-ada-002' + } + ] + else: + return [] + + def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]: + """ + Returns the model class. + + :param model_type: + :return: + """ + if model_type == ModelType.TEXT_GENERATION: + model_class = AzureOpenAIModel + elif model_type == ModelType.EMBEDDINGS: + model_class = AzureOpenAIEmbedding + else: + raise NotImplementedError + + return model_class + + def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules: + """ + get model parameter rules. + + :param model_name: + :param model_type: + :return: + """ + base_model_max_tokens = { + 'gpt-4': 8192, + 'gpt-4-32k': 32768, + 'gpt-35-turbo': 4096, + 'gpt-35-turbo-16k': 16384, + 'text-davinci-003': 4097, + } + + model_credentials = self.get_model_credentials(model_name, model_type) + + return ModelKwargsRules( + temperature=KwargRule[float](min=0, max=2, default=1), + top_p=KwargRule[float](min=0, max=1, default=1), + presence_penalty=KwargRule[float](min=-2, max=2, default=0), + frequency_penalty=KwargRule[float](min=-2, max=2, default=0), + max_tokens=KwargRule[int](min=10, max=base_model_max_tokens.get( + model_credentials['base_model_name'], + 4097 + ), default=16), + ) + + @classmethod + def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict): + """ + check model credentials valid. 
+
+        :param model_name:
+        :param model_type:
+        :param credentials:
+        """
+        if 'openai_api_key' not in credentials:
+            raise CredentialsValidateFailedError('Azure OpenAI API key is required')
+
+        if 'openai_api_base' not in credentials:
+            raise CredentialsValidateFailedError('Azure OpenAI API Base Endpoint is required')
+
+        if 'base_model_name' not in credentials:
+            raise CredentialsValidateFailedError('Base Model Name is required')
+
+        if credentials['base_model_name'] not in BASE_MODELS:
+            raise CredentialsValidateFailedError('Base Model Name is invalid')
+
+        if model_type == ModelType.TEXT_GENERATION:
+            try:
+                client = EnhanceAzureChatOpenAI(
+                    deployment_name=model_name,
+                    temperature=0,
+                    max_tokens=15,
+                    request_timeout=10,
+                    openai_api_type='azure',
+                    openai_api_version='2023-07-01-preview',
+                    openai_api_key=credentials['openai_api_key'],
+                    openai_api_base=credentials['openai_api_base'],
+                )
+
+                client.generate([[HumanMessage(content='hi!')]])
+            except openai.error.OpenAIError as e:
+                raise CredentialsValidateFailedError(
+                    f"Azure OpenAI deployment {model_name} does not exist, cause: {e.__class__.__name__}:{str(e)}")
+            except Exception as e:
+                logging.exception("Azure OpenAI Model retrieve failed.")
+                raise e
+        elif model_type == ModelType.EMBEDDINGS:
+            try:
+                client = OpenAIEmbeddings(
+                    openai_api_type='azure',
+                    openai_api_version=AZURE_OPENAI_API_VERSION,
+                    deployment=model_name,
+                    chunk_size=16,
+                    max_retries=1,
+                    openai_api_key=credentials['openai_api_key'],
+                    openai_api_base=credentials['openai_api_base']
+                )
+
+                client.embed_query('hi')
+            except openai.error.OpenAIError as e:
+                logging.exception("Azure OpenAI Model check error.")
+                raise CredentialsValidateFailedError(
+                    f"Azure OpenAI deployment {model_name} does not exist, cause: {e.__class__.__name__}:{str(e)}")
+            except Exception as e:
+                logging.exception("Azure OpenAI Model retrieve failed.")
+                raise e
+
+    @classmethod
+    def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType,
+                                  credentials: dict) -> dict:
+        """
+        encrypt model credentials for save.
+
+        :param tenant_id:
+        :param model_name:
+        :param model_type:
+        :param credentials:
+        :return:
+        """
+        credentials['openai_api_key'] = encrypter.encrypt_token(tenant_id, credentials['openai_api_key'])
+        return credentials
+
+    def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict:
+        """
+        get credentials for llm use.
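The validation just shown uses a validate-by-probe pattern: issue one cheap request (a 15-token chat generation, or a single embed_query) and translate any SDK failure into CredentialsValidateFailedError so the console can report a uniform error. A dependency-free sketch of the same shape, where ProbeError and probe are hypothetical stand-ins for the SDK call:

class CredentialsValidateFailedError(Exception):
    pass

class ProbeError(Exception):
    pass

def probe(api_key: str) -> None:
    # Stand-in for e.g. a one-token completion against the deployment.
    if api_key != 'valid-key':
        raise ProbeError('401 Unauthorized')

def validate_credentials(credentials: dict) -> None:
    if 'api_key' not in credentials:
        raise CredentialsValidateFailedError('API key is required')
    try:
        probe(credentials['api_key'])
    except ProbeError as e:
        # Surface the SDK error class and message in a single uniform exception.
        raise CredentialsValidateFailedError(f'{e.__class__.__name__}: {e}')

validate_credentials({'api_key': 'valid-key'})  # passes silently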
+ + :param model_name: + :param model_type: + :param obfuscated: + :return: + """ + if self.provider.provider_type == ProviderType.CUSTOM.value: + # convert old provider config to provider models + self._convert_provider_config_to_model_config() + + provider_model = self._get_provider_model(model_name, model_type) + + if not provider_model.encrypted_config: + return { + 'openai_api_base': '', + 'openai_api_key': '', + 'base_model_name': '' + } + + credentials = json.loads(provider_model.encrypted_config) + if credentials['openai_api_key']: + credentials['openai_api_key'] = encrypter.decrypt_token( + self.provider.tenant_id, + credentials['openai_api_key'] + ) + + if obfuscated: + credentials['openai_api_key'] = encrypter.obfuscated_token(credentials['openai_api_key']) + + return credentials + else: + if hosted_model_providers.azure_openai: + return { + 'openai_api_base': hosted_model_providers.azure_openai.api_base, + 'openai_api_key': hosted_model_providers.azure_openai.api_key, + 'base_model_name': model_name + } + else: + return { + 'openai_api_base': None, + 'openai_api_key': None, + 'base_model_name': None + } + + @classmethod + def is_provider_type_system_supported(cls) -> bool: + if current_app.config['EDITION'] != 'CLOUD': + return False + + if hosted_model_providers.azure_openai: + return True + + return False + + def should_deduct_quota(self): + if hosted_model_providers.azure_openai \ + and hosted_model_providers.azure_openai.quota_limit and hosted_model_providers.azure_openai.quota_limit > 0: + return True + + return False + + @classmethod + def is_provider_credentials_valid_or_raise(cls, credentials: dict): + return + + @classmethod + def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict: + return {} + + def get_provider_credentials(self, obfuscated: bool = False) -> dict: + return {} + + def _convert_provider_config_to_model_config(self): + if self.provider.provider_type == ProviderType.CUSTOM.value \ + and self.provider.is_valid \ + and self.provider.encrypted_config: + try: + credentials = json.loads(self.provider.encrypted_config) + except JSONDecodeError: + credentials = { + 'openai_api_base': '', + 'openai_api_key': '', + 'base_model_name': '' + } + + self._add_provider_model( + model_name='gpt-35-turbo', + model_type=ModelType.TEXT_GENERATION, + provider_credentials=credentials + ) + + self._add_provider_model( + model_name='gpt-35-turbo-16k', + model_type=ModelType.TEXT_GENERATION, + provider_credentials=credentials + ) + + self._add_provider_model( + model_name='gpt-4', + model_type=ModelType.TEXT_GENERATION, + provider_credentials=credentials + ) + + self._add_provider_model( + model_name='text-davinci-003', + model_type=ModelType.TEXT_GENERATION, + provider_credentials=credentials + ) + + self._add_provider_model( + model_name='text-embedding-ada-002', + model_type=ModelType.EMBEDDINGS, + provider_credentials=credentials + ) + + self.provider.encrypted_config = None + db.session.commit() + + def _add_provider_model(self, model_name: str, model_type: ModelType, provider_credentials: dict): + credentials = provider_credentials.copy() + credentials['base_model_name'] = model_name + provider_model = ProviderModel( + tenant_id=self.provider.tenant_id, + provider_name=self.provider.provider_name, + model_name=model_name, + model_type=model_type.value, + encrypted_config=json.dumps(credentials), + is_valid=True + ) + db.session.add(provider_model) + db.session.commit() diff --git a/api/core/model_providers/providers/base.py 
b/api/core/model_providers/providers/base.py new file mode 100644 index 000000000..f10aa9f99 --- /dev/null +++ b/api/core/model_providers/providers/base.py @@ -0,0 +1,283 @@ +from abc import ABC, abstractmethod +from datetime import datetime +from typing import Type, Optional + +from flask import current_app +from pydantic import BaseModel + +from core.model_providers.error import QuotaExceededError, LLMBadRequestError +from extensions.ext_database import db +from core.model_providers.models.entity.model_params import ModelType, ModelKwargsRules +from core.model_providers.models.entity.provider import ProviderQuotaUnit +from core.model_providers.rules import provider_rules +from models.provider import Provider, ProviderType, ProviderModel + + +class BaseModelProvider(BaseModel, ABC): + + provider: Provider + + class Config: + """Configuration for this pydantic object.""" + + arbitrary_types_allowed = True + + @property + @abstractmethod + def provider_name(self): + """ + Returns the name of a provider. + """ + raise NotImplementedError + + def get_rules(self): + """ + Returns the rules of a provider. + """ + return provider_rules[self.provider_name] + + def get_supported_model_list(self, model_type: ModelType) -> list[dict]: + """ + get supported model object list for use. + + :param model_type: + :return: + """ + rules = self.get_rules() + if 'custom' not in rules['support_provider_types']: + return self._get_fixed_model_list(model_type) + + if 'model_flexibility' not in rules: + return self._get_fixed_model_list(model_type) + + if rules['model_flexibility'] == 'fixed': + return self._get_fixed_model_list(model_type) + + # get configurable provider models + provider_models = db.session.query(ProviderModel).filter( + ProviderModel.tenant_id == self.provider.tenant_id, + ProviderModel.provider_name == self.provider.provider_name, + ProviderModel.model_type == model_type.value, + ProviderModel.is_valid == True + ).order_by(ProviderModel.created_at.asc()).all() + + return [{ + 'id': provider_model.model_name, + 'name': provider_model.model_name + } for provider_model in provider_models] + + @abstractmethod + def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]: + """ + get supported model object list for use. + + :param model_type: + :return: + """ + raise NotImplementedError + + @abstractmethod + def get_model_class(self, model_type: ModelType) -> Type: + """ + get specific model class. + + :param model_type: + :return: + """ + raise NotImplementedError + + @classmethod + @abstractmethod + def is_provider_credentials_valid_or_raise(cls, credentials: dict): + """ + check provider credentials valid. + + :param credentials: + """ + raise NotImplementedError + + @classmethod + @abstractmethod + def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict: + """ + encrypt provider credentials for save. + + :param tenant_id: + :param credentials: + :return: + """ + raise NotImplementedError + + @abstractmethod + def get_provider_credentials(self, obfuscated: bool = False) -> dict: + """ + get credentials for llm use. + + :param obfuscated: + :return: + """ + raise NotImplementedError + + @classmethod + @abstractmethod + def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict): + """ + check model credentials valid. 
+
+        :param model_name:
+        :param model_type:
+        :param credentials:
+        """
+        raise NotImplementedError
+
+    @classmethod
+    @abstractmethod
+    def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType,
+                                  credentials: dict) -> dict:
+        """
+        encrypt model credentials for save.
+
+        :param tenant_id:
+        :param model_name:
+        :param model_type:
+        :param credentials:
+        :return:
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules:
+        """
+        get model parameter rules.
+
+        :param model_name:
+        :param model_type:
+        :return:
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict:
+        """
+        get credentials for llm use.
+
+        :param model_name:
+        :param model_type:
+        :param obfuscated:
+        :return:
+        """
+        raise NotImplementedError
+
+    @classmethod
+    def is_provider_type_system_supported(cls) -> bool:
+        return current_app.config['EDITION'] == 'CLOUD'
+
+    def check_quota_over_limit(self):
+        """
+        check whether the provider quota is over its limit.
+
+        :return:
+        """
+        if self.provider.provider_type != ProviderType.SYSTEM.value:
+            return
+
+        rules = self.get_rules()
+        if 'system' not in rules['support_provider_types']:
+            return
+
+        provider = db.session.query(Provider).filter(
+            db.and_(
+                Provider.id == self.provider.id,
+                Provider.is_valid == True,
+                Provider.quota_limit > Provider.quota_used
+            )
+        ).first()
+
+        if not provider:
+            raise QuotaExceededError()
+
+    def deduct_quota(self, used_tokens: int = 0) -> None:
+        """
+        deduct available quota when provider type is system or paid.
+
+        :return:
+        """
+        if self.provider.provider_type != ProviderType.SYSTEM.value:
+            return
+
+        rules = self.get_rules()
+        if 'system' not in rules['support_provider_types']:
+            return
+
+        if not self.should_deduct_quota():
+            return
+
+        if 'system_config' not in rules:
+            quota_unit = ProviderQuotaUnit.TIMES.value
+        elif 'quota_unit' not in rules['system_config']:
+            quota_unit = ProviderQuotaUnit.TIMES.value
+        else:
+            quota_unit = rules['system_config']['quota_unit']
+
+        if quota_unit == ProviderQuotaUnit.TOKENS.value:
+            used_quota = used_tokens
+        else:
+            used_quota = 1
+
+        db.session.query(Provider).filter(
+            Provider.tenant_id == self.provider.tenant_id,
+            Provider.provider_name == self.provider.provider_name,
+            Provider.provider_type == self.provider.provider_type,
+            Provider.quota_type == self.provider.quota_type,
+            Provider.quota_limit > Provider.quota_used
+        ).update({'quota_used': Provider.quota_used + used_quota})
+        db.session.commit()
+
+    def should_deduct_quota(self):
+        return False
+
+    def update_last_used(self) -> None:
+        """
+        update last used time.
+
+        :return:
+        """
+        db.session.query(Provider).filter(
+            Provider.tenant_id == self.provider.tenant_id,
+            Provider.provider_name == self.provider.provider_name
+        ).update({'last_used': datetime.utcnow()})
+        db.session.commit()
+
+    def get_payment_info(self) -> Optional[dict]:
+        """
+        get payment info if it is payable.
+
+        :return:
+        """
+        return None
+
+    def _get_provider_model(self, model_name: str, model_type: ModelType) -> ProviderModel:
+        """
+        get provider model.
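check_quota_over_limit and deduct_quota above split hosted-quota handling into a pre-flight check (raising QuotaExceededError) and a post-call deduction whose unit comes from the provider rules: 'tokens' bills actual usage, anything else bills one per call. A sketch that collapses both steps into one helper over an in-memory record instead of the Provider row:

provider = {'quota_limit': 1000, 'quota_used': 0}

def consume_quota(quota_unit: str, used_tokens: int = 0) -> None:
    # 'tokens' quotas bill actual usage; 'times' quotas bill one per call.
    used = used_tokens if quota_unit == 'tokens' else 1
    if provider['quota_used'] + used > provider['quota_limit']:
        raise RuntimeError('quota exceeded')  # stand-in for QuotaExceededError
    provider['quota_used'] += used

consume_quota('tokens', used_tokens=250)
consume_quota('times')
print(provider['quota_used'])  # 251

Note that the real code applies the deduction as a single UPDATE filtered on quota_limit > quota_used, pushing the limit check into the database rather than into application code.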
+ + :param model_name: + :param model_type: + :return: + """ + provider_model = db.session.query(ProviderModel).filter( + ProviderModel.tenant_id == self.provider.tenant_id, + ProviderModel.provider_name == self.provider.provider_name, + ProviderModel.model_name == model_name, + ProviderModel.model_type == model_type.value, + ProviderModel.is_valid == True + ).first() + + if not provider_model: + raise LLMBadRequestError(f"The model {model_name} does not exist. " + f"Please check the configuration.") + + return provider_model + + +class CredentialsValidateFailedError(Exception): + pass diff --git a/api/core/model_providers/providers/chatglm_provider.py b/api/core/model_providers/providers/chatglm_provider.py new file mode 100644 index 000000000..f905da6f2 --- /dev/null +++ b/api/core/model_providers/providers/chatglm_provider.py @@ -0,0 +1,157 @@ +import json +from json import JSONDecodeError +from typing import Type + +from langchain.llms import ChatGLM + +from core.helper import encrypter +from core.model_providers.models.base import BaseProviderModel +from core.model_providers.models.entity.model_params import ModelKwargsRules, KwargRule, ModelType +from core.model_providers.models.llm.chatglm_model import ChatGLMModel +from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError +from models.provider import ProviderType + + +class ChatGLMProvider(BaseModelProvider): + + @property + def provider_name(self): + """ + Returns the name of a provider. + """ + return 'chatglm' + + def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]: + if model_type == ModelType.TEXT_GENERATION: + return [ + { + 'id': 'chatglm2-6b', + 'name': 'ChatGLM2-6B', + }, + { + 'id': 'chatglm-6b', + 'name': 'ChatGLM-6B', + } + ] + else: + return [] + + def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]: + """ + Returns the model class. + + :param model_type: + :return: + """ + if model_type == ModelType.TEXT_GENERATION: + model_class = ChatGLMModel + else: + raise NotImplementedError + + return model_class + + def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules: + """ + get model parameter rules. + + :param model_name: + :param model_type: + :return: + """ + model_max_tokens = { + 'chatglm-6b': 2000, + 'chatglm2-6b': 32000, + } + + return ModelKwargsRules( + temperature=KwargRule[float](min=0, max=2, default=1), + top_p=KwargRule[float](min=0, max=1, default=0.7), + presence_penalty=KwargRule[float](enabled=False), + frequency_penalty=KwargRule[float](enabled=False), + max_tokens=KwargRule[int](alias='max_token', min=10, max=model_max_tokens.get(model_name), default=2048), + ) + + @classmethod + def is_provider_credentials_valid_or_raise(cls, credentials: dict): + """ + Validates the given credentials. 
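Several providers in this patch (ChatGLM here, and Spark, Tongyi, and Wenxin below) decrypt stored secrets with encrypter.decrypt_token and only then pass them through encrypter.obfuscated_token for display. The encrypter module itself is not part of this hunk, so the helper below is only a guess at plausible obfuscation behavior (keep a short prefix and suffix, mask the middle), included to show why decryption must happen before obfuscation: masking the ciphertext would give the user nothing recognizable.

def obfuscated_token(token: str) -> str:
    # Guessed behavior, not the PR's implementation: keep a short
    # prefix/suffix of the plaintext token and mask everything between.
    if not token or len(token) <= 8:
        return '*' * len(token or '')
    return token[:6] + '*' * (len(token) - 8) + token[-2:]

print(obfuscated_token('sk-abcdefghijklmnop'))  # sk-abc***********op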
+ """ + if 'api_base' not in credentials: + raise CredentialsValidateFailedError('ChatGLM Endpoint URL must be provided.') + + try: + credential_kwargs = { + 'endpoint_url': credentials['api_base'] + } + + llm = ChatGLM( + max_token=10, + **credential_kwargs + ) + + llm("ping") + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) + + @classmethod + def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict: + credentials['api_base'] = encrypter.encrypt_token(tenant_id, credentials['api_base']) + return credentials + + def get_provider_credentials(self, obfuscated: bool = False) -> dict: + if self.provider.provider_type == ProviderType.CUSTOM.value: + try: + credentials = json.loads(self.provider.encrypted_config) + except JSONDecodeError: + credentials = { + 'api_base': None + } + + if credentials['api_base']: + credentials['api_base'] = encrypter.decrypt_token( + self.provider.tenant_id, + credentials['api_base'] + ) + + if obfuscated: + credentials['api_base'] = encrypter.obfuscated_token(credentials['api_base']) + + return credentials + + return {} + + @classmethod + def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict): + """ + check model credentials valid. + + :param model_name: + :param model_type: + :param credentials: + """ + return + + @classmethod + def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType, + credentials: dict) -> dict: + """ + encrypt model credentials for save. + + :param tenant_id: + :param model_name: + :param model_type: + :param credentials: + :return: + """ + return {} + + def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict: + """ + get credentials for llm use. + + :param model_name: + :param model_type: + :param obfuscated: + :return: + """ + return self.get_provider_credentials(obfuscated) diff --git a/api/core/model_providers/providers/hosted.py b/api/core/model_providers/providers/hosted.py new file mode 100644 index 000000000..b34153d0a --- /dev/null +++ b/api/core/model_providers/providers/hosted.py @@ -0,0 +1,76 @@ +import os +from typing import Optional + +import langchain +from flask import Flask +from pydantic import BaseModel + + +class HostedOpenAI(BaseModel): + api_base: str = None + api_organization: str = None + api_key: str + quota_limit: int = 0 + """Quota limit for the openai hosted model. 0 means unlimited.""" + paid_enabled: bool = False + paid_stripe_price_id: str = None + paid_increase_quota: int = 1 + + +class HostedAzureOpenAI(BaseModel): + api_base: str + api_key: str + quota_limit: int = 0 + """Quota limit for the azure openai hosted model. 0 means unlimited.""" + + +class HostedAnthropic(BaseModel): + api_base: str = None + api_key: str + quota_limit: int = 0 + """Quota limit for the anthropic hosted model. 
0 means unlimited.""" + paid_enabled: bool = False + paid_stripe_price_id: str = None + paid_increase_quota: int = 1 + + +class HostedModelProviders(BaseModel): + openai: Optional[HostedOpenAI] = None + azure_openai: Optional[HostedAzureOpenAI] = None + anthropic: Optional[HostedAnthropic] = None + + +hosted_model_providers = HostedModelProviders() + + +def init_app(app: Flask): + if os.environ.get("DEBUG") and os.environ.get("DEBUG").lower() == 'true': + langchain.verbose = True + + if app.config.get("HOSTED_OPENAI_ENABLED"): + hosted_model_providers.openai = HostedOpenAI( + api_base=app.config.get("HOSTED_OPENAI_API_BASE"), + api_organization=app.config.get("HOSTED_OPENAI_API_ORGANIZATION"), + api_key=app.config.get("HOSTED_OPENAI_API_KEY"), + quota_limit=app.config.get("HOSTED_OPENAI_QUOTA_LIMIT"), + paid_enabled=app.config.get("HOSTED_OPENAI_PAID_ENABLED"), + paid_stripe_price_id=app.config.get("HOSTED_OPENAI_PAID_STRIPE_PRICE_ID"), + paid_increase_quota=app.config.get("HOSTED_OPENAI_PAID_INCREASE_QUOTA"), + ) + + if app.config.get("HOSTED_AZURE_OPENAI_ENABLED"): + hosted_model_providers.azure_openai = HostedAzureOpenAI( + api_base=app.config.get("HOSTED_AZURE_OPENAI_API_BASE"), + api_key=app.config.get("HOSTED_AZURE_OPENAI_API_KEY"), + quota_limit=app.config.get("HOSTED_AZURE_OPENAI_QUOTA_LIMIT"), + ) + + if app.config.get("HOSTED_ANTHROPIC_ENABLED"): + hosted_model_providers.anthropic = HostedAnthropic( + api_base=app.config.get("HOSTED_ANTHROPIC_API_BASE"), + api_key=app.config.get("HOSTED_ANTHROPIC_API_KEY"), + quota_limit=app.config.get("HOSTED_ANTHROPIC_QUOTA_LIMIT"), + paid_enabled=app.config.get("HOSTED_ANTHROPIC_PAID_ENABLED"), + paid_stripe_price_id=app.config.get("HOSTED_ANTHROPIC_PAID_STRIPE_PRICE_ID"), + paid_increase_quota=app.config.get("HOSTED_ANTHROPIC_PAID_INCREASE_QUOTA"), + ) diff --git a/api/core/model_providers/providers/huggingface_hub_provider.py b/api/core/model_providers/providers/huggingface_hub_provider.py new file mode 100644 index 000000000..ded94e2a4 --- /dev/null +++ b/api/core/model_providers/providers/huggingface_hub_provider.py @@ -0,0 +1,183 @@ +import json +from typing import Type + +from huggingface_hub import HfApi +from langchain.llms import HuggingFaceEndpoint + +from core.helper import encrypter +from core.model_providers.models.entity.model_params import KwargRule, ModelKwargsRules, ModelType +from core.model_providers.models.llm.huggingface_hub_model import HuggingfaceHubModel +from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError + +from core.model_providers.models.base import BaseProviderModel +from models.provider import ProviderType + + +class HuggingfaceHubProvider(BaseModelProvider): + @property + def provider_name(self): + """ + Returns the name of a provider. + """ + return 'huggingface_hub' + + def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]: + return [] + + def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]: + """ + Returns the model class. + + :param model_type: + :return: + """ + if model_type == ModelType.TEXT_GENERATION: + model_class = HuggingfaceHubModel + else: + raise NotImplementedError + + return model_class + + def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules: + """ + get model parameter rules. 
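hosted.py above constructs each hosted provider's config only when its feature flag is enabled, leaving the field None so callers can simply test hosted_model_providers.openai and friends. A dependency-free sketch of the same pattern using a stdlib dataclass instead of pydantic; the field and env-variable names here are illustrative:

import os
from dataclasses import dataclass
from typing import Optional

@dataclass
class HostedProvider:
    api_key: str
    quota_limit: int = 0  # 0 means unlimited, matching the docstrings above

def load_hosted_provider(prefix: str) -> Optional[HostedProvider]:
    # Only build the config when the corresponding feature flag is set.
    if os.environ.get(f'{prefix}_ENABLED', '').lower() != 'true':
        return None
    return HostedProvider(
        api_key=os.environ.get(f'{prefix}_API_KEY', ''),
        quota_limit=int(os.environ.get(f'{prefix}_QUOTA_LIMIT', '0') or '0'),
    )

print(load_hosted_provider('HOSTED_OPENAI'))  # None unless HOSTED_OPENAI_ENABLED=true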
+ + :param model_name: + :param model_type: + :return: + """ + return ModelKwargsRules( + temperature=KwargRule[float](min=0, max=2, default=1), + top_p=KwargRule[float](min=0.01, max=0.99, default=0.7), + presence_penalty=KwargRule[float](enabled=False), + frequency_penalty=KwargRule[float](enabled=False), + max_tokens=KwargRule[int](alias='max_new_tokens', min=10, max=1500, default=200), + ) + + @classmethod + def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict): + """ + check model credentials valid. + + :param model_name: + :param model_type: + :param credentials: + """ + if model_type != ModelType.TEXT_GENERATION: + raise NotImplementedError + + if 'huggingfacehub_api_type' not in credentials \ + or credentials['huggingfacehub_api_type'] not in ['hosted_inference_api', 'inference_endpoints']: + raise CredentialsValidateFailedError('Hugging Face Hub API Type invalid, ' + 'must be hosted_inference_api or inference_endpoints.') + + if 'huggingfacehub_api_token' not in credentials: + raise CredentialsValidateFailedError('Hugging Face Hub API Token must be provided.') + + hfapi = HfApi(token=credentials['huggingfacehub_api_token']) + + try: + hfapi.whoami() + except Exception: + raise CredentialsValidateFailedError("Invalid API Token.") + + if credentials['huggingfacehub_api_type'] == 'inference_endpoints': + if 'huggingfacehub_endpoint_url' not in credentials: + raise CredentialsValidateFailedError('Hugging Face Hub Endpoint URL must be provided.') + + try: + llm = HuggingFaceEndpoint( + endpoint_url=credentials['huggingfacehub_endpoint_url'], + task="text2text-generation", + model_kwargs={"temperature": 0.5, "max_new_tokens": 200}, + huggingfacehub_api_token=credentials['huggingfacehub_api_token'] + ) + + llm("ping") + except Exception as e: + raise CredentialsValidateFailedError(f"{e.__class__.__name__}:{str(e)}") + else: + try: + model_info = hfapi.model_info(repo_id=model_name) + if not model_info: + raise ValueError(f'Model {model_name} not found.') + + if 'inference' in model_info.cardData and not model_info.cardData['inference']: + raise ValueError(f'Inference API has been turned off for this model {model_name}.') + + VALID_TASKS = ("text2text-generation", "text-generation", "summarization") + if model_info.pipeline_tag not in VALID_TASKS: + raise ValueError(f"Model {model_name} is not a valid task, " + f"must be one of {VALID_TASKS}.") + except Exception as e: + raise CredentialsValidateFailedError(f"{e.__class__.__name__}:{str(e)}") + + @classmethod + def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType, + credentials: dict) -> dict: + """ + encrypt model credentials for save. 
+ + :param tenant_id: + :param model_name: + :param model_type: + :param credentials: + :return: + """ + credentials['huggingfacehub_api_token'] = encrypter.encrypt_token(tenant_id, credentials['huggingfacehub_api_token']) + + if credentials['huggingfacehub_api_type'] == 'hosted_inference_api': + hfapi = HfApi(token=credentials['huggingfacehub_api_token']) + model_info = hfapi.model_info(repo_id=model_name) + if not model_info: + raise ValueError(f'Model {model_name} not found.') + + if 'inference' in model_info.cardData and not model_info.cardData['inference']: + raise ValueError(f'Inference API has been turned off for this model {model_name}.') + + credentials['task_type'] = model_info.pipeline_tag + + return credentials + + def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict: + """ + get credentials for llm use. + + :param model_name: + :param model_type: + :param obfuscated: + :return: + """ + if self.provider.provider_type != ProviderType.CUSTOM.value: + raise NotImplementedError + + provider_model = self._get_provider_model(model_name, model_type) + + if not provider_model.encrypted_config: + return { + 'huggingfacehub_api_token': None, + 'task_type': None + } + + credentials = json.loads(provider_model.encrypted_config) + if credentials['huggingfacehub_api_token']: + credentials['huggingfacehub_api_token'] = encrypter.decrypt_token( + self.provider.tenant_id, + credentials['huggingfacehub_api_token'] + ) + + if obfuscated: + credentials['huggingfacehub_api_token'] = encrypter.obfuscated_token(credentials['huggingfacehub_api_token']) + + return credentials + + @classmethod + def is_provider_credentials_valid_or_raise(cls, credentials: dict): + return + + @classmethod + def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict: + return {} + + def get_provider_credentials(self, obfuscated: bool = False) -> dict: + return {} diff --git a/api/core/model_providers/providers/minimax_provider.py b/api/core/model_providers/providers/minimax_provider.py new file mode 100644 index 000000000..46ec84a6d --- /dev/null +++ b/api/core/model_providers/providers/minimax_provider.py @@ -0,0 +1,179 @@ +import json +from json import JSONDecodeError +from typing import Type + +from langchain.llms import Minimax + +from core.helper import encrypter +from core.model_providers.models.base import BaseProviderModel +from core.model_providers.models.embedding.minimax_embedding import MinimaxEmbedding +from core.model_providers.models.entity.model_params import ModelKwargsRules, KwargRule, ModelType +from core.model_providers.models.llm.minimax_model import MinimaxModel +from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError +from models.provider import ProviderType, ProviderQuotaType + + +class MinimaxProvider(BaseModelProvider): + + @property + def provider_name(self): + """ + Returns the name of a provider. + """ + return 'minimax' + + def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]: + if model_type == ModelType.TEXT_GENERATION: + return [ + { + 'id': 'abab5.5-chat', + 'name': 'abab5.5-chat', + }, + { + 'id': 'abab5-chat', + 'name': 'abab5-chat', + } + ] + elif model_type == ModelType.EMBEDDINGS: + return [ + { + 'id': 'embo-01', + 'name': 'embo-01', + } + ] + else: + return [] + + def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]: + """ + Returns the model class. 
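Every provider in this patch implements get_model_class as the same ModelType-to-class dispatch, written as an if/elif chain that raises NotImplementedError for unsupported types. The same contract can be phrased as a lookup table; the classes and the trimmed enum below are placeholders, not the PR's types:

from enum import Enum

class ModelType(Enum):  # trimmed, illustrative version of the PR's enum
    TEXT_GENERATION = 'text-generation'
    EMBEDDINGS = 'embeddings'

class TextGenerationModel: ...
class EmbeddingModel: ...

MODEL_CLASSES = {
    ModelType.TEXT_GENERATION: TextGenerationModel,
    ModelType.EMBEDDINGS: EmbeddingModel,
}

def get_model_class(model_type: ModelType):
    # Table lookup replaces the if/elif chain; unknown types still raise.
    try:
        return MODEL_CLASSES[model_type]
    except KeyError:
        raise NotImplementedError(model_type)

print(get_model_class(ModelType.TEXT_GENERATION).__name__)  # TextGenerationModel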
+ + :param model_type: + :return: + """ + if model_type == ModelType.TEXT_GENERATION: + model_class = MinimaxModel + elif model_type == ModelType.EMBEDDINGS: + model_class = MinimaxEmbedding + else: + raise NotImplementedError + + return model_class + + def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules: + """ + get model parameter rules. + + :param model_name: + :param model_type: + :return: + """ + model_max_tokens = { + 'abab5.5-chat': 16384, + 'abab5-chat': 6144, + } + + return ModelKwargsRules( + temperature=KwargRule[float](min=0.01, max=1, default=0.9), + top_p=KwargRule[float](min=0, max=1, default=0.95), + presence_penalty=KwargRule[float](enabled=False), + frequency_penalty=KwargRule[float](enabled=False), + max_tokens=KwargRule[int](min=10, max=model_max_tokens.get(model_name, 6144), default=1024), + ) + + @classmethod + def is_provider_credentials_valid_or_raise(cls, credentials: dict): + """ + Validates the given credentials. + """ + if 'minimax_group_id' not in credentials: + raise CredentialsValidateFailedError('MiniMax Group ID must be provided.') + + if 'minimax_api_key' not in credentials: + raise CredentialsValidateFailedError('MiniMax API Key must be provided.') + + try: + credential_kwargs = { + 'minimax_group_id': credentials['minimax_group_id'], + 'minimax_api_key': credentials['minimax_api_key'], + } + + llm = Minimax( + model='abab5.5-chat', + max_tokens=10, + temperature=0.01, + **credential_kwargs + ) + + llm("ping") + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) + + @classmethod + def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict: + credentials['minimax_api_key'] = encrypter.encrypt_token(tenant_id, credentials['minimax_api_key']) + return credentials + + def get_provider_credentials(self, obfuscated: bool = False) -> dict: + if self.provider.provider_type == ProviderType.CUSTOM.value \ + or (self.provider.provider_type == ProviderType.SYSTEM.value + and self.provider.quota_type == ProviderQuotaType.FREE.value): + try: + credentials = json.loads(self.provider.encrypted_config) + except JSONDecodeError: + credentials = { + 'minimax_group_id': None, + 'minimax_api_key': None, + } + + if credentials['minimax_api_key']: + credentials['minimax_api_key'] = encrypter.decrypt_token( + self.provider.tenant_id, + credentials['minimax_api_key'] + ) + + if obfuscated: + credentials['minimax_api_key'] = encrypter.obfuscated_token(credentials['minimax_api_key']) + + return credentials + + return {} + + def should_deduct_quota(self): + return True + + @classmethod + def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict): + """ + check model credentials valid. + + :param model_name: + :param model_type: + :param credentials: + """ + return + + @classmethod + def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType, + credentials: dict) -> dict: + """ + encrypt model credentials for save. + + :param tenant_id: + :param model_name: + :param model_type: + :param credentials: + :return: + """ + return {} + + def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict: + """ + get credentials for llm use. 
+ + :param model_name: + :param model_type: + :param obfuscated: + :return: + """ + return self.get_provider_credentials(obfuscated) diff --git a/api/core/model_providers/providers/openai_provider.py b/api/core/model_providers/providers/openai_provider.py new file mode 100644 index 000000000..0041d23ca --- /dev/null +++ b/api/core/model_providers/providers/openai_provider.py @@ -0,0 +1,289 @@ +import json +import logging +from json import JSONDecodeError +from typing import Type, Optional + +from flask import current_app +from openai.error import AuthenticationError, OpenAIError + +import openai + +from core.helper import encrypter +from core.model_providers.models.entity.provider import ModelFeature +from core.model_providers.models.speech2text.openai_whisper import OpenAIWhisper +from core.model_providers.models.base import BaseProviderModel +from core.model_providers.models.embedding.openai_embedding import OpenAIEmbedding +from core.model_providers.models.entity.model_params import ModelKwargsRules, KwargRule, ModelType +from core.model_providers.models.llm.openai_model import OpenAIModel +from core.model_providers.models.moderation.openai_moderation import OpenAIModeration +from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError +from core.model_providers.providers.hosted import hosted_model_providers +from models.provider import ProviderType, ProviderQuotaType + + +class OpenAIProvider(BaseModelProvider): + + @property + def provider_name(self): + """ + Returns the name of a provider. + """ + return 'openai' + + def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]: + if model_type == ModelType.TEXT_GENERATION: + models = [ + { + 'id': 'gpt-3.5-turbo', + 'name': 'gpt-3.5-turbo', + 'features': [ + ModelFeature.AGENT_THOUGHT.value + ] + }, + { + 'id': 'gpt-3.5-turbo-16k', + 'name': 'gpt-3.5-turbo-16k', + 'features': [ + ModelFeature.AGENT_THOUGHT.value + ] + }, + { + 'id': 'gpt-4', + 'name': 'gpt-4', + 'features': [ + ModelFeature.AGENT_THOUGHT.value + ] + }, + { + 'id': 'gpt-4-32k', + 'name': 'gpt-4-32k', + 'features': [ + ModelFeature.AGENT_THOUGHT.value + ] + }, + { + 'id': 'text-davinci-003', + 'name': 'text-davinci-003', + } + ] + + if self.provider.provider_type == ProviderType.SYSTEM.value \ + and self.provider.quota_type == ProviderQuotaType.TRIAL.value: + models = [item for item in models if item['id'] not in ['gpt-4', 'gpt-4-32k']] + + return models + elif model_type == ModelType.EMBEDDINGS: + return [ + { + 'id': 'text-embedding-ada-002', + 'name': 'text-embedding-ada-002' + } + ] + elif model_type == ModelType.SPEECH_TO_TEXT: + return [ + { + 'id': 'whisper-1', + 'name': 'whisper-1' + } + ] + elif model_type == ModelType.MODERATION: + return [ + { + 'id': 'text-moderation-stable', + 'name': 'text-moderation-stable' + } + ] + else: + return [] + + def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]: + """ + Returns the model class. + + :param model_type: + :return: + """ + if model_type == ModelType.TEXT_GENERATION: + model_class = OpenAIModel + elif model_type == ModelType.EMBEDDINGS: + model_class = OpenAIEmbedding + elif model_type == ModelType.MODERATION: + model_class = OpenAIModeration + elif model_type == ModelType.SPEECH_TO_TEXT: + model_class = OpenAIWhisper + else: + raise NotImplementedError + + return model_class + + def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules: + """ + get model parameter rules. 
+ + :param model_name: + :param model_type: + :return: + """ + model_max_tokens = { + 'gpt-4': 8192, + 'gpt-4-32k': 32768, + 'gpt-3.5-turbo': 4096, + 'gpt-3.5-turbo-16k': 16384, + 'text-davinci-003': 4097, + } + + return ModelKwargsRules( + temperature=KwargRule[float](min=0, max=2, default=1), + top_p=KwargRule[float](min=0, max=1, default=1), + presence_penalty=KwargRule[float](min=-2, max=2, default=0), + frequency_penalty=KwargRule[float](min=-2, max=2, default=0), + max_tokens=KwargRule[int](min=10, max=model_max_tokens.get(model_name, 4097), default=16), + ) + + @classmethod + def is_provider_credentials_valid_or_raise(cls, credentials: dict): + """ + Validates the given credentials. + """ + if 'openai_api_key' not in credentials: + raise CredentialsValidateFailedError('OpenAI API key is required') + + try: + credentials_kwargs = { + "api_key": credentials['openai_api_key'] + } + + if 'openai_api_base' in credentials and credentials['openai_api_base']: + credentials_kwargs['api_base'] = credentials['openai_api_base'] + '/v1' + + if 'openai_organization' in credentials: + credentials_kwargs['organization'] = credentials['openai_organization'] + + openai.ChatCompletion.create( + messages=[{"role": "user", "content": 'ping'}], + model='gpt-3.5-turbo', + timeout=10, + request_timeout=(5, 30), + max_tokens=20, + **credentials_kwargs + ) + except (AuthenticationError, OpenAIError) as ex: + raise CredentialsValidateFailedError(str(ex)) + except Exception as ex: + logging.exception('OpenAI config validation failed') + raise ex + + @classmethod + def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict: + credentials['openai_api_key'] = encrypter.encrypt_token(tenant_id, credentials['openai_api_key']) + return credentials + + def get_provider_credentials(self, obfuscated: bool = False) -> dict: + if self.provider.provider_type == ProviderType.CUSTOM.value: + try: + credentials = json.loads(self.provider.encrypted_config) + except JSONDecodeError: + credentials = { + 'openai_api_base': None, + 'openai_api_key': self.provider.encrypted_config, + 'openai_organization': None + } + + if credentials['openai_api_key']: + credentials['openai_api_key'] = encrypter.decrypt_token( + self.provider.tenant_id, + credentials['openai_api_key'] + ) + + if obfuscated: + credentials['openai_api_key'] = encrypter.obfuscated_token(credentials['openai_api_key']) + + if 'openai_api_base' not in credentials or not credentials['openai_api_base']: + credentials['openai_api_base'] = None + else: + credentials['openai_api_base'] = credentials['openai_api_base'] + '/v1' + + if 'openai_organization' not in credentials: + credentials['openai_organization'] = None + + return credentials + else: + if hosted_model_providers.openai: + return { + 'openai_api_base': hosted_model_providers.openai.api_base, + 'openai_api_key': hosted_model_providers.openai.api_key, + 'openai_organization': hosted_model_providers.openai.api_organization + } + else: + return { + 'openai_api_base': None, + 'openai_api_key': None, + 'openai_organization': None + } + + @classmethod + def is_provider_type_system_supported(cls) -> bool: + if current_app.config['EDITION'] != 'CLOUD': + return False + + if hosted_model_providers.openai: + return True + + return False + + def should_deduct_quota(self): + if hosted_model_providers.openai \ + and hosted_model_providers.openai.quota_limit and hosted_model_providers.openai.quota_limit > 0: + return True + + return False + + def get_payment_info(self) -> Optional[dict]: + """ + get 
payment info if it is payable.
+
+        :return:
+        """
+        if hosted_model_providers.openai \
+                and hosted_model_providers.openai.paid_enabled:
+            return {
+                'product_id': hosted_model_providers.openai.paid_stripe_price_id,
+                'increase_quota': hosted_model_providers.openai.paid_increase_quota,
+            }
+
+        return None
+
+    @classmethod
+    def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict):
+        """
+        check model credentials valid.
+
+        :param model_name:
+        :param model_type:
+        :param credentials:
+        """
+        return
+
+    @classmethod
+    def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType, credentials: dict) -> dict:
+        """
+        encrypt model credentials for save.
+
+        :param tenant_id:
+        :param model_name:
+        :param model_type:
+        :param credentials:
+        :return:
+        """
+        return {}
+
+    def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict:
+        """
+        get credentials for llm use.
+
+        :param model_name:
+        :param model_type:
+        :param obfuscated:
+        :return:
+        """
+        return self.get_provider_credentials(obfuscated)
diff --git a/api/core/model_providers/providers/replicate_provider.py b/api/core/model_providers/providers/replicate_provider.py
new file mode 100644
index 000000000..404ca1c57
--- /dev/null
+++ b/api/core/model_providers/providers/replicate_provider.py
@@ -0,0 +1,184 @@
+import json
+import logging
+from typing import Type
+
+import replicate
+from replicate.exceptions import ReplicateError
+
+from core.helper import encrypter
+from core.model_providers.models.entity.model_params import KwargRule, KwargRuleType, ModelKwargsRules, ModelType
+from core.model_providers.models.llm.replicate_model import ReplicateModel
+from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError
+
+from core.model_providers.models.base import BaseProviderModel
+from core.model_providers.models.embedding.replicate_embedding import ReplicateEmbedding
+from models.provider import ProviderType
+
+
+class ReplicateProvider(BaseModelProvider):
+    @property
+    def provider_name(self):
+        """
+        Returns the name of a provider.
+        """
+        return 'replicate'
+
+    def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]:
+        return []
+
+    def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]:
+        """
+        Returns the model class.
+
+        :param model_type:
+        :return:
+        """
+        if model_type == ModelType.TEXT_GENERATION:
+            model_class = ReplicateModel
+        elif model_type == ModelType.EMBEDDINGS:
+            model_class = ReplicateEmbedding
+        else:
+            raise NotImplementedError
+
+        return model_class
+
+    def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules:
+        """
+        get model parameter rules.
+
+        :param model_name:
+        :param model_type:
+        :return:
+        """
+        model_credentials = self.get_model_credentials(model_name, model_type)
+
+        model = replicate.Client(api_token=model_credentials.get("replicate_api_token")).models.get(model_name)
+
+        try:
+            version = model.versions.get(model_credentials['model_version'])
+        except ReplicateError as e:
+            raise CredentialsValidateFailedError(f"Model {model_name}:{model_credentials['model_version']} does not exist, "
+                                                 f"cause: {e.__class__.__name__}:{str(e)}")
+        except Exception as e:
+            logging.exception("Model validate failed.")
+            raise e
+
+        model_kwargs_rules = ModelKwargsRules()
+        for key, value in version.openapi_schema['components']['schemas']['Input']['properties'].items():
+            if key not in ['debug', 'prompt'] and value['type'] in ['number', 'integer']:
+                if key in ['temperature', 'top_p']:
+                    kwarg_rule = KwargRule[float](
+                        type=KwargRuleType.FLOAT.value if value['type'] == 'number' else KwargRuleType.INTEGER.value,
+                        min=float(value.get('minimum')) if value.get('minimum') is not None else None,
+                        max=float(value.get('maximum')) if value.get('maximum') is not None else None,
+                        default=float(value.get('default')) if value.get('default') is not None else None,
+                    )
+                    if key == 'temperature':
+                        model_kwargs_rules.temperature = kwarg_rule
+                    else:
+                        model_kwargs_rules.top_p = kwarg_rule
+                elif key in ['max_length', 'max_new_tokens']:
+                    model_kwargs_rules.max_tokens = KwargRule[int](
+                        alias=key,
+                        type=KwargRuleType.INTEGER.value,
+                        min=int(value.get('minimum')) if value.get('minimum') is not None else 1,
+                        max=int(value.get('maximum')) if value.get('maximum') is not None else 8000,
+                        default=int(value.get('default')) if value.get('default') is not None else 500,
+                    )
+
+        return model_kwargs_rules
+
+    @classmethod
+    def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict):
+        """
+        check model credentials valid.
+
+        :param model_name:
+        :param model_type:
+        :param credentials:
+        """
+        if 'replicate_api_token' not in credentials:
+            raise CredentialsValidateFailedError('Replicate API Key must be provided.')
+
+        if 'model_version' not in credentials:
+            raise CredentialsValidateFailedError('Replicate Model Version must be provided.')
+
+        if model_name.count("/") != 1:
+            raise CredentialsValidateFailedError('Replicate Model Name must be provided, '
+                                                 'format: {user_name}/{model_name}')
+
+        version = credentials['model_version']
+        try:
+            model = replicate.Client(api_token=credentials.get("replicate_api_token")).models.get(model_name)
+            rst = model.versions.get(version)
+
+            if model_type == ModelType.EMBEDDINGS \
+                    and 'Embedding' not in rst.openapi_schema['components']['schemas']:
+                raise CredentialsValidateFailedError(f"Model {model_name}:{version} is not an Embedding model.")
+            elif model_type == ModelType.TEXT_GENERATION \
+                    and ('type' not in rst.openapi_schema['components']['schemas']['Output']['items']
+                         or rst.openapi_schema['components']['schemas']['Output']['items']['type'] != 'string'):
+                raise CredentialsValidateFailedError(f"Model {model_name}:{version} is not a Text Generation model.")
+        except ReplicateError as e:
+            raise CredentialsValidateFailedError(
+                f"Model {model_name}:{version} does not exist, cause: {e.__class__.__name__}:{str(e)}")
+        except Exception as e:
+            logging.exception("Replicate config validation failed.")
+            raise e
+
+    @classmethod
+    def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType,
+                                  credentials: dict) -> dict:
+        """
+        encrypt model credentials for save.
+
+        :param tenant_id:
+        :param model_name:
+        :param model_type:
+        :param credentials:
+        :return:
+        """
+        credentials['replicate_api_token'] = encrypter.encrypt_token(tenant_id, credentials['replicate_api_token'])
+        return credentials
+
+    def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict:
+        """
+        get credentials for llm use.
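Unlike the fixed rule tables of the other providers, ReplicateProvider derives its parameter rules from the per-version OpenAPI schema that Replicate publishes, as get_model_parameter_rules above shows. A self-contained sketch of that walk over a handmade Input-properties dict (not a real model's schema):

# Handmade example of a Replicate version's Input schema properties.
input_properties = {
    'prompt': {'type': 'string'},
    'temperature': {'type': 'number', 'minimum': 0.01, 'maximum': 5, 'default': 0.75},
    'max_new_tokens': {'type': 'integer', 'minimum': 1, 'maximum': 4096, 'default': 500},
}

rules = {}
for key, spec in input_properties.items():
    # Pick out the numeric sampling knobs; non-numeric fields like 'prompt' are skipped.
    if key in ('temperature', 'top_p') and spec['type'] in ('number', 'integer'):
        rules[key] = {'min': spec.get('minimum'), 'max': spec.get('maximum'),
                      'default': spec.get('default')}
    elif key in ('max_length', 'max_new_tokens'):
        rules['max_tokens'] = {'alias': key, 'min': spec.get('minimum', 1),
                               'max': spec.get('maximum', 8000), 'default': spec.get('default', 500)}

print(rules['temperature'])  # {'min': 0.01, 'max': 5, 'default': 0.75}
print(rules['max_tokens'])   # {'alias': 'max_new_tokens', 'min': 1, 'max': 4096, 'default': 500}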
+ + :param model_name: + :param model_type: + :param obfuscated: + :return: + """ + if self.provider.provider_type != ProviderType.CUSTOM.value: + raise NotImplementedError + + provider_model = self._get_provider_model(model_name, model_type) + + if not provider_model.encrypted_config: + return { + 'replicate_api_token': None, + } + + credentials = json.loads(provider_model.encrypted_config) + if credentials['replicate_api_token']: + credentials['replicate_api_token'] = encrypter.decrypt_token( + self.provider.tenant_id, + credentials['replicate_api_token'] + ) + + if obfuscated: + credentials['replicate_api_token'] = encrypter.obfuscated_token(credentials['replicate_api_token']) + + return credentials + + @classmethod + def is_provider_credentials_valid_or_raise(cls, credentials: dict): + return + + @classmethod + def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict: + return {} + + def get_provider_credentials(self, obfuscated: bool = False) -> dict: + return {} diff --git a/api/core/model_providers/providers/spark_provider.py b/api/core/model_providers/providers/spark_provider.py new file mode 100644 index 000000000..7bcd060be --- /dev/null +++ b/api/core/model_providers/providers/spark_provider.py @@ -0,0 +1,191 @@ +import json +import logging +from json import JSONDecodeError +from typing import Type + +from flask import current_app +from langchain.schema import HumanMessage + +from core.helper import encrypter +from core.model_providers.models.base import BaseProviderModel +from core.model_providers.models.entity.model_params import ModelKwargsRules, KwargRule, ModelType +from core.model_providers.models.llm.spark_model import SparkModel +from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError +from core.third_party.langchain.llms.spark import ChatSpark +from core.third_party.spark.spark_llm import SparkError +from models.provider import ProviderType, ProviderQuotaType + + +class SparkProvider(BaseModelProvider): + + @property + def provider_name(self): + """ + Returns the name of a provider. + """ + return 'spark' + + def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]: + if model_type == ModelType.TEXT_GENERATION: + return [ + { + 'id': 'spark', + 'name': '星火认知大模型', + } + ] + else: + return [] + + def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]: + """ + Returns the model class. + + :param model_type: + :return: + """ + if model_type == ModelType.TEXT_GENERATION: + model_class = SparkModel + else: + raise NotImplementedError + + return model_class + + def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules: + """ + get model parameter rules. + + :param model_name: + :param model_type: + :return: + """ + return ModelKwargsRules( + temperature=KwargRule[float](min=0, max=1, default=0.5), + top_p=KwargRule[float](enabled=False), + presence_penalty=KwargRule[float](enabled=False), + frequency_penalty=KwargRule[float](enabled=False), + max_tokens=KwargRule[int](min=10, max=4096, default=2048), + ) + + @classmethod + def is_provider_credentials_valid_or_raise(cls, credentials: dict): + """ + Validates the given credentials. 
+ """ + if 'app_id' not in credentials: + raise CredentialsValidateFailedError('Spark app_id must be provided.') + + if 'api_key' not in credentials: + raise CredentialsValidateFailedError('Spark api_key must be provided.') + + if 'api_secret' not in credentials: + raise CredentialsValidateFailedError('Spark api_secret must be provided.') + + try: + credential_kwargs = { + 'app_id': credentials['app_id'], + 'api_key': credentials['api_key'], + 'api_secret': credentials['api_secret'], + } + + chat_llm = ChatSpark( + max_tokens=10, + temperature=0.01, + **credential_kwargs + ) + + messages = [ + HumanMessage( + content="ping" + ) + ] + + chat_llm(messages) + except SparkError as ex: + raise CredentialsValidateFailedError(str(ex)) + except Exception as ex: + logging.exception('Spark config validation failed') + raise ex + + @classmethod + def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict: + credentials['api_key'] = encrypter.encrypt_token(tenant_id, credentials['api_key']) + credentials['api_secret'] = encrypter.encrypt_token(tenant_id, credentials['api_secret']) + return credentials + + def get_provider_credentials(self, obfuscated: bool = False) -> dict: + if self.provider.provider_type == ProviderType.CUSTOM.value \ + or (self.provider.provider_type == ProviderType.SYSTEM.value + and self.provider.quota_type == ProviderQuotaType.FREE.value): + try: + credentials = json.loads(self.provider.encrypted_config) + except JSONDecodeError: + credentials = { + 'app_id': None, + 'api_key': None, + 'api_secret': None, + } + + if credentials['api_key']: + credentials['api_key'] = encrypter.decrypt_token( + self.provider.tenant_id, + credentials['api_key'] + ) + + if obfuscated: + credentials['api_key'] = encrypter.obfuscated_token(credentials['api_key']) + + if credentials['api_secret']: + credentials['api_secret'] = encrypter.decrypt_token( + self.provider.tenant_id, + credentials['api_secret'] + ) + + if obfuscated: + credentials['api_secret'] = encrypter.obfuscated_token(credentials['api_secret']) + + return credentials + else: + return { + 'app_id': None, + 'api_key': None, + 'api_secret': None, + } + + def should_deduct_quota(self): + return True + + @classmethod + def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict): + """ + check model credentials valid. + + :param model_name: + :param model_type: + :param credentials: + """ + return + + @classmethod + def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType, + credentials: dict) -> dict: + """ + encrypt model credentials for save. + + :param tenant_id: + :param model_name: + :param model_type: + :param credentials: + :return: + """ + return {} + + def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict: + """ + get credentials for llm use. 
+ + :param model_name: + :param model_type: + :param obfuscated: + :return: + """ + return self.get_provider_credentials(obfuscated) diff --git a/api/core/model_providers/providers/tongyi_provider.py b/api/core/model_providers/providers/tongyi_provider.py new file mode 100644 index 000000000..ffa7c72db --- /dev/null +++ b/api/core/model_providers/providers/tongyi_provider.py @@ -0,0 +1,157 @@ +import json +from json import JSONDecodeError +from typing import Type + +from core.helper import encrypter +from core.model_providers.models.base import BaseProviderModel +from core.model_providers.models.entity.model_params import ModelKwargsRules, KwargRule, ModelType +from core.model_providers.models.llm.tongyi_model import TongyiModel +from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError +from core.third_party.langchain.llms.tongyi_llm import EnhanceTongyi +from models.provider import ProviderType + + +class TongyiProvider(BaseModelProvider): + + @property + def provider_name(self): + """ + Returns the name of a provider. + """ + return 'tongyi' + + def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]: + if model_type == ModelType.TEXT_GENERATION: + return [ + { + 'id': 'qwen-v1', + 'name': 'qwen-v1', + }, + { + 'id': 'qwen-plus-v1', + 'name': 'qwen-plus-v1', + } + ] + else: + return [] + + def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]: + """ + Returns the model class. + + :param model_type: + :return: + """ + if model_type == ModelType.TEXT_GENERATION: + model_class = TongyiModel + else: + raise NotImplementedError + + return model_class + + def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules: + """ + get model parameter rules. + + :param model_name: + :param model_type: + :return: + """ + model_max_tokens = { + 'qwen-v1': 1500, + 'qwen-plus-v1': 6500 + } + + return ModelKwargsRules( + temperature=KwargRule[float](enabled=False), + top_p=KwargRule[float](min=0, max=1, default=0.8), + presence_penalty=KwargRule[float](enabled=False), + frequency_penalty=KwargRule[float](enabled=False), + max_tokens=KwargRule[int](min=10, max=model_max_tokens.get(model_name), default=1024), + ) + + @classmethod + def is_provider_credentials_valid_or_raise(cls, credentials: dict): + """ + Validates the given credentials. 
+ """ + if 'dashscope_api_key' not in credentials: + raise CredentialsValidateFailedError('Dashscope API Key must be provided.') + + try: + credential_kwargs = { + 'dashscope_api_key': credentials['dashscope_api_key'] + } + + llm = EnhanceTongyi( + model_name='qwen-v1', + max_retries=1, + **credential_kwargs + ) + + llm("ping") + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) + + @classmethod + def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict: + credentials['dashscope_api_key'] = encrypter.encrypt_token(tenant_id, credentials['dashscope_api_key']) + return credentials + + def get_provider_credentials(self, obfuscated: bool = False) -> dict: + if self.provider.provider_type == ProviderType.CUSTOM.value: + try: + credentials = json.loads(self.provider.encrypted_config) + except JSONDecodeError: + credentials = { + 'dashscope_api_key': None + } + + if credentials['dashscope_api_key']: + credentials['dashscope_api_key'] = encrypter.decrypt_token( + self.provider.tenant_id, + credentials['dashscope_api_key'] + ) + + if obfuscated: + credentials['dashscope_api_key'] = encrypter.obfuscated_token(credentials['dashscope_api_key']) + + return credentials + + return {} + + @classmethod + def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict): + """ + check model credentials valid. + + :param model_name: + :param model_type: + :param credentials: + """ + return + + @classmethod + def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType, + credentials: dict) -> dict: + """ + encrypt model credentials for save. + + :param tenant_id: + :param model_name: + :param model_type: + :param credentials: + :return: + """ + return {} + + def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict: + """ + get credentials for llm use. + + :param model_name: + :param model_type: + :param obfuscated: + :return: + """ + return self.get_provider_credentials(obfuscated) diff --git a/api/core/model_providers/providers/wenxin_provider.py b/api/core/model_providers/providers/wenxin_provider.py new file mode 100644 index 000000000..1c62b72d9 --- /dev/null +++ b/api/core/model_providers/providers/wenxin_provider.py @@ -0,0 +1,182 @@ +import json +from json import JSONDecodeError +from typing import Type + +from core.helper import encrypter +from core.model_providers.models.base import BaseProviderModel +from core.model_providers.models.entity.model_params import ModelKwargsRules, KwargRule, ModelType +from core.model_providers.models.llm.wenxin_model import WenxinModel +from core.model_providers.providers.base import BaseModelProvider, CredentialsValidateFailedError +from core.third_party.langchain.llms.wenxin import Wenxin +from models.provider import ProviderType + + +class WenxinProvider(BaseModelProvider): + + @property + def provider_name(self): + """ + Returns the name of a provider. + """ + return 'wenxin' + + def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]: + if model_type == ModelType.TEXT_GENERATION: + return [ + { + 'id': 'ernie-bot', + 'name': 'ERNIE-Bot', + }, + { + 'id': 'ernie-bot-turbo', + 'name': 'ERNIE-Bot-turbo', + }, + { + 'id': 'bloomz-7b', + 'name': 'BLOOMZ-7B', + } + ] + else: + return [] + + def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]: + """ + Returns the model class. 
+ + :param model_type: + :return: + """ + if model_type == ModelType.TEXT_GENERATION: + model_class = WenxinModel + else: + raise NotImplementedError + + return model_class + + def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules: + """ + get model parameter rules. + + :param model_name: + :param model_type: + :return: + """ + if model_name in ['ernie-bot', 'ernie-bot-turbo']: + return ModelKwargsRules( + temperature=KwargRule[float](min=0.01, max=1, default=0.95), + top_p=KwargRule[float](min=0.01, max=1, default=0.8), + presence_penalty=KwargRule[float](enabled=False), + frequency_penalty=KwargRule[float](enabled=False), + max_tokens=KwargRule[int](enabled=False), + ) + else: + return ModelKwargsRules( + temperature=KwargRule[float](enabled=False), + top_p=KwargRule[float](enabled=False), + presence_penalty=KwargRule[float](enabled=False), + frequency_penalty=KwargRule[float](enabled=False), + max_tokens=KwargRule[int](enabled=False), + ) + + @classmethod + def is_provider_credentials_valid_or_raise(cls, credentials: dict): + """ + Validates the given credentials. + """ + if 'api_key' not in credentials: + raise CredentialsValidateFailedError('Wenxin api_key must be provided.') + + if 'secret_key' not in credentials: + raise CredentialsValidateFailedError('Wenxin secret_key must be provided.') + + try: + credential_kwargs = { + 'api_key': credentials['api_key'], + 'secret_key': credentials['secret_key'], + } + + llm = Wenxin( + temperature=0.01, + **credential_kwargs + ) + + llm("ping") + except Exception as ex: + raise CredentialsValidateFailedError(str(ex)) + + @classmethod + def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict: + credentials['api_key'] = encrypter.encrypt_token(tenant_id, credentials['api_key']) + credentials['secret_key'] = encrypter.encrypt_token(tenant_id, credentials['secret_key']) + return credentials + + def get_provider_credentials(self, obfuscated: bool = False) -> dict: + if self.provider.provider_type == ProviderType.CUSTOM.value: + try: + credentials = json.loads(self.provider.encrypted_config) + except JSONDecodeError: + credentials = { + 'api_key': None, + 'secret_key': None, + } + + if credentials['api_key']: + credentials['api_key'] = encrypter.decrypt_token( + self.provider.tenant_id, + credentials['api_key'] + ) + + if obfuscated: + credentials['api_key'] = encrypter.obfuscated_token(credentials['api_key']) + + if credentials['secret_key']: + credentials['secret_key'] = encrypter.decrypt_token( + self.provider.tenant_id, + credentials['secret_key'] + ) + + if obfuscated: + credentials['secret_key'] = encrypter.obfuscated_token(credentials['secret_key']) + + return credentials + else: + return { + 'api_key': None, + 'secret_key': None, + } + + @classmethod + def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict): + """ + check model credentials valid. + + :param model_name: + :param model_type: + :param credentials: + """ + return + + @classmethod + def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType, + credentials: dict) -> dict: + """ + encrypt model credentials for save. + + :param tenant_id: + :param model_name: + :param model_type: + :param credentials: + :return: + """ + return {} + + def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict: + """ + get credentials for llm use. 
+ + :param model_name: + :param model_type: + :param obfuscated: + :return: + """ + return self.get_provider_credentials(obfuscated) diff --git a/api/core/model_providers/rules.py b/api/core/model_providers/rules.py new file mode 100644 index 000000000..5a911500d --- /dev/null +++ b/api/core/model_providers/rules.py @@ -0,0 +1,47 @@ +import json +import os + + +def init_provider_rules(): + # Get the absolute path of the subdirectory + subdirectory_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'rules') + + # Path to the providers.json file + providers_json_file_path = os.path.join(subdirectory_path, '_providers.json') + + try: + # Open the JSON file and read its content + with open(providers_json_file_path, 'r') as json_file: + data = json.load(json_file) + # Store the content in a dictionary with the key as the file name (without extension) + provider_names = data + except FileNotFoundError: + return "JSON file not found or path error" + except json.JSONDecodeError: + return "JSON file decoding error" + + # Dictionary to store the content of all JSON files + json_data = {} + + try: + # Loop through all files in the directory + for provider_name in provider_names: + filename = provider_name + '.json' + + # Path to each JSON file + json_file_path = os.path.join(subdirectory_path, filename) + + # Open each JSON file and read its content + with open(json_file_path, 'r') as json_file: + data = json.load(json_file) + # Store the content in the dictionary with the key as the file name (without extension) + json_data[os.path.splitext(filename)[0]] = data + + return json_data + except FileNotFoundError: + return "JSON file not found or path error" + except json.JSONDecodeError: + return "JSON file decoding error" + + +provider_rules = init_provider_rules() diff --git a/api/core/model_providers/rules/_providers.json b/api/core/model_providers/rules/_providers.json new file mode 100644 index 000000000..ad53f425c --- /dev/null +++ b/api/core/model_providers/rules/_providers.json @@ -0,0 +1,12 @@ +[ + "openai", + "azure_openai", + "anthropic", + "minimax", + "tongyi", + "spark", + "wenxin", + "chatglm", + "replicate", + "huggingface_hub" +] \ No newline at end of file diff --git a/api/core/model_providers/rules/anthropic.json b/api/core/model_providers/rules/anthropic.json new file mode 100644 index 000000000..56806aa7c --- /dev/null +++ b/api/core/model_providers/rules/anthropic.json @@ -0,0 +1,15 @@ +{ + "support_provider_types": [ + "system", + "custom" + ], + "system_config": { + "supported_quota_types": [ + "trial", + "paid" + ], + "quota_unit": "times", + "quota_limit": 1000 + }, + "model_flexibility": "fixed" +} \ No newline at end of file diff --git a/api/core/model_providers/rules/azure_openai.json b/api/core/model_providers/rules/azure_openai.json new file mode 100644 index 000000000..5badb0717 --- /dev/null +++ b/api/core/model_providers/rules/azure_openai.json @@ -0,0 +1,7 @@ +{ + "support_provider_types": [ + "custom" + ], + "system_config": null, + "model_flexibility": "configurable" +} \ No newline at end of file diff --git a/api/core/model_providers/rules/chatglm.json b/api/core/model_providers/rules/chatglm.json new file mode 100644 index 000000000..0af3e61ec --- /dev/null +++ b/api/core/model_providers/rules/chatglm.json @@ -0,0 +1,7 @@ +{ + "support_provider_types": [ + "custom" + ], + "system_config": null, + "model_flexibility": "fixed" +} \ No newline at end of file diff --git a/api/core/model_providers/rules/huggingface_hub.json 
b/api/core/model_providers/rules/huggingface_hub.json new file mode 100644 index 000000000..5badb0717 --- /dev/null +++ b/api/core/model_providers/rules/huggingface_hub.json @@ -0,0 +1,7 @@ +{ + "support_provider_types": [ + "custom" + ], + "system_config": null, + "model_flexibility": "configurable" +} \ No newline at end of file diff --git a/api/core/model_providers/rules/minimax.json b/api/core/model_providers/rules/minimax.json new file mode 100644 index 000000000..e19b885a2 --- /dev/null +++ b/api/core/model_providers/rules/minimax.json @@ -0,0 +1,13 @@ +{ + "support_provider_types": [ + "system", + "custom" + ], + "system_config": { + "supported_quota_types": [ + "free" + ], + "quota_unit": "tokens" + }, + "model_flexibility": "fixed" +} \ No newline at end of file diff --git a/api/core/model_providers/rules/openai.json b/api/core/model_providers/rules/openai.json new file mode 100644 index 000000000..e615de606 --- /dev/null +++ b/api/core/model_providers/rules/openai.json @@ -0,0 +1,14 @@ +{ + "support_provider_types": [ + "system", + "custom" + ], + "system_config": { + "supported_quota_types": [ + "trial" + ], + "quota_unit": "times", + "quota_limit": 200 + }, + "model_flexibility": "fixed" +} \ No newline at end of file diff --git a/api/core/model_providers/rules/replicate.json b/api/core/model_providers/rules/replicate.json new file mode 100644 index 000000000..5badb0717 --- /dev/null +++ b/api/core/model_providers/rules/replicate.json @@ -0,0 +1,7 @@ +{ + "support_provider_types": [ + "custom" + ], + "system_config": null, + "model_flexibility": "configurable" +} \ No newline at end of file diff --git a/api/core/model_providers/rules/spark.json b/api/core/model_providers/rules/spark.json new file mode 100644 index 000000000..e19b885a2 --- /dev/null +++ b/api/core/model_providers/rules/spark.json @@ -0,0 +1,13 @@ +{ + "support_provider_types": [ + "system", + "custom" + ], + "system_config": { + "supported_quota_types": [ + "free" + ], + "quota_unit": "tokens" + }, + "model_flexibility": "fixed" +} \ No newline at end of file diff --git a/api/core/model_providers/rules/tongyi.json b/api/core/model_providers/rules/tongyi.json new file mode 100644 index 000000000..0af3e61ec --- /dev/null +++ b/api/core/model_providers/rules/tongyi.json @@ -0,0 +1,7 @@ +{ + "support_provider_types": [ + "custom" + ], + "system_config": null, + "model_flexibility": "fixed" +} \ No newline at end of file diff --git a/api/core/model_providers/rules/wenxin.json b/api/core/model_providers/rules/wenxin.json new file mode 100644 index 000000000..0af3e61ec --- /dev/null +++ b/api/core/model_providers/rules/wenxin.json @@ -0,0 +1,7 @@ +{ + "support_provider_types": [ + "custom" + ], + "system_config": null, + "model_flexibility": "fixed" +} \ No newline at end of file diff --git a/api/core/orchestrator_rule_parser.py b/api/core/orchestrator_rule_parser.py index 38361f65c..021f8c935 100644 --- a/api/core/orchestrator_rule_parser.py +++ b/api/core/orchestrator_rule_parser.py @@ -3,7 +3,6 @@ from typing import Optional from langchain import WikipediaAPIWrapper from langchain.callbacks.manager import Callbacks -from langchain.chat_models import ChatOpenAI from langchain.memory.chat_memory import BaseChatMemory from langchain.tools import BaseTool, Tool, WikipediaQueryRun from pydantic import BaseModel, Field @@ -15,7 +14,8 @@ from core.callback_handler.main_chain_gather_callback_handler import MainChainGa from core.callback_handler.std_out_callback_handler import DifyStdOutCallbackHandler from 
core.chain.sensitive_word_avoidance_chain import SensitiveWordAvoidanceChain from core.conversation_message_task import ConversationMessageTask -from core.llm.llm_builder import LLMBuilder +from core.model_providers.model_factory import ModelFactory +from core.model_providers.models.entity.model_params import ModelKwargs, ModelMode from core.tool.dataset_retriever_tool import DatasetRetrieverTool from core.tool.provider.serpapi_provider import SerpAPIToolProvider from core.tool.serpapi_wrapper import OptimizedSerpAPIWrapper, OptimizedSerpAPIInput @@ -32,11 +32,9 @@ class OrchestratorRuleParser: def __init__(self, tenant_id: str, app_model_config: AppModelConfig): self.tenant_id = tenant_id self.app_model_config = app_model_config - self.agent_summary_model_name = "gpt-3.5-turbo-16k" - self.dataset_retrieve_model_name = "gpt-3.5-turbo" def to_agent_executor(self, conversation_message_task: ConversationMessageTask, memory: Optional[BaseChatMemory], - rest_tokens: int, chain_callback: MainChainGatherCallbackHandler) \ + rest_tokens: int, chain_callback: MainChainGatherCallbackHandler) \ -> Optional[AgentExecutor]: if not self.app_model_config.agent_mode_dict: return None @@ -47,43 +45,50 @@ class OrchestratorRuleParser: chain = None if agent_mode_config and agent_mode_config.get('enabled'): tool_configs = agent_mode_config.get('tools', []) + agent_provider_name = model_dict.get('provider', 'openai') agent_model_name = model_dict.get('name', 'gpt-4') + agent_model_instance = ModelFactory.get_text_generation_model( + tenant_id=self.tenant_id, + model_provider_name=agent_provider_name, + model_name=agent_model_name, + model_kwargs=ModelKwargs( + temperature=0.2, + top_p=0.3, + max_tokens=1500 + ) + ) + # add agent callback to record agent thoughts agent_callback = AgentLoopGatherCallbackHandler( - model_name=agent_model_name, + model_instant=agent_model_instance, conversation_message_task=conversation_message_task ) chain_callback.agent_callback = agent_callback - - agent_llm = LLMBuilder.to_llm( - tenant_id=self.tenant_id, - model_name=agent_model_name, - temperature=0, - max_tokens=1500, - callbacks=[agent_callback, DifyStdOutCallbackHandler()] - ) + agent_model_instance.add_callbacks([agent_callback]) planning_strategy = PlanningStrategy(agent_mode_config.get('strategy', 'router')) # only OpenAI chat model (include Azure) support function call, use ReACT instead - if not isinstance(agent_llm, ChatOpenAI) \ - and planning_strategy in [PlanningStrategy.FUNCTION_CALL, PlanningStrategy.MULTI_FUNCTION_CALL]: - planning_strategy = PlanningStrategy.REACT + if agent_model_instance.model_mode != ModelMode.CHAT \ + or agent_model_instance.name not in ['openai', 'azure_openai']: + if planning_strategy in [PlanningStrategy.FUNCTION_CALL, PlanningStrategy.MULTI_FUNCTION_CALL]: + planning_strategy = PlanningStrategy.REACT + elif planning_strategy == PlanningStrategy.ROUTER: + planning_strategy = PlanningStrategy.REACT_ROUTER - summary_llm = LLMBuilder.to_llm( + summary_model_instance = ModelFactory.get_text_generation_model( tenant_id=self.tenant_id, - model_name=self.agent_summary_model_name, - temperature=0, - max_tokens=500, - callbacks=[DifyStdOutCallbackHandler()] + model_kwargs=ModelKwargs( + temperature=0, + max_tokens=500 + ) ) tools = self.to_tools( tool_configs=tool_configs, conversation_message_task=conversation_message_task, - model_name=self.agent_summary_model_name, rest_tokens=rest_tokens, callbacks=[agent_callback, DifyStdOutCallbackHandler()] ) @@ -91,20 +96,11 @@ class 
OrchestratorRuleParser: if len(tools) == 0: return None - dataset_llm = LLMBuilder.to_llm( - tenant_id=self.tenant_id, - model_name=self.dataset_retrieve_model_name, - temperature=0, - max_tokens=500, - callbacks=[DifyStdOutCallbackHandler()] - ) - agent_configuration = AgentConfiguration( strategy=planning_strategy, - llm=agent_llm, + model_instance=agent_model_instance, tools=tools, - summary_llm=summary_llm, - dataset_llm=dataset_llm, + summary_model_instance=summary_model_instance, memory=memory, callbacks=[chain_callback, agent_callback], max_iterations=10, @@ -141,13 +137,12 @@ class OrchestratorRuleParser: return None def to_tools(self, tool_configs: list, conversation_message_task: ConversationMessageTask, - model_name: str, rest_tokens: int, callbacks: Callbacks = None) -> list[BaseTool]: + rest_tokens: int, callbacks: Callbacks = None) -> list[BaseTool]: """ Convert app agent tool configs to tools :param rest_tokens: :param tool_configs: app agent tool configs - :param model_name: :param conversation_message_task: :param callbacks: :return: @@ -163,7 +158,7 @@ class OrchestratorRuleParser: if tool_type == "dataset": tool = self.to_dataset_retriever_tool(tool_val, conversation_message_task, rest_tokens) elif tool_type == "web_reader": - tool = self.to_web_reader_tool(model_name) + tool = self.to_web_reader_tool() elif tool_type == "google_search": tool = self.to_google_search_tool() elif tool_type == "wikipedia": @@ -205,20 +200,22 @@ class OrchestratorRuleParser: return tool - def to_web_reader_tool(self, model_name: str) -> Optional[BaseTool]: + def to_web_reader_tool(self) -> Optional[BaseTool]: """ A tool for reading web pages :return: """ - summary_llm = LLMBuilder.to_llm( + summary_model_instance = ModelFactory.get_text_generation_model( tenant_id=self.tenant_id, - model_name=model_name, - temperature=0, - max_tokens=500, - callbacks=[DifyStdOutCallbackHandler()] + model_kwargs=ModelKwargs( + temperature=0, + max_tokens=500 + ) ) + summary_llm = summary_model_instance.client + tool = WebReaderTool( llm=summary_llm, max_chunk_length=4000, @@ -273,6 +270,10 @@ class OrchestratorRuleParser: def _dynamic_calc_retrieve_k(cls, dataset: Dataset, rest_tokens: int) -> int: DEFAULT_K = 2 CONTEXT_TOKENS_PERCENT = 0.3 + + if rest_tokens == -1: + return DEFAULT_K + processing_rule = dataset.latest_process_rule if not processing_rule: return DEFAULT_K diff --git a/api/core/third_party/langchain/embeddings/__init__.py b/api/core/third_party/langchain/embeddings/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/api/core/third_party/langchain/embeddings/replicate_embedding.py b/api/core/third_party/langchain/embeddings/replicate_embedding.py new file mode 100644 index 000000000..0113ba8db --- /dev/null +++ b/api/core/third_party/langchain/embeddings/replicate_embedding.py @@ -0,0 +1,99 @@ +"""Wrapper around Replicate embedding models.""" +from typing import Any, Dict, List, Optional + +from pydantic import BaseModel, Extra, root_validator + +from langchain.embeddings.base import Embeddings +from langchain.utils import get_from_dict_or_env + + +class ReplicateEmbeddings(BaseModel, Embeddings): + """Wrapper around Replicate embedding models. + + To use, you should have the ``replicate`` python package installed. 
+ """ + + client: Any #: :meta private: + model: str + """Model name to use.""" + + replicate_api_token: Optional[str] = None + + class Config: + """Configuration for this pydantic object.""" + + extra = Extra.forbid + + @root_validator() + def validate_environment(cls, values: Dict) -> Dict: + """Validate that api key and python package exists in environment.""" + replicate_api_token = get_from_dict_or_env( + values, "replicate_api_token", "REPLICATE_API_TOKEN" + ) + try: + import replicate as replicate_python + + values["client"] = replicate_python.Client(api_token=replicate_api_token) + except ImportError: + raise ImportError( + "Could not import replicate python package. " + "Please install it with `pip install replicate`." + ) + return values + + def embed_documents(self, texts: List[str]) -> List[List[float]]: + """Call out to Replicate's embedding endpoint. + + Args: + texts: The list of texts to embed. + + Returns: + List of embeddings, one for each text. + """ + # get the model and version + model_str, version_str = self.model.split(":") + model = self.client.models.get(model_str) + version = model.versions.get(version_str) + + # sort through the openapi schema to get the name of the first input + input_properties = sorted( + version.openapi_schema["components"]["schemas"]["Input"][ + "properties" + ].items(), + key=lambda item: item[1].get("x-order", 0), + ) + first_input_name = input_properties[0][0] + + embeddings = [] + for text in texts: + result = self.client.run(self.model, input={first_input_name: text}) + embeddings.append(result[0].get('embedding')) + + return [list(map(float, e)) for e in embeddings] + + def embed_query(self, text: str) -> List[float]: + """Call out to Replicate's embedding endpoint. + + Args: + text: The text to embed. + + Returns: + Embeddings for the text. 
+ """ + # get the model and version + model_str, version_str = self.model.split(":") + model = self.client.models.get(model_str) + version = model.versions.get(version_str) + + # sort through the openapi schema to get the name of the first input + input_properties = sorted( + version.openapi_schema["components"]["schemas"]["Input"][ + "properties" + ].items(), + key=lambda item: item[1].get("x-order", 0), + ) + first_input_name = input_properties[0][0] + result = self.client.run(self.model, input={first_input_name: text}) + embedding = result[0].get('embedding') + + return list(map(float, embedding)) diff --git a/api/core/third_party/langchain/llms/__init__.py b/api/core/third_party/langchain/llms/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/api/core/llm/streamable_azure_chat_open_ai.py b/api/core/third_party/langchain/llms/azure_chat_open_ai.py similarity index 75% rename from api/core/llm/streamable_azure_chat_open_ai.py rename to api/core/third_party/langchain/llms/azure_chat_open_ai.py index 1e2681fa8..a1f6aa6ec 100644 --- a/api/core/llm/streamable_azure_chat_open_ai.py +++ b/api/core/third_party/langchain/llms/azure_chat_open_ai.py @@ -1,15 +1,13 @@ -from langchain.callbacks.manager import Callbacks, CallbackManagerForLLMRun -from langchain.chat_models.openai import _convert_dict_to_message -from langchain.schema import BaseMessage, LLMResult, ChatResult, ChatGeneration -from langchain.chat_models import AzureChatOpenAI -from typing import Optional, List, Dict, Any, Tuple, Union +from typing import Dict, Any, Optional, List, Tuple, Union +from langchain.callbacks.manager import CallbackManagerForLLMRun +from langchain.chat_models import AzureChatOpenAI +from langchain.chat_models.openai import _convert_dict_to_message +from langchain.schema import ChatResult, BaseMessage, ChatGeneration from pydantic import root_validator -from core.llm.wrappers.openai_wrapper import handle_openai_exceptions - -class StreamableAzureChatOpenAI(AzureChatOpenAI): +class EnhanceAzureChatOpenAI(AzureChatOpenAI): request_timeout: Optional[Union[float, Tuple[float, float]]] = (5.0, 300.0) """Timeout for requests to OpenAI completion API. 
Default is 600 seconds.""" max_retries: int = 1 @@ -52,32 +50,6 @@ class StreamableAzureChatOpenAI(AzureChatOpenAI): "organization": self.openai_organization if self.openai_organization else None, } - @handle_openai_exceptions - def generate( - self, - messages: List[List[BaseMessage]], - stop: Optional[List[str]] = None, - callbacks: Callbacks = None, - **kwargs: Any, - ) -> LLMResult: - return super().generate(messages, stop, callbacks, **kwargs) - - @classmethod - def get_kwargs_from_model_params(cls, params: dict): - model_kwargs = { - 'top_p': params.get('top_p', 1), - 'frequency_penalty': params.get('frequency_penalty', 0), - 'presence_penalty': params.get('presence_penalty', 0), - } - - del params['top_p'] - del params['frequency_penalty'] - del params['presence_penalty'] - - params['model_kwargs'] = model_kwargs - - return params - def _generate( self, messages: List[BaseMessage], @@ -116,4 +88,4 @@ class StreamableAzureChatOpenAI(AzureChatOpenAI): ) return ChatResult(generations=[ChatGeneration(message=message)]) response = self.completion_with_retry(messages=message_dicts, **params) - return self._create_chat_result(response) + return self._create_chat_result(response) \ No newline at end of file diff --git a/api/core/llm/streamable_azure_open_ai.py b/api/core/third_party/langchain/llms/azure_open_ai.py similarity index 87% rename from api/core/llm/streamable_azure_open_ai.py rename to api/core/third_party/langchain/llms/azure_open_ai.py index ab67f5abc..f4d715533 100644 --- a/api/core/llm/streamable_azure_open_ai.py +++ b/api/core/third_party/langchain/llms/azure_open_ai.py @@ -1,16 +1,14 @@ -from langchain.callbacks.manager import Callbacks, CallbackManagerForLLMRun +from typing import Dict, Any, Mapping, Optional, List, Union, Tuple + +from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms import AzureOpenAI from langchain.llms.openai import _streaming_response_template, completion_with_retry, _update_response, \ update_token_usage from langchain.schema import LLMResult -from typing import Optional, List, Dict, Mapping, Any, Union, Tuple - from pydantic import root_validator -from core.llm.wrappers.openai_wrapper import handle_openai_exceptions - -class StreamableAzureOpenAI(AzureOpenAI): +class EnhanceAzureOpenAI(AzureOpenAI): openai_api_type: str = "azure" openai_api_version: str = "" request_timeout: Optional[Union[float, Tuple[float, float]]] = (5.0, 300.0) @@ -56,20 +54,6 @@ class StreamableAzureOpenAI(AzureOpenAI): "organization": self.openai_organization if self.openai_organization else None, }} - @handle_openai_exceptions - def generate( - self, - prompts: List[str], - stop: Optional[List[str]] = None, - callbacks: Callbacks = None, - **kwargs: Any, - ) -> LLMResult: - return super().generate(prompts, stop, callbacks, **kwargs) - - @classmethod - def get_kwargs_from_model_params(cls, params: dict): - return params - def _generate( self, prompts: List[str], diff --git a/api/core/llm/streamable_chat_open_ai.py b/api/core/third_party/langchain/llms/chat_open_ai.py similarity index 62% rename from api/core/llm/streamable_chat_open_ai.py rename to api/core/third_party/langchain/llms/chat_open_ai.py index 64e454844..b409a9889 100644 --- a/api/core/llm/streamable_chat_open_ai.py +++ b/api/core/third_party/langchain/llms/chat_open_ai.py @@ -1,16 +1,12 @@ import os -from langchain.callbacks.manager import Callbacks -from langchain.schema import BaseMessage, LLMResult -from langchain.chat_models import ChatOpenAI -from typing import Optional, List, 
Dict, Any, Union, Tuple +from typing import Dict, Any, Optional, Union, Tuple +from langchain.chat_models import ChatOpenAI from pydantic import root_validator -from core.llm.wrappers.openai_wrapper import handle_openai_exceptions - -class StreamableChatOpenAI(ChatOpenAI): +class EnhanceChatOpenAI(ChatOpenAI): request_timeout: Optional[Union[float, Tuple[float, float]]] = (5.0, 300.0) """Timeout for requests to OpenAI completion API. Default is 600 seconds.""" max_retries: int = 1 @@ -51,29 +47,3 @@ class StreamableChatOpenAI(ChatOpenAI): "api_key": self.openai_api_key, "organization": self.openai_organization if self.openai_organization else None, } - - @handle_openai_exceptions - def generate( - self, - messages: List[List[BaseMessage]], - stop: Optional[List[str]] = None, - callbacks: Callbacks = None, - **kwargs: Any, - ) -> LLMResult: - return super().generate(messages, stop, callbacks, **kwargs) - - @classmethod - def get_kwargs_from_model_params(cls, params: dict): - model_kwargs = { - 'top_p': params.get('top_p', 1), - 'frequency_penalty': params.get('frequency_penalty', 0), - 'presence_penalty': params.get('presence_penalty', 0), - } - - del params['top_p'] - del params['frequency_penalty'] - del params['presence_penalty'] - - params['model_kwargs'] = model_kwargs - - return params diff --git a/api/core/llm/fake.py b/api/core/third_party/langchain/llms/fake.py similarity index 85% rename from api/core/llm/fake.py rename to api/core/third_party/langchain/llms/fake.py index b7190220f..b901df935 100644 --- a/api/core/llm/fake.py +++ b/api/core/third_party/langchain/llms/fake.py @@ -1,9 +1,11 @@ import time -from typing import List, Optional, Any, Mapping +from typing import List, Optional, Any, Mapping, Callable from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.chat_models.base import SimpleChatModel -from langchain.schema import BaseMessage, ChatResult, AIMessage, ChatGeneration, BaseLanguageModel +from langchain.schema import BaseMessage, ChatResult, AIMessage, ChatGeneration + +from core.model_providers.models.entity.message import str_to_prompt_messages class FakeLLM(SimpleChatModel): @@ -12,7 +14,7 @@ class FakeLLM(SimpleChatModel): streaming: bool = False """Whether to stream the results or not.""" response: str - origin_llm: Optional[BaseLanguageModel] = None + num_token_func: Optional[Callable] = None @property def _llm_type(self) -> str: @@ -33,7 +35,7 @@ class FakeLLM(SimpleChatModel): return {"response": self.response} def get_num_tokens(self, text: str) -> int: - return self.origin_llm.get_num_tokens(text) if self.origin_llm else 0 + return self.num_token_func(str_to_prompt_messages([text])) if self.num_token_func else 0 def _generate( self, diff --git a/api/core/llm/streamable_open_ai.py b/api/core/third_party/langchain/llms/open_ai.py similarity index 74% rename from api/core/llm/streamable_open_ai.py rename to api/core/third_party/langchain/llms/open_ai.py index cfb32da3a..a16998ab5 100644 --- a/api/core/llm/streamable_open_ai.py +++ b/api/core/third_party/langchain/llms/open_ai.py @@ -1,15 +1,11 @@ import os -from langchain.callbacks.manager import Callbacks -from langchain.schema import LLMResult -from typing import Optional, List, Dict, Any, Mapping, Union, Tuple +from typing import Dict, Any, Mapping, Optional, Union, Tuple from langchain import OpenAI from pydantic import root_validator -from core.llm.wrappers.openai_wrapper import handle_openai_exceptions - -class StreamableOpenAI(OpenAI): +class EnhanceOpenAI(OpenAI): 
request_timeout: Optional[Union[float, Tuple[float, float]]] = (5.0, 300.0) """Timeout for requests to OpenAI completion API. Default is 600 seconds.""" max_retries: int = 1 @@ -52,17 +48,3 @@ class StreamableOpenAI(OpenAI): "api_key": self.openai_api_key, "organization": self.openai_organization if self.openai_organization else None, }} - - @handle_openai_exceptions - def generate( - self, - prompts: List[str], - stop: Optional[List[str]] = None, - callbacks: Callbacks = None, - **kwargs: Any, - ) -> LLMResult: - return super().generate(prompts, stop, callbacks, **kwargs) - - @classmethod - def get_kwargs_from_model_params(cls, params: dict): - return params diff --git a/api/core/third_party/langchain/llms/replicate_llm.py b/api/core/third_party/langchain/llms/replicate_llm.py new file mode 100644 index 000000000..556ef2b10 --- /dev/null +++ b/api/core/third_party/langchain/llms/replicate_llm.py @@ -0,0 +1,75 @@ +from typing import Dict, Optional, List, Any + +from langchain.callbacks.manager import CallbackManagerForLLMRun +from langchain.llms import Replicate +from langchain.utils import get_from_dict_or_env +from pydantic import root_validator + + +class EnhanceReplicate(Replicate): + @root_validator() + def validate_environment(cls, values: Dict) -> Dict: + """Validate that api key and python package exists in environment.""" + replicate_api_token = get_from_dict_or_env( + values, "replicate_api_token", "REPLICATE_API_TOKEN" + ) + values["replicate_api_token"] = replicate_api_token + return values + + def _call( + self, + prompt: str, + stop: Optional[List[str]] = None, + run_manager: Optional[CallbackManagerForLLMRun] = None, + **kwargs: Any, + ) -> str: + """Call to replicate endpoint.""" + try: + import replicate as replicate_python + except ImportError: + raise ImportError( + "Could not import replicate python package. " + "Please install it with `pip install replicate`." 
+            )
+
+        client = replicate_python.Client(api_token=self.replicate_api_token)
+
+        # get the model and version
+        model_str, version_str = self.model.split(":")
+        model = client.models.get(model_str)
+        version = model.versions.get(version_str)
+
+        # sort through the openapi schema to get the name of the first input
+        input_properties = sorted(
+            version.openapi_schema["components"]["schemas"]["Input"][
+                "properties"
+            ].items(),
+            key=lambda item: item[1].get("x-order", 0),
+        )
+        first_input_name = input_properties[0][0]
+        inputs = {first_input_name: prompt, **self.input}
+
+        prediction = client.predictions.create(
+            version=version, input={**inputs, **kwargs}
+        )
+        current_completion: str = ""
+        stop_condition_reached = False
+        for output in prediction.output_iterator():
+            current_completion += output
+
+            # test for stop conditions, if specified
+            if stop:
+                for s in stop:
+                    if s in current_completion:
+                        prediction.cancel()
+                        stop_index = current_completion.find(s)
+                        current_completion = current_completion[:stop_index]
+                        stop_condition_reached = True
+                        break
+
+            if stop_condition_reached:
+                break
+
+            if self.streaming and run_manager:
+                run_manager.on_llm_new_token(output)
+
+        return current_completion
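For reference, a minimal sketch of driving this wrapper; the model id and token are illustrative placeholders, not values from this patch:

    from core.third_party.langchain.llms.replicate_llm import EnhanceReplicate

    llm = EnhanceReplicate(
        model="<owner>/<model-name>:<version-hash>",  # placeholder Replicate model id
        replicate_api_token="<token>",                # placeholder; or set REPLICATE_API_TOKEN
        input={"temperature": 0.7},
    )

    # stop sequences are matched incrementally; the prediction is cancelled
    # server-side as soon as one appears in the accumulated output
    print(llm("Human: say hi\nAssistant:", stop=["Human:"]))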
diff --git a/api/core/third_party/langchain/llms/spark.py b/api/core/third_party/langchain/llms/spark.py
new file mode 100644
index 000000000..23eb7472a
--- /dev/null
+++ b/api/core/third_party/langchain/llms/spark.py
@@ -0,0 +1,185 @@
+import re
+import string
+import threading
+from decimal import Decimal
+from typing import Dict, List, Optional, Any, Mapping
+
+from langchain.callbacks.manager import CallbackManagerForLLMRun, AsyncCallbackManagerForLLMRun
+from langchain.chat_models.base import BaseChatModel
+from langchain.llms.utils import enforce_stop_tokens
+from langchain.schema import BaseMessage, ChatMessage, HumanMessage, AIMessage, SystemMessage, ChatResult, \
+    ChatGeneration
+from langchain.utils import get_from_dict_or_env
+from pydantic import root_validator
+
+from core.third_party.spark.spark_llm import SparkLLMClient
+
+
+class ChatSpark(BaseChatModel):
+    r"""Wrapper around Spark's large language model.
+
+    To use, you should pass `app_id`, `api_key`, `api_secret`
+    as named parameters to the constructor.
+
+    Example:
+        .. code-block:: python
+
+            chat = ChatSpark(
+                app_id="",
+                api_key="",
+                api_secret=""
+            )
+    """
+    client: Any = None  #: :meta private:
+
+    max_tokens: int = 256
+    """Denotes the number of tokens to predict per generation."""
+
+    temperature: Optional[float] = None
+    """A non-negative float that tunes the degree of randomness in generation."""
+
+    top_k: Optional[int] = None
+    """Number of most likely tokens to consider at each step."""
+
+    user_id: Optional[str] = None
+    """User ID to use for the model."""
+
+    streaming: bool = False
+    """Whether to stream the results."""
+
+    app_id: Optional[str] = None
+    api_key: Optional[str] = None
+    api_secret: Optional[str] = None
+
+    @root_validator()
+    def validate_environment(cls, values: Dict) -> Dict:
+        """Validate that api key and python package exists in environment."""
+        values["app_id"] = get_from_dict_or_env(
+            values, "app_id", "SPARK_APP_ID"
+        )
+        values["api_key"] = get_from_dict_or_env(
+            values, "api_key", "SPARK_API_KEY"
+        )
+        values["api_secret"] = get_from_dict_or_env(
+            values, "api_secret", "SPARK_API_SECRET"
+        )
+
+        values["client"] = SparkLLMClient(
+            app_id=values["app_id"],
+            api_key=values["api_key"],
+            api_secret=values["api_secret"],
+        )
+        return values
+
+    @property
+    def _default_params(self) -> Mapping[str, Any]:
+        """Get the default parameters for calling the Spark API."""
+        d = {
+            "max_tokens": self.max_tokens
+        }
+        if self.temperature is not None:
+            d["temperature"] = self.temperature
+        if self.top_k is not None:
+            d["top_k"] = self.top_k
+        return d
+
+    @property
+    def _identifying_params(self) -> Mapping[str, Any]:
+        """Get the identifying parameters."""
+        return {**{}, **self._default_params}
+
+    @property
+    def lc_secrets(self) -> Dict[str, str]:
+        return {"api_key": "API_KEY", "api_secret": "API_SECRET"}
+
+    @property
+    def _llm_type(self) -> str:
+        """Return type of chat model."""
+        return "spark-chat"
+
+    @property
+    def lc_serializable(self) -> bool:
+        return True
+
+    def _convert_messages_to_dicts(self, messages: List[BaseMessage]) -> list[dict]:
+        """Format a list of messages into a full dict list.
+
+        Args:
+            messages (List[BaseMessage]): List of BaseMessage to combine.
+
+        Returns:
+            list[dict]
+        """
+        messages = messages.copy()  # don't mutate the original list
+
+        new_messages = []
+        for message in messages:
+            if isinstance(message, ChatMessage):
+                new_messages.append({'role': 'user', 'content': message.content})
+            elif isinstance(message, HumanMessage) or isinstance(message, SystemMessage):
+                new_messages.append({'role': 'user', 'content': message.content})
+            elif isinstance(message, AIMessage):
+                new_messages.append({'role': 'assistant', 'content': message.content})
+            else:
+                raise ValueError(f"Got unknown type {message}")
+
+        return new_messages
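A quick illustration of the role mapping above (hypothetical values; system messages are folded into the user role, presumably because the payload sent by SparkLLMClient only carries user and assistant turns):

    from langchain.schema import AIMessage, HumanMessage, SystemMessage

    chat = ChatSpark(app_id="", api_key="", api_secret="")
    print(chat._convert_messages_to_dicts([
        SystemMessage(content="Answer briefly."),
        HumanMessage(content="Hi"),
        AIMessage(content="Hello!"),
    ]))
    # [{'role': 'user', 'content': 'Answer briefly.'},
    #  {'role': 'user', 'content': 'Hi'},
    #  {'role': 'assistant', 'content': 'Hello!'}]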
+    def _generate(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> ChatResult:
+        messages = self._convert_messages_to_dicts(messages)
+
+        thread = threading.Thread(target=self.client.run, args=(
+            messages,
+            self.user_id,
+            self._default_params,
+            self.streaming
+        ))
+        thread.start()
+
+        completion = ""
+        for content in self.client.subscribe():
+            if isinstance(content, dict):
+                delta = content['data']
+            else:
+                delta = content
+
+            completion += delta
+            if self.streaming and run_manager:
+                run_manager.on_llm_new_token(
+                    delta,
+                )
+
+        thread.join()
+
+        if stop is not None:
+            completion = enforce_stop_tokens(completion, stop)
+
+        message = AIMessage(content=completion)
+        return ChatResult(generations=[ChatGeneration(message=message)])
+
+    async def _agenerate(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> ChatResult:
+        message = AIMessage(content='')
+        return ChatResult(generations=[ChatGeneration(message=message)])
+
+    def get_num_tokens(self, text: str) -> int:
+        """Calculate number of tokens."""
+        total = Decimal(0)
+        words = re.findall(r'\b\w+\b|[{}]|\s'.format(re.escape(string.punctuation)), text)
+        for word in words:
+            if word:
+                if '\u4e00' <= word <= '\u9fff':  # if chinese
+                    total += Decimal('1.5')
+                else:
+                    total += Decimal('0.8')
+        return int(total)
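The `_generate` above hands the websocket session to a background thread and drains a queue on the caller's side. A stripped-down sketch of that producer/consumer handshake, with a stub standing in for the real SparkLLMClient (which pushes `{'data': ...}` chunks and a terminal `{'done': True}`, exactly as in core/third_party/spark/spark_llm.py below):

    import queue
    import threading

    class StubSparkClient:
        # stand-in for SparkLLMClient: a worker thread feeds the queue, the caller drains it
        def __init__(self):
            self.queue = queue.Queue()

        def run(self, messages, user_id, model_kwargs, streaming):
            for chunk in ("Hel", "lo"):
                self.queue.put({'data': chunk})
            self.queue.put({'done': True})

        def subscribe(self):
            while True:
                content = self.queue.get()
                if 'data' not in content:
                    break
                yield content

    client = StubSparkClient()
    thread = threading.Thread(target=client.run, args=([], 'user-1', {}, True))
    thread.start()
    print(''.join(c['data'] for c in client.subscribe()))  # -> "Hello"
    thread.join()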
diff --git a/api/core/third_party/langchain/llms/tongyi_llm.py b/api/core/third_party/langchain/llms/tongyi_llm.py
new file mode 100644
index 000000000..c8241fe08
--- /dev/null
+++ b/api/core/third_party/langchain/llms/tongyi_llm.py
@@ -0,0 +1,82 @@
+from typing import Dict, Any, List, Optional
+
+from langchain.callbacks.manager import CallbackManagerForLLMRun
+from langchain.llms import Tongyi
+from langchain.llms.tongyi import generate_with_retry, stream_generate_with_retry
+from langchain.schema import Generation, LLMResult
+
+
+class EnhanceTongyi(Tongyi):
+    @property
+    def _default_params(self) -> Dict[str, Any]:
+        """Get the default parameters for calling the Tongyi API."""
+        normal_params = {
+            "top_p": self.top_p,
+            "api_key": self.dashscope_api_key
+        }
+
+        return {**normal_params, **self.model_kwargs}
+
+    def _generate(
+        self,
+        prompts: List[str],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> LLMResult:
+        generations = []
+        params: Dict[str, Any] = {
+            **{"model": self.model_name},
+            **self._default_params,
+            **kwargs,
+        }
+        if self.streaming:
+            if len(prompts) > 1:
+                raise ValueError("Cannot stream results with multiple prompts.")
+            params["stream"] = True
+            text = ''
+            for stream_resp in stream_generate_with_retry(
+                self, prompt=prompts[0], **params
+            ):
+                # the stream returns the full text so far; slice off what was
+                # already seen to recover the incremental delta
+                if not generations:
+                    current_text = stream_resp["output"]["text"]
+                else:
+                    current_text = stream_resp["output"]["text"][len(text):]
+
+                text = stream_resp["output"]["text"]
+
+                generations.append(
+                    [
+                        Generation(
+                            text=current_text,
+                            generation_info=dict(
+                                finish_reason=stream_resp["output"]["finish_reason"],
+                            ),
+                        )
+                    ]
+                )
+
+                if run_manager:
+                    run_manager.on_llm_new_token(
+                        current_text,
+                        verbose=self.verbose,
+                        logprobs=None,
+                    )
+        else:
+            for prompt in prompts:
+                completion = generate_with_retry(
+                    self,
+                    prompt=prompt,
+                    **params,
+                )
+                generations.append(
+                    [
+                        Generation(
+                            text=completion["output"]["text"],
+                            generation_info=dict(
+                                finish_reason=completion["output"]["finish_reason"],
+                            ),
+                        )
+                    ]
+                )
+        return LLMResult(generations=generations)
diff --git a/api/core/third_party/langchain/llms/wenxin.py b/api/core/third_party/langchain/llms/wenxin.py
new file mode 100644
index 000000000..a10fb82b7
--- /dev/null
+++ b/api/core/third_party/langchain/llms/wenxin.py
@@ -0,0 +1,233 @@
+"""Wrapper around Wenxin APIs."""
+from __future__ import annotations
+
+import json
+import logging
+from typing import (
+    Any,
+    Dict,
+    List,
+    Optional, Iterator,
+)
+
+import requests
+from langchain.llms.utils import enforce_stop_tokens
+from langchain.schema.output import GenerationChunk
+from pydantic import BaseModel, Extra, Field, PrivateAttr, root_validator
+
+from langchain.callbacks.manager import (
+    CallbackManagerForLLMRun,
+)
+from langchain.llms.base import LLM
+from langchain.utils import get_from_dict_or_env
+
+logger = logging.getLogger(__name__)
+
+
+class _WenxinEndpointClient(BaseModel):
+    """An API client that talks to a Wenxin llm endpoint."""
+
+    base_url: str = "https://aip.baidubce.com/rpc/2.0/ai_custom/v1/wenxinworkshop/chat/"
+    secret_key: str
+    api_key: str
+
+    def get_access_token(self) -> str:
+        url = f"https://aip.baidubce.com/oauth/2.0/token?client_id={self.api_key}" \
+              f"&client_secret={self.secret_key}&grant_type=client_credentials"
+
+        headers = {
+            'Content-Type': 'application/json',
+            'Accept': 'application/json'
+        }
+
+        response = requests.post(url, headers=headers)
+        if not response.ok:
+            raise ValueError(f"Wenxin HTTP {response.status_code} error: {response.text}")
+        if 'error' in response.json():
+            raise ValueError(
+                f"Wenxin API {response.json()['error']}"
+                f" error: {response.json()['error_description']}"
+            )
+
+        access_token = response.json()['access_token']
+
+        # todo add cache
+
+        return access_token
+
+    def post(self, request: dict) -> Any:
+        if 'model' not in request:
+            raise ValueError("Wenxin Model name is required")
+
+        model_url_map = {
+            'ernie-bot': 'completions',
+            'ernie-bot-turbo': 'eb-instant',
+            'bloomz-7b': 'bloomz_7b1',
+        }
+
+        stream = 'stream' in request and request['stream']
+
+        access_token = self.get_access_token()
+        api_url = f"{self.base_url}{model_url_map[request['model']]}?access_token={access_token}"
+
+        headers = {"Content-Type": "application/json"}
+        response = requests.post(api_url,
+                                 headers=headers,
+                                 json=request,
+                                 stream=stream)
+        if not response.ok:
+            raise ValueError(f"Wenxin HTTP {response.status_code} error: {response.text}")
+
+        if not stream:
+            json_response = response.json()
+            if 'error_code' in json_response:
+                raise ValueError(
+                    f"Wenxin API {json_response['error_code']}"
+                    f" error: {json_response['error_msg']}"
+                )
+            return json_response["result"]
+        else:
+            return response
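The `# todo add cache` above means the OAuth token is re-fetched on every call even though Baidu access tokens are typically long-lived (about 30 days, per Baidu's docs). A minimal sketch of module-level caching under that assumption; the expiry margin and cache shape are illustrative, not part of this patch:

    import time

    _token_cache: dict = {}  # illustrative module-level cache, keyed by api_key

    def get_access_token_cached(client: '_WenxinEndpointClient') -> str:
        # reuse a token until shortly before its assumed expiry
        entry = _token_cache.get(client.api_key)
        if entry and entry['expires_at'] > time.time():
            return entry['token']
        token = client.get_access_token()
        _token_cache[client.api_key] = {
            'token': token,
            'expires_at': time.time() + 29 * 24 * 3600,  # assumed ~30-day validity
        }
        return token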
+class Wenxin(LLM):
+    """Wrapper around Wenxin large language models.
+
+    To use, you should have the environment variables
+    ``WENXIN_API_KEY`` and ``WENXIN_SECRET_KEY`` set with your API key and
+    secret key, or pass them as named parameters to the constructor.
+
+    Example:
+        .. code-block:: python
+
+            from core.third_party.langchain.llms.wenxin import Wenxin
+            wenxin = Wenxin(model="ernie-bot", api_key="my-api-key",
+                            secret_key="my-secret-key")
+    """
+
+    _client: _WenxinEndpointClient = PrivateAttr()
+    model: str = "ernie-bot"
+    """Model name to use."""
+    temperature: float = 0.7
+    """A non-negative float that tunes the degree of randomness in generation."""
+    top_p: float = 0.95
+    """Total probability mass of tokens to consider at each step."""
+    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
+    """Holds any model parameters valid for `create` call not explicitly specified."""
+    streaming: bool = False
+    """Whether to stream the response or return it all at once."""
+    api_key: Optional[str] = None
+    secret_key: Optional[str] = None
+
+    class Config:
+        """Configuration for this pydantic object."""
+
+        extra = Extra.forbid
+
+    @root_validator()
+    def validate_environment(cls, values: Dict) -> Dict:
+        """Validate that api key and python package exists in environment."""
+        values["api_key"] = get_from_dict_or_env(
+            values, "api_key", "WENXIN_API_KEY"
+        )
+        values["secret_key"] = get_from_dict_or_env(
+            values, "secret_key", "WENXIN_SECRET_KEY"
+        )
+        return values
+
+    @property
+    def _default_params(self) -> Dict[str, Any]:
+        """Get the default parameters for calling the Wenxin API."""
+        return {
+            "model": self.model,
+            "temperature": self.temperature,
+            "top_p": self.top_p,
+            "stream": self.streaming,
+            **self.model_kwargs,
+        }
+
+    @property
+    def _identifying_params(self) -> Dict[str, Any]:
+        """Get the identifying parameters."""
+        return {**{"model": self.model}, **self._default_params}
+
+    @property
+    def _llm_type(self) -> str:
+        """Return type of llm."""
+        return "wenxin"
+
+    def __init__(self, **data: Any):
+        super().__init__(**data)
+        self._client = _WenxinEndpointClient(
+            api_key=self.api_key,
+            secret_key=self.secret_key,
+        )
+
+    def _call(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> str:
+        r"""Call out to Wenxin's completion endpoint to chat.
+        Args:
+            prompt: The prompt to pass into the model.
+        Returns:
+            The string generated by the model.
+        Example:
+            .. code-block:: python
+
+                response = wenxin("Tell me a joke.")
+        """
+        if self.streaming:
+            completion = ""
+            for chunk in self._stream(
+                prompt=prompt, stop=stop, run_manager=run_manager, **kwargs
+            ):
+                completion += chunk.text
+        else:
+            request = self._default_params
+            request["messages"] = [{"role": "user", "content": prompt}]
+            request.update(kwargs)
+            completion = self._client.post(request)
+
+        if stop is not None:
+            completion = enforce_stop_tokens(completion, stop)
+
+        return completion
+
+    def _stream(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> Iterator[GenerationChunk]:
+        r"""Call wenxin completion_stream and return the resulting generator.
+
+        Args:
+            prompt: The prompt to pass into the model.
+            stop: Optional list of stop words to use when generating.
+        Returns:
+            A generator representing the stream of tokens from Wenxin.
+        Example:
+            .. code-block:: python
+
+                prompt = "Write a poem about a stream."
+ prompt = f"\n\nHuman: {prompt}\n\nAssistant:" + generator = wenxin.stream(prompt) + for token in generator: + yield token + """ + request = self._default_params + request["messages"] = [{"role": "user", "content": prompt}] + request.update(kwargs) + + for token in self._client.post(request).iter_lines(): + if token: + token = token.decode("utf-8") + completion = json.loads(token[5:]) + + yield GenerationChunk(text=completion['result']) + if run_manager: + run_manager.on_llm_new_token(completion['result']) + + if completion['is_end']: + break diff --git a/api/core/third_party/spark/__init__.py b/api/core/third_party/spark/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/api/core/third_party/spark/spark_llm.py b/api/core/third_party/spark/spark_llm.py new file mode 100644 index 000000000..2b6d9b498 --- /dev/null +++ b/api/core/third_party/spark/spark_llm.py @@ -0,0 +1,150 @@ +import base64 +import datetime +import hashlib +import hmac +import json +import queue +from typing import Optional +from urllib.parse import urlparse +import ssl +from datetime import datetime +from time import mktime +from urllib.parse import urlencode +from wsgiref.handlers import format_date_time + +import websocket + + +class SparkLLMClient: + def __init__(self, app_id: str, api_key: str, api_secret: str): + + self.api_base = "ws://spark-api.xf-yun.com/v1.1/chat" + self.app_id = app_id + self.ws_url = self.create_url( + urlparse(self.api_base).netloc, + urlparse(self.api_base).path, + self.api_base, + api_key, + api_secret + ) + + self.queue = queue.Queue() + self.blocking_message = '' + + def create_url(self, host: str, path: str, api_base: str, api_key: str, api_secret: str) -> str: + # generate timestamp by RFC1123 + now = datetime.now() + date = format_date_time(mktime(now.timetuple())) + + signature_origin = "host: " + host + "\n" + signature_origin += "date: " + date + "\n" + signature_origin += "GET " + path + " HTTP/1.1" + + # encrypt using hmac-sha256 + signature_sha = hmac.new(api_secret.encode('utf-8'), signature_origin.encode('utf-8'), + digestmod=hashlib.sha256).digest() + + signature_sha_base64 = base64.b64encode(signature_sha).decode(encoding='utf-8') + + authorization_origin = f'api_key="{api_key}", algorithm="hmac-sha256", headers="host date request-line", signature="{signature_sha_base64}"' + + authorization = base64.b64encode(authorization_origin.encode('utf-8')).decode(encoding='utf-8') + + v = { + "authorization": authorization, + "date": date, + "host": host + } + # generate url + url = api_base + '?' 
+ urlencode(v) + return url + + def run(self, messages: list, user_id: str, + model_kwargs: Optional[dict] = None, streaming: bool = False): + websocket.enableTrace(False) + ws = websocket.WebSocketApp( + self.ws_url, + on_message=self.on_message, + on_error=self.on_error, + on_close=self.on_close, + on_open=self.on_open + ) + ws.messages = messages + ws.user_id = user_id + ws.model_kwargs = model_kwargs + ws.streaming = streaming + ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE}) + + def on_error(self, ws, error): + self.queue.put({'error': error}) + ws.close() + + def on_close(self, ws, close_status_code, close_reason): + self.queue.put({'done': True}) + + def on_open(self, ws): + self.blocking_message = '' + data = json.dumps(self.gen_params( + messages=ws.messages, + user_id=ws.user_id, + model_kwargs=ws.model_kwargs + )) + ws.send(data) + + def on_message(self, ws, message): + data = json.loads(message) + code = data['header']['code'] + if code != 0: + self.queue.put({'error': f"Code: {code}, Error: {data['header']['message']}"}) + ws.close() + else: + choices = data["payload"]["choices"] + status = choices["status"] + content = choices["text"][0]["content"] + if ws.streaming: + self.queue.put({'data': content}) + else: + self.blocking_message += content + + if status == 2: + if not ws.streaming: + self.queue.put({'data': self.blocking_message}) + ws.close() + + def gen_params(self, messages: list, user_id: str, + model_kwargs: Optional[dict] = None) -> dict: + data = { + "header": { + "app_id": self.app_id, + "uid": user_id + }, + "parameter": { + "chat": { + "domain": "general" + } + }, + "payload": { + "message": { + "text": messages + } + } + } + + if model_kwargs: + data['parameter']['chat'].update(model_kwargs) + + return data + + def subscribe(self): + while True: + content = self.queue.get() + if 'error' in content: + raise SparkError(content['error']) + + if 'data' not in content: + break + yield content + + +class SparkError(Exception): + pass diff --git a/api/core/tool/dataset_index_tool.py b/api/core/tool/dataset_index_tool.py deleted file mode 100644 index c459ebaf1..000000000 --- a/api/core/tool/dataset_index_tool.py +++ /dev/null @@ -1,102 +0,0 @@ -from flask import current_app -from langchain.embeddings import OpenAIEmbeddings -from langchain.tools import BaseTool - -from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCallbackHandler -from core.embedding.cached_embedding import CacheEmbedding -from core.index.keyword_table_index.keyword_table_index import KeywordTableIndex, KeywordTableConfig -from core.index.vector_index.vector_index import VectorIndex -from core.llm.llm_builder import LLMBuilder -from models.dataset import Dataset, DocumentSegment - - -class DatasetTool(BaseTool): - """Tool for querying a Dataset.""" - - dataset: Dataset - k: int = 2 - - def _run(self, tool_input: str) -> str: - if self.dataset.indexing_technique == "economy": - # use keyword table query - kw_table_index = KeywordTableIndex( - dataset=self.dataset, - config=KeywordTableConfig( - max_keywords_per_chunk=5 - ) - ) - - documents = kw_table_index.search(tool_input, search_kwargs={'k': self.k}) - return str("\n".join([document.page_content for document in documents])) - else: - model_credentials = LLMBuilder.get_model_credentials( - tenant_id=self.dataset.tenant_id, - model_provider=LLMBuilder.get_default_provider(self.dataset.tenant_id, 'text-embedding-ada-002'), - model_name='text-embedding-ada-002' - ) - - embeddings = CacheEmbedding(OpenAIEmbeddings( - 
**model_credentials - )) - - vector_index = VectorIndex( - dataset=self.dataset, - config=current_app.config, - embeddings=embeddings - ) - - documents = vector_index.search( - tool_input, - search_type='similarity', - search_kwargs={ - 'k': self.k - } - ) - - hit_callback = DatasetIndexToolCallbackHandler(self.dataset.id) - hit_callback.on_tool_end(documents) - document_context_list = [] - index_node_ids = [document.metadata['doc_id'] for document in documents] - segments = DocumentSegment.query.filter(DocumentSegment.completed_at.isnot(None), - DocumentSegment.status == 'completed', - DocumentSegment.enabled == True, - DocumentSegment.index_node_id.in_(index_node_ids) - ).all() - - if segments: - for segment in segments: - if segment.answer: - document_context_list.append(segment.answer) - else: - document_context_list.append(segment.content) - - return str("\n".join(document_context_list)) - - async def _arun(self, tool_input: str) -> str: - model_credentials = LLMBuilder.get_model_credentials( - tenant_id=self.dataset.tenant_id, - model_provider=LLMBuilder.get_default_provider(self.dataset.tenant_id, 'text-embedding-ada-002'), - model_name='text-embedding-ada-002' - ) - - embeddings = CacheEmbedding(OpenAIEmbeddings( - **model_credentials - )) - - vector_index = VectorIndex( - dataset=self.dataset, - config=current_app.config, - embeddings=embeddings - ) - - documents = await vector_index.asearch( - tool_input, - search_type='similarity', - search_kwargs={ - 'k': 10 - } - ) - - hit_callback = DatasetIndexToolCallbackHandler(self.dataset.id) - hit_callback.on_tool_end(documents) - return str("\n".join([document.page_content for document in documents])) diff --git a/api/core/tool/dataset_retriever_tool.py b/api/core/tool/dataset_retriever_tool.py index 35f15bbce..57ff10ae9 100644 --- a/api/core/tool/dataset_retriever_tool.py +++ b/api/core/tool/dataset_retriever_tool.py @@ -2,7 +2,6 @@ import re from typing import Type from flask import current_app -from langchain.embeddings import OpenAIEmbeddings from langchain.tools import BaseTool from pydantic import Field, BaseModel @@ -10,7 +9,7 @@ from core.callback_handler.index_tool_callback_handler import DatasetIndexToolCa from core.embedding.cached_embedding import CacheEmbedding from core.index.keyword_table_index.keyword_table_index import KeywordTableIndex, KeywordTableConfig from core.index.vector_index.vector_index import VectorIndex -from core.llm.llm_builder import LLMBuilder +from core.model_providers.model_factory import ModelFactory from extensions.ext_database import db from models.dataset import Dataset, DocumentSegment @@ -71,15 +70,11 @@ class DatasetRetrieverTool(BaseTool): documents = kw_table_index.search(query, search_kwargs={'k': self.k}) return str("\n".join([document.page_content for document in documents])) else: - model_credentials = LLMBuilder.get_model_credentials( - tenant_id=dataset.tenant_id, - model_provider=LLMBuilder.get_default_provider(dataset.tenant_id, 'text-embedding-ada-002'), - model_name='text-embedding-ada-002' + embedding_model = ModelFactory.get_embedding_model( + tenant_id=dataset.tenant_id ) - embeddings = CacheEmbedding(OpenAIEmbeddings( - **model_credentials - )) + embeddings = CacheEmbedding(embedding_model) vector_index = VectorIndex( dataset=dataset, diff --git a/api/events/event_handlers/__init__.py b/api/events/event_handlers/__init__.py index 28a94d7a7..02020e919 100644 --- a/api/events/event_handlers/__init__.py +++ b/api/events/event_handlers/__init__.py @@ -1,7 +1,5 @@ from 
.create_installed_app_when_app_created import handle from .delete_installed_app_when_app_deleted import handle -from .create_provider_when_tenant_created import handle -from .create_provider_when_tenant_updated import handle from .clean_when_document_deleted import handle from .clean_when_dataset_deleted import handle from .update_app_dataset_join_when_app_model_config_updated import handle diff --git a/api/events/event_handlers/create_provider_when_tenant_created.py b/api/events/event_handlers/create_provider_when_tenant_created.py deleted file mode 100644 index 0d3567025..000000000 --- a/api/events/event_handlers/create_provider_when_tenant_created.py +++ /dev/null @@ -1,24 +0,0 @@ -from flask import current_app - -from events.tenant_event import tenant_was_updated -from models.provider import ProviderName -from services.provider_service import ProviderService - - -@tenant_was_updated.connect -def handle(sender, **kwargs): - tenant = sender - if tenant.status == 'normal': - ProviderService.create_system_provider( - tenant, - ProviderName.OPENAI.value, - current_app.config['OPENAI_HOSTED_QUOTA_LIMIT'], - True - ) - - ProviderService.create_system_provider( - tenant, - ProviderName.ANTHROPIC.value, - current_app.config['ANTHROPIC_HOSTED_QUOTA_LIMIT'], - True - ) diff --git a/api/events/event_handlers/create_provider_when_tenant_updated.py b/api/events/event_handlers/create_provider_when_tenant_updated.py deleted file mode 100644 index 366e13c59..000000000 --- a/api/events/event_handlers/create_provider_when_tenant_updated.py +++ /dev/null @@ -1,24 +0,0 @@ -from flask import current_app - -from events.tenant_event import tenant_was_created -from models.provider import ProviderName -from services.provider_service import ProviderService - - -@tenant_was_created.connect -def handle(sender, **kwargs): - tenant = sender - if tenant.status == 'normal': - ProviderService.create_system_provider( - tenant, - ProviderName.OPENAI.value, - current_app.config['OPENAI_HOSTED_QUOTA_LIMIT'], - True - ) - - ProviderService.create_system_provider( - tenant, - ProviderName.ANTHROPIC.value, - current_app.config['ANTHROPIC_HOSTED_QUOTA_LIMIT'], - True - ) diff --git a/api/events/event_handlers/generate_conversation_name_when_first_message_created.py b/api/events/event_handlers/generate_conversation_name_when_first_message_created.py index 4c1bbee53..dc18bf44f 100644 --- a/api/events/event_handlers/generate_conversation_name_when_first_message_created.py +++ b/api/events/event_handlers/generate_conversation_name_when_first_message_created.py @@ -23,7 +23,6 @@ def handle(sender, **kwargs): conversation.name = name except: conversation.name = 'New Chat' - logging.exception('generate_conversation_name failed') db.session.add(conversation) db.session.commit() diff --git a/api/extensions/ext_stripe.py b/api/extensions/ext_stripe.py new file mode 100644 index 000000000..3a192c081 --- /dev/null +++ b/api/extensions/ext_stripe.py @@ -0,0 +1,6 @@ +import stripe + + +def init_app(app): + if app.config.get('STRIPE_API_KEY'): + stripe.api_key = app.config.get('STRIPE_API_KEY') diff --git a/api/libs/rsa.py b/api/libs/rsa.py index 8741989a9..a04282a5f 100644 --- a/api/libs/rsa.py +++ b/api/libs/rsa.py @@ -1,16 +1,14 @@ # -*- coding:utf-8 -*- import hashlib -from Crypto.Cipher import PKCS1_OAEP +from Crypto.Cipher import PKCS1_OAEP, AES from Crypto.PublicKey import RSA +from Crypto.Random import get_random_bytes from extensions.ext_redis import redis_client from extensions.ext_storage import storage -# TODO: PKCS1_OAEP is no 
longer recommended for new systems and protocols. It is recommended to migrate to PKCS1_PSS. - - def generate_key_pair(tenant_id): private_key = RSA.generate(2048) public_key = private_key.publickey() @@ -25,14 +23,26 @@ def generate_key_pair(tenant_id): return pem_public.decode() +prefix_hybrid = b"HYBRID:" + + def encrypt(text, public_key): if isinstance(public_key, str): public_key = public_key.encode() + aes_key = get_random_bytes(16) + cipher_aes = AES.new(aes_key, AES.MODE_EAX) + + ciphertext, tag = cipher_aes.encrypt_and_digest(text.encode()) + rsa_key = RSA.import_key(public_key) - cipher = PKCS1_OAEP.new(rsa_key) - encrypted_text = cipher.encrypt(text.encode()) - return encrypted_text + cipher_rsa = PKCS1_OAEP.new(rsa_key) + + enc_aes_key = cipher_rsa.encrypt(aes_key) + + encrypted_data = enc_aes_key + cipher_aes.nonce + tag + ciphertext + + return prefix_hybrid + encrypted_data def decrypt(encrypted_text, tenant_id): @@ -49,8 +59,23 @@ def decrypt(encrypted_text, tenant_id): redis_client.setex(cache_key, 120, private_key) rsa_key = RSA.import_key(private_key) - cipher = PKCS1_OAEP.new(rsa_key) - decrypted_text = cipher.decrypt(encrypted_text) + cipher_rsa = PKCS1_OAEP.new(rsa_key) + + if encrypted_text.startswith(prefix_hybrid): + encrypted_text = encrypted_text[len(prefix_hybrid):] + + enc_aes_key = encrypted_text[:rsa_key.size_in_bytes()] + nonce = encrypted_text[rsa_key.size_in_bytes():rsa_key.size_in_bytes() + 16] + tag = encrypted_text[rsa_key.size_in_bytes() + 16:rsa_key.size_in_bytes() + 32] + ciphertext = encrypted_text[rsa_key.size_in_bytes() + 32:] + + aes_key = cipher_rsa.decrypt(enc_aes_key) + + cipher_aes = AES.new(aes_key, AES.MODE_EAX, nonce=nonce) + decrypted_text = cipher_aes.decrypt_and_verify(ciphertext, tag) + else: + decrypted_text = cipher_rsa.decrypt(encrypted_text) + return decrypted_text.decode() diff --git a/api/migrations/versions/16fa53d9faec_add_provider_model_support.py b/api/migrations/versions/16fa53d9faec_add_provider_model_support.py new file mode 100644 index 000000000..92b1fba9c --- /dev/null +++ b/api/migrations/versions/16fa53d9faec_add_provider_model_support.py @@ -0,0 +1,79 @@ +"""add provider model support + +Revision ID: 16fa53d9faec +Revises: 8d2d099ceb74 +Create Date: 2023-08-06 16:57:51.248337 + +""" +from alembic import op +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = '16fa53d9faec' +down_revision = '8d2d099ceb74' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! 
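The rewritten api/libs/rsa.py above moves from raw RSA-OAEP to envelope encryption: each value is sealed with a fresh AES-128 key in EAX mode and only that key is RSA-encrypted, so plaintext length is no longer capped by the RSA modulus, and legacy ciphertexts (those without the HYBRID: prefix) still decrypt through the old path. A minimal self-contained sketch of the same envelope layout, using a locally generated key pair instead of the tenant keys kept in storage:

from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
from Crypto.Random import get_random_bytes

prefix_hybrid = b"HYBRID:"

def hybrid_encrypt(text: str, public_key: RSA.RsaKey) -> bytes:
    aes_key = get_random_bytes(16)                      # fresh data key per message
    cipher_aes = AES.new(aes_key, AES.MODE_EAX)
    ciphertext, tag = cipher_aes.encrypt_and_digest(text.encode())
    enc_aes_key = PKCS1_OAEP.new(public_key).encrypt(aes_key)
    # envelope layout: enc_aes_key | nonce (16 bytes) | tag (16 bytes) | ciphertext
    return prefix_hybrid + enc_aes_key + cipher_aes.nonce + tag + ciphertext

def hybrid_decrypt(data: bytes, private_key: RSA.RsaKey) -> str:
    data = data[len(prefix_hybrid):]
    n = private_key.size_in_bytes()                     # 256 for RSA-2048
    enc_aes_key, nonce, tag, ciphertext = data[:n], data[n:n + 16], data[n + 16:n + 32], data[n + 32:]
    aes_key = PKCS1_OAEP.new(private_key).decrypt(enc_aes_key)
    cipher_aes = AES.new(aes_key, AES.MODE_EAX, nonce=nonce)
    return cipher_aes.decrypt_and_verify(ciphertext, tag).decode()

key = RSA.generate(2048)
assert hybrid_decrypt(hybrid_encrypt("sk-...", key.publickey()), key) == "sk-..."

decrypt_and_verify raises ValueError on a tampered tag, which is the property the plain PKCS1_OAEP path never had for the bulk payload.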
### + op.create_table('provider_models', + sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('tenant_id', postgresql.UUID(), nullable=False), + sa.Column('provider_name', sa.String(length=40), nullable=False), + sa.Column('model_name', sa.String(length=40), nullable=False), + sa.Column('model_type', sa.String(length=40), nullable=False), + sa.Column('encrypted_config', sa.Text(), nullable=True), + sa.Column('is_valid', sa.Boolean(), server_default=sa.text('false'), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), + sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), + sa.PrimaryKeyConstraint('id', name='provider_model_pkey'), + sa.UniqueConstraint('tenant_id', 'provider_name', 'model_name', 'model_type', name='unique_provider_model_name') + ) + with op.batch_alter_table('provider_models', schema=None) as batch_op: + batch_op.create_index('provider_model_tenant_id_provider_idx', ['tenant_id', 'provider_name'], unique=False) + + op.create_table('tenant_default_models', + sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('tenant_id', postgresql.UUID(), nullable=False), + sa.Column('provider_name', sa.String(length=40), nullable=False), + sa.Column('model_name', sa.String(length=40), nullable=False), + sa.Column('model_type', sa.String(length=40), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), + sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), + sa.PrimaryKeyConstraint('id', name='tenant_default_model_pkey') + ) + with op.batch_alter_table('tenant_default_models', schema=None) as batch_op: + batch_op.create_index('tenant_default_model_tenant_id_provider_type_idx', ['tenant_id', 'provider_name', 'model_type'], unique=False) + + op.create_table('tenant_preferred_model_providers', + sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('tenant_id', postgresql.UUID(), nullable=False), + sa.Column('provider_name', sa.String(length=40), nullable=False), + sa.Column('preferred_provider_type', sa.String(length=40), nullable=False), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), + sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), + sa.PrimaryKeyConstraint('id', name='tenant_preferred_model_provider_pkey') + ) + with op.batch_alter_table('tenant_preferred_model_providers', schema=None) as batch_op: + batch_op.create_index('tenant_preferred_model_provider_tenant_provider_idx', ['tenant_id', 'provider_name'], unique=False) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
###
+    with op.batch_alter_table('tenant_preferred_model_providers', schema=None) as batch_op:
+        batch_op.drop_index('tenant_preferred_model_provider_tenant_provider_idx')
+
+    op.drop_table('tenant_preferred_model_providers')
+    with op.batch_alter_table('tenant_default_models', schema=None) as batch_op:
+        batch_op.drop_index('tenant_default_model_tenant_id_provider_type_idx')
+
+    op.drop_table('tenant_default_models')
+    with op.batch_alter_table('provider_models', schema=None) as batch_op:
+        batch_op.drop_index('provider_model_tenant_id_provider_idx')
+
+    op.drop_table('provider_models')
+    # ### end Alembic commands ###
diff --git a/api/migrations/versions/5022897aaceb_add_model_name_in_embedding.py b/api/migrations/versions/5022897aaceb_add_model_name_in_embedding.py
new file mode 100644
index 000000000..182db6ccc
--- /dev/null
+++ b/api/migrations/versions/5022897aaceb_add_model_name_in_embedding.py
@@ -0,0 +1,36 @@
+"""add model name in embedding
+
+Revision ID: 5022897aaceb
+Revises: bf0aec5ba2cf
+Create Date: 2023-08-11 14:38:15.499460
+
+"""
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision = '5022897aaceb'
+down_revision = 'bf0aec5ba2cf'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table('embeddings', schema=None) as batch_op:
+        batch_op.add_column(sa.Column('model_name', sa.String(length=40), server_default=sa.text("'text-embedding-ada-002'::character varying"), nullable=False))
+        batch_op.drop_constraint('embedding_hash_idx', type_='unique')
+        batch_op.create_unique_constraint('embedding_hash_idx', ['model_name', 'hash'])
+
+    # ### end Alembic commands ###
+
+
+def downgrade():
+    # ### commands auto generated by Alembic - please adjust! ###
+    with op.batch_alter_table('embeddings', schema=None) as batch_op:
+        batch_op.drop_constraint('embedding_hash_idx', type_='unique')
+        batch_op.create_unique_constraint('embedding_hash_idx', ['hash'])
+        batch_op.drop_column('model_name')
+
+    # ### end Alembic commands ###
diff --git a/api/migrations/versions/bf0aec5ba2cf_add_provider_order.py b/api/migrations/versions/bf0aec5ba2cf_add_provider_order.py
new file mode 100644
index 000000000..aa9f74fe3
--- /dev/null
+++ b/api/migrations/versions/bf0aec5ba2cf_add_provider_order.py
@@ -0,0 +1,52 @@
+"""add provider order
+
+Revision ID: bf0aec5ba2cf
+Revises: e35ed59becda
+Create Date: 2023-08-10 00:03:44.273430
+
+"""
+from alembic import op
+import sqlalchemy as sa
+from sqlalchemy.dialects import postgresql
+
+# revision identifiers, used by Alembic.
+revision = 'bf0aec5ba2cf'
+down_revision = 'e35ed59becda'
+branch_labels = None
+depends_on = None
+
+
+def upgrade():
+    # ### commands auto generated by Alembic - please adjust!
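Migration 5022897aaceb above widens the embedding cache key from hash alone to (model_name, hash), so different embedding models can cache distinct vectors for identical text. A hedged sketch of the read-through lookup this key makes possible (the helper below is illustrative, not the project's exact CacheEmbedding implementation):

import hashlib

from extensions.ext_database import db
from models.dataset import Embedding

def get_or_compute_embedding(model_name: str, text: str, embed_fn) -> list[float]:
    text_hash = hashlib.sha256(text.encode()).hexdigest()   # stand-in content hash
    cached = db.session.query(Embedding).filter_by(
        model_name=model_name, hash=text_hash).first()      # unique on (model_name, hash)
    if cached:
        return cached.get_embedding()
    vector = embed_fn(text)                                 # call out to the provider
    record = Embedding(model_name=model_name, hash=text_hash)
    record.set_embedding(vector)                            # pickled into the binary column
    db.session.add(record)
    db.session.commit()
    return vector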
### + op.create_table('provider_orders', + sa.Column('id', postgresql.UUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False), + sa.Column('tenant_id', postgresql.UUID(), nullable=False), + sa.Column('provider_name', sa.String(length=40), nullable=False), + sa.Column('account_id', postgresql.UUID(), nullable=False), + sa.Column('payment_product_id', sa.String(length=191), nullable=False), + sa.Column('payment_id', sa.String(length=191), nullable=True), + sa.Column('transaction_id', sa.String(length=191), nullable=True), + sa.Column('quantity', sa.Integer(), server_default=sa.text('1'), nullable=False), + sa.Column('currency', sa.String(length=40), nullable=True), + sa.Column('total_amount', sa.Integer(), nullable=True), + sa.Column('payment_status', sa.String(length=40), server_default=sa.text("'wait_pay'::character varying"), nullable=False), + sa.Column('paid_at', sa.DateTime(), nullable=True), + sa.Column('pay_failed_at', sa.DateTime(), nullable=True), + sa.Column('refunded_at', sa.DateTime(), nullable=True), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), + sa.Column('updated_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP(0)'), nullable=False), + sa.PrimaryKeyConstraint('id', name='provider_order_pkey') + ) + with op.batch_alter_table('provider_orders', schema=None) as batch_op: + batch_op.create_index('provider_order_tenant_provider_idx', ['tenant_id', 'provider_name'], unique=False) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('provider_orders', schema=None) as batch_op: + batch_op.drop_index('provider_order_tenant_provider_idx') + + op.drop_table('provider_orders') + # ### end Alembic commands ### diff --git a/api/migrations/versions/e35ed59becda_modify_quota_limit_field_type.py b/api/migrations/versions/e35ed59becda_modify_quota_limit_field_type.py new file mode 100644 index 000000000..e9056d57f --- /dev/null +++ b/api/migrations/versions/e35ed59becda_modify_quota_limit_field_type.py @@ -0,0 +1,46 @@ +"""modify quota limit field type + +Revision ID: e35ed59becda +Revises: 16fa53d9faec +Create Date: 2023-08-09 22:20:31.577953 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = 'e35ed59becda' +down_revision = '16fa53d9faec' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('providers', schema=None) as batch_op: + batch_op.alter_column('quota_limit', + existing_type=sa.INTEGER(), + type_=sa.BigInteger(), + existing_nullable=True) + batch_op.alter_column('quota_used', + existing_type=sa.INTEGER(), + type_=sa.BigInteger(), + existing_nullable=True) + + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + with op.batch_alter_table('providers', schema=None) as batch_op: + batch_op.alter_column('quota_used', + existing_type=sa.BigInteger(), + type_=sa.INTEGER(), + existing_nullable=True) + batch_op.alter_column('quota_limit', + existing_type=sa.BigInteger(), + type_=sa.INTEGER(), + existing_nullable=True) + + # ### end Alembic commands ### diff --git a/api/models/dataset.py b/api/models/dataset.py index b63b898df..ecf087ef6 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -9,6 +9,7 @@ from extensions.ext_database import db from models.account import Account from models.model import App, UploadFile + class Dataset(db.Model): __tablename__ = 'datasets' __table_args__ = ( @@ -268,7 +269,7 @@ class Document(db.Model): @property def average_segment_length(self): if self.word_count and self.word_count != 0 and self.segment_count and self.segment_count != 0: - return self.word_count//self.segment_count + return self.word_count // self.segment_count return 0 @property @@ -346,16 +347,6 @@ class DocumentSegment(db.Model): def document(self): return db.session.query(Document).filter(Document.id == self.document_id).first() - @property - def embedding(self): - embedding = db.session.query(Embedding).filter(Embedding.hash == self.index_node_hash).first() \ - if self.index_node_hash else None - - if embedding: - return embedding.embedding - - return None - @property def previous_segment(self): return db.session.query(DocumentSegment).filter( @@ -436,10 +427,12 @@ class Embedding(db.Model): __tablename__ = 'embeddings' __table_args__ = ( db.PrimaryKeyConstraint('id', name='embedding_pkey'), - db.UniqueConstraint('hash', name='embedding_hash_idx') + db.UniqueConstraint('model_name', 'hash', name='embedding_hash_idx') ) id = db.Column(UUID, primary_key=True, server_default=db.text('uuid_generate_v4()')) + model_name = db.Column(db.String(40), nullable=False, + server_default=db.text("'text-embedding-ada-002'::character varying")) hash = db.Column(db.String(64), nullable=False) embedding = db.Column(db.LargeBinary, nullable=False) created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) @@ -448,4 +441,4 @@ class Embedding(db.Model): self.embedding = pickle.dumps(embedding_data, protocol=pickle.HIGHEST_PROTOCOL) def get_embedding(self) -> list[float]: - return pickle.loads(self.embedding) \ No newline at end of file + return pickle.loads(self.embedding) diff --git a/api/models/provider.py b/api/models/provider.py index e4ecfa124..63e9785a9 100644 --- a/api/models/provider.py +++ b/api/models/provider.py @@ -9,25 +9,30 @@ class ProviderType(Enum): CUSTOM = 'custom' SYSTEM = 'system' - -class ProviderName(Enum): - OPENAI = 'openai' - AZURE_OPENAI = 'azure_openai' - ANTHROPIC = 'anthropic' - COHERE = 'cohere' - HUGGINGFACEHUB = 'huggingfacehub' - @staticmethod def value_of(value): - for member in ProviderName: + for member in ProviderType: if member.value == value: return member raise ValueError(f"No matching enum found for value '{value}'") class ProviderQuotaType(Enum): - MONTHLY = 'monthly' + PAID = 'paid' + """hosted paid quota""" + + FREE = 'free' + """third-party free quota""" + TRIAL = 'trial' + """hosted trial quota""" + + @staticmethod + def value_of(value): + for member in ProviderQuotaType: + if member.value == value: + return member + raise ValueError(f"No matching enum found for value '{value}'") class Provider(db.Model): @@ -50,8 +55,8 @@ class Provider(db.Model): last_used = db.Column(db.DateTime, nullable=True) quota_type = 
db.Column(db.String(40), nullable=True, server_default=db.text("''::character varying")) - quota_limit = db.Column(db.Integer, nullable=True) - quota_used = db.Column(db.Integer, default=0) + quota_limit = db.Column(db.BigInteger, nullable=True) + quota_used = db.Column(db.BigInteger, default=0) created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) @@ -75,3 +80,96 @@ class Provider(db.Model): return self.is_valid else: return self.is_valid and self.token_is_set + + +class ProviderModel(db.Model): + """ + Provider model representing the API provider_models and their configurations. + """ + __tablename__ = 'provider_models' + __table_args__ = ( + db.PrimaryKeyConstraint('id', name='provider_model_pkey'), + db.Index('provider_model_tenant_id_provider_idx', 'tenant_id', 'provider_name'), + db.UniqueConstraint('tenant_id', 'provider_name', 'model_name', 'model_type', name='unique_provider_model_name') + ) + + id = db.Column(UUID, server_default=db.text('uuid_generate_v4()')) + tenant_id = db.Column(UUID, nullable=False) + provider_name = db.Column(db.String(40), nullable=False) + model_name = db.Column(db.String(40), nullable=False) + model_type = db.Column(db.String(40), nullable=False) + encrypted_config = db.Column(db.Text, nullable=True) + is_valid = db.Column(db.Boolean, nullable=False, server_default=db.text('false')) + created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) + updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) + + +class TenantDefaultModel(db.Model): + __tablename__ = 'tenant_default_models' + __table_args__ = ( + db.PrimaryKeyConstraint('id', name='tenant_default_model_pkey'), + db.Index('tenant_default_model_tenant_id_provider_type_idx', 'tenant_id', 'provider_name', 'model_type'), + ) + + id = db.Column(UUID, server_default=db.text('uuid_generate_v4()')) + tenant_id = db.Column(UUID, nullable=False) + provider_name = db.Column(db.String(40), nullable=False) + model_name = db.Column(db.String(40), nullable=False) + model_type = db.Column(db.String(40), nullable=False) + created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) + updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) + + +class TenantPreferredModelProvider(db.Model): + __tablename__ = 'tenant_preferred_model_providers' + __table_args__ = ( + db.PrimaryKeyConstraint('id', name='tenant_preferred_model_provider_pkey'), + db.Index('tenant_preferred_model_provider_tenant_provider_idx', 'tenant_id', 'provider_name'), + ) + + id = db.Column(UUID, server_default=db.text('uuid_generate_v4()')) + tenant_id = db.Column(UUID, nullable=False) + provider_name = db.Column(db.String(40), nullable=False) + preferred_provider_type = db.Column(db.String(40), nullable=False) + created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) + updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) + + +class ProviderOrderPaymentStatus(Enum): + WAIT_PAY = 'wait_pay' + PAID = 'paid' + PAY_FAILED = 'pay_failed' + REFUNDED = 'refunded' + + @staticmethod + def value_of(value): + for member in ProviderOrderPaymentStatus: + if member.value == value: + return member + raise ValueError(f"No matching enum found for value '{value}'") + + 
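quota_limit and quota_used are widened to BigInteger here, presumably to accommodate token-denominated quotas that overflow 32-bit integers. One way to deduct from them safely under concurrency is a single guarded UPDATE; a sketch under that assumption (the real deduction logic is not shown in this patch):

from extensions.ext_database import db
from models.provider import Provider, ProviderType

def try_consume_quota(tenant_id: str, provider_name: str, tokens: int) -> bool:
    # Guarded UPDATE: only succeeds while quota_used + tokens stays within
    # quota_limit, so concurrent requests cannot push usage past the limit.
    updated = db.session.query(Provider).filter(
        Provider.tenant_id == tenant_id,
        Provider.provider_name == provider_name,
        Provider.provider_type == ProviderType.SYSTEM.value,
        Provider.quota_limit.isnot(None),
        Provider.quota_used + tokens <= Provider.quota_limit,
    ).update({'quota_used': Provider.quota_used + tokens}, synchronize_session=False)
    db.session.commit()
    return updated > 0  # rowcount: 1 if quota was available, 0 if exhausted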
+ +class ProviderOrder(db.Model): + __tablename__ = 'provider_orders' + __table_args__ = ( + db.PrimaryKeyConstraint('id', name='provider_order_pkey'), + db.Index('provider_order_tenant_provider_idx', 'tenant_id', 'provider_name'), + ) + + id = db.Column(UUID, server_default=db.text('uuid_generate_v4()')) + tenant_id = db.Column(UUID, nullable=False) + provider_name = db.Column(db.String(40), nullable=False) + account_id = db.Column(UUID, nullable=False) + payment_product_id = db.Column(db.String(191), nullable=False) + payment_id = db.Column(db.String(191)) + transaction_id = db.Column(db.String(191)) + quantity = db.Column(db.Integer, nullable=False, server_default=db.text('1')) + currency = db.Column(db.String(40)) + total_amount = db.Column(db.Integer) + payment_status = db.Column(db.String(40), nullable=False, server_default=db.text("'wait_pay'::character varying")) + paid_at = db.Column(db.DateTime) + pay_failed_at = db.Column(db.DateTime) + refunded_at = db.Column(db.DateTime) + created_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) + updated_at = db.Column(db.DateTime, nullable=False, server_default=db.text('CURRENT_TIMESTAMP(0)')) diff --git a/api/requirements.txt b/api/requirements.txt index ccbb0e18c..ac87a58ea 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -10,12 +10,13 @@ flask-session2==1.3.1 flask-cors==3.0.10 gunicorn~=21.2.0 gevent~=22.10.2 -langchain==0.0.239 +langchain==0.0.250 openai~=0.27.8 psycopg2-binary~=2.9.6 pycryptodome==3.17 python-dotenv==1.0.0 pytest~=7.3.1 +pytest-mock~=3.11.1 tiktoken==0.3.3 Authlib==1.2.0 boto3~=1.26.123 @@ -40,4 +41,10 @@ newspaper3k==0.2.8 google-api-python-client==2.90.0 wikipedia==1.4.0 readabilipy==0.2.0 -google-search-results==2.4.2 \ No newline at end of file +google-search-results==2.4.2 +replicate~=0.9.0 +websocket-client~=1.6.1 +dashscope~=1.5.0 +huggingface_hub~=0.16.4 +transformers~=4.31.0 +stripe~=5.5.0 \ No newline at end of file diff --git a/api/services/app_model_config_service.py b/api/services/app_model_config_service.py index a5af67e30..abcb7e623 100644 --- a/api/services/app_model_config_service.py +++ b/api/services/app_model_config_service.py @@ -2,42 +2,11 @@ import re import uuid from core.agent.agent_executor import PlanningStrategy -from core.constant import llm_constant +from core.model_providers.model_provider_factory import ModelProviderFactory +from core.model_providers.models.entity.model_params import ModelType from models.account import Account from services.dataset_service import DatasetService -from core.llm.llm_builder import LLMBuilder -MODEL_PROVIDERS = [ - 'openai', - 'anthropic', -] - -MODELS_BY_APP_MODE = { - 'chat': [ - 'claude-instant-1', - 'claude-2', - 'gpt-4', - 'gpt-4-32k', - 'gpt-3.5-turbo', - 'gpt-3.5-turbo-16k', - ], - 'completion': [ - 'claude-instant-1', - 'claude-2', - 'gpt-4', - 'gpt-4-32k', - 'gpt-3.5-turbo', - 'gpt-3.5-turbo-16k', - 'text-davinci-003', - ] -} - -SUPPORT_AGENT_MODELS = [ - "gpt-4", - "gpt-4-32k", - "gpt-3.5-turbo", - "gpt-3.5-turbo-16k", -] SUPPORT_TOOLS = ["dataset", "google_search", "web_reader", "wikipedia", "current_datetime"] @@ -65,40 +34,40 @@ class AppModelConfigService: # max_tokens if 'max_tokens' not in cp: cp["max_tokens"] = 512 - - if not isinstance(cp["max_tokens"], int) or cp["max_tokens"] <= 0 or cp["max_tokens"] > \ - llm_constant.max_context_token_length[model_name]: - raise ValueError( - "max_tokens must be an integer greater than 0 " - "and not exceeding the maximum value of the 
corresponding model") - + # + # if not isinstance(cp["max_tokens"], int) or cp["max_tokens"] <= 0 or cp["max_tokens"] > \ + # llm_constant.max_context_token_length[model_name]: + # raise ValueError( + # "max_tokens must be an integer greater than 0 " + # "and not exceeding the maximum value of the corresponding model") + # # temperature if 'temperature' not in cp: cp["temperature"] = 1 - - if not isinstance(cp["temperature"], (float, int)) or cp["temperature"] < 0 or cp["temperature"] > 2: - raise ValueError("temperature must be a float between 0 and 2") - + # + # if not isinstance(cp["temperature"], (float, int)) or cp["temperature"] < 0 or cp["temperature"] > 2: + # raise ValueError("temperature must be a float between 0 and 2") + # # top_p if 'top_p' not in cp: cp["top_p"] = 1 - if not isinstance(cp["top_p"], (float, int)) or cp["top_p"] < 0 or cp["top_p"] > 2: - raise ValueError("top_p must be a float between 0 and 2") - + # if not isinstance(cp["top_p"], (float, int)) or cp["top_p"] < 0 or cp["top_p"] > 2: + # raise ValueError("top_p must be a float between 0 and 2") + # # presence_penalty if 'presence_penalty' not in cp: cp["presence_penalty"] = 0 - if not isinstance(cp["presence_penalty"], (float, int)) or cp["presence_penalty"] < -2 or cp["presence_penalty"] > 2: - raise ValueError("presence_penalty must be a float between -2 and 2") - + # if not isinstance(cp["presence_penalty"], (float, int)) or cp["presence_penalty"] < -2 or cp["presence_penalty"] > 2: + # raise ValueError("presence_penalty must be a float between -2 and 2") + # # presence_penalty if 'frequency_penalty' not in cp: cp["frequency_penalty"] = 0 - if not isinstance(cp["frequency_penalty"], (float, int)) or cp["frequency_penalty"] < -2 or cp["frequency_penalty"] > 2: - raise ValueError("frequency_penalty must be a float between -2 and 2") + # if not isinstance(cp["frequency_penalty"], (float, int)) or cp["frequency_penalty"] < -2 or cp["frequency_penalty"] > 2: + # raise ValueError("frequency_penalty must be a float between -2 and 2") # Filter out extra parameters filtered_cp = { @@ -112,7 +81,7 @@ class AppModelConfigService: return filtered_cp @staticmethod - def validate_configuration(account: Account, config: dict, mode: str) -> dict: + def validate_configuration(tenant_id: str, account: Account, config: dict) -> dict: # opening_statement if 'opening_statement' not in config or not config["opening_statement"]: config["opening_statement"] = "" @@ -211,14 +180,21 @@ class AppModelConfigService: raise ValueError("model must be of object type") # model.provider - if 'provider' not in config["model"] or config["model"]["provider"] not in MODEL_PROVIDERS: - raise ValueError(f"model.provider is required and must be in {str(MODEL_PROVIDERS)}") + model_provider_names = ModelProviderFactory.get_provider_names() + if 'provider' not in config["model"] or config["model"]["provider"] not in model_provider_names: + raise ValueError(f"model.provider is required and must be in {str(model_provider_names)}") # model.name if 'name' not in config["model"]: raise ValueError("model.name is required") - if config["model"]["name"] not in MODELS_BY_APP_MODE[mode]: + model_provider = ModelProviderFactory.get_preferred_model_provider(tenant_id, config["model"]["provider"]) + if not model_provider: + raise ValueError("model.name must be in the specified model list") + + model_list = model_provider.get_supported_model_list(ModelType.TEXT_GENERATION) + model_ids = [m['id'] for m in model_list] + if config["model"]["name"] not in model_ids: 
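+            # note: get_supported_model_list() returns dicts with 'id' (and optional 'features'); only ids are compared here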
raise ValueError("model.name must be in the specified model list") # model.completion_params diff --git a/api/services/audio_service.py b/api/services/audio_service.py index 667fb4cb6..db1f0fe21 100644 --- a/api/services/audio_service.py +++ b/api/services/audio_service.py @@ -1,15 +1,13 @@ import io from werkzeug.datastructures import FileStorage -from core.llm.llm_builder import LLMBuilder -from core.llm.provider.llm_provider_service import LLMProviderService +from core.model_providers.model_factory import ModelFactory from services.errors.audio import NoAudioUploadedServiceError, AudioTooLargeServiceError, UnsupportedAudioTypeServiceError, ProviderNotSupportSpeechToTextServiceError -from core.llm.whisper import Whisper -from models.provider import ProviderName FILE_SIZE = 15 FILE_SIZE_LIMIT = FILE_SIZE * 1024 * 1024 ALLOWED_EXTENSIONS = ['mp3', 'mp4', 'mpeg', 'mpga', 'm4a', 'wav', 'webm'] + class AudioService: @classmethod def transcript(cls, tenant_id: str, file: FileStorage): @@ -26,14 +24,12 @@ class AudioService: if file_size > FILE_SIZE_LIMIT: message = f"Audio size larger than {FILE_SIZE} mb" raise AudioTooLargeServiceError(message) - - provider_name = LLMBuilder.get_default_provider(tenant_id, 'whisper-1') - if provider_name != ProviderName.OPENAI.value: - raise ProviderNotSupportSpeechToTextServiceError() - provider_service = LLMProviderService(tenant_id, provider_name) + model = ModelFactory.get_speech2text_model( + tenant_id=tenant_id + ) buffer = io.BytesIO(file_content) buffer.name = 'temp.mp3' - return Whisper(provider_service.provider).transcribe(buffer) + return model.run(buffer) diff --git a/api/services/completion_service.py b/api/services/completion_service.py index c081d8ec0..8899cdc11 100644 --- a/api/services/completion_service.py +++ b/api/services/completion_service.py @@ -11,7 +11,7 @@ from sqlalchemy import and_ from core.completion import Completion from core.conversation_message_task import PubHandler, ConversationTaskStoppedException -from core.llm.error import LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, LLMRateLimitError, \ +from core.model_providers.error import LLMBadRequestError, LLMAPIConnectionError, LLMAPIUnavailableError, LLMRateLimitError, \ LLMAuthorizationError, ProviderTokenNotInitError, QuotaExceededError, ModelCurrentlyNotSupportError from extensions.ext_database import db from extensions.ext_redis import redis_client @@ -127,9 +127,9 @@ class CompletionService: # validate config model_config = AppModelConfigService.validate_configuration( + tenant_id=app_model.tenant_id, account=user, - config=args['model_config'], - mode=app_model.mode + config=args['model_config'] ) app_model_config = AppModelConfig( diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 3f3f3652e..5edd4b3da 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -9,8 +9,7 @@ from typing import Optional, List from flask import current_app from sqlalchemy import func -from core.llm.token_calculator import TokenCalculator -from events.event_handlers.document_index_event import document_index_created +from core.model_providers.model_factory import ModelFactory from extensions.ext_redis import redis_client from flask_login import current_user @@ -875,8 +874,13 @@ class SegmentService: content = args['content'] doc_id = str(uuid.uuid4()) segment_hash = helper.generate_text_hash(content) + + embedding_model = ModelFactory.get_embedding_model( + tenant_id=document.tenant_id + ) + # calc embedding use 
tokens - tokens = TokenCalculator.get_num_tokens('text-embedding-ada-002', content) + tokens = embedding_model.get_num_tokens(content) max_position = db.session.query(func.max(DocumentSegment.position)).filter( DocumentSegment.document_id == document.id ).scalar() @@ -921,8 +925,13 @@ class SegmentService: update_segment_keyword_index_task.delay(segment.id) else: segment_hash = helper.generate_text_hash(content) + + embedding_model = ModelFactory.get_embedding_model( + tenant_id=document.tenant_id + ) + # calc embedding use tokens - tokens = TokenCalculator.get_num_tokens('text-embedding-ada-002', content) + tokens = embedding_model.get_num_tokens(content) segment.content = content segment.index_node_hash = segment_hash segment.word_count = len(content) diff --git a/api/services/hit_testing_service.py b/api/services/hit_testing_service.py index 17a4a4f4c..3c1247ba5 100644 --- a/api/services/hit_testing_service.py +++ b/api/services/hit_testing_service.py @@ -4,14 +4,13 @@ from typing import List import numpy as np from flask import current_app -from langchain.embeddings import OpenAIEmbeddings from langchain.embeddings.base import Embeddings from langchain.schema import Document from sklearn.manifold import TSNE from core.embedding.cached_embedding import CacheEmbedding from core.index.vector_index.vector_index import VectorIndex -from core.llm.llm_builder import LLMBuilder +from core.model_providers.model_factory import ModelFactory from extensions.ext_database import db from models.account import Account from models.dataset import Dataset, DocumentSegment, DatasetQuery @@ -29,15 +28,11 @@ class HitTestingService: "records": [] } - model_credentials = LLMBuilder.get_model_credentials( - tenant_id=dataset.tenant_id, - model_provider=LLMBuilder.get_default_provider(dataset.tenant_id, 'text-embedding-ada-002'), - model_name='text-embedding-ada-002' + embedding_model = ModelFactory.get_embedding_model( + tenant_id=dataset.tenant_id ) - embeddings = CacheEmbedding(OpenAIEmbeddings( - **model_credentials - )) + embeddings = CacheEmbedding(embedding_model) vector_index = VectorIndex( dataset=dataset, diff --git a/api/services/provider_checkout_service.py b/api/services/provider_checkout_service.py new file mode 100644 index 000000000..80391dfac --- /dev/null +++ b/api/services/provider_checkout_service.py @@ -0,0 +1,158 @@ +import datetime +import logging + +import stripe +from flask import current_app + +from core.model_providers.model_provider_factory import ModelProviderFactory +from extensions.ext_database import db +from models.account import Account +from models.provider import ProviderOrder, ProviderOrderPaymentStatus, ProviderType, Provider, ProviderQuotaType + + +class ProviderCheckout: + def __init__(self, stripe_checkout_session): + self.stripe_checkout_session = stripe_checkout_session + + def get_checkout_url(self): + return self.stripe_checkout_session.url + + +class ProviderCheckoutService: + def create_checkout(self, tenant_id: str, provider_name: str, account: Account) -> ProviderCheckout: + # check provider name is valid + model_provider_rules = ModelProviderFactory.get_provider_rules() + if provider_name not in model_provider_rules: + raise ValueError(f'provider name {provider_name} is invalid') + + model_provider_rule = model_provider_rules[provider_name] + + # check provider name can be paid + self._check_provider_payable(provider_name, model_provider_rule) + + # get stripe checkout product id + paid_provider = self._get_paid_provider(tenant_id, provider_name) + 
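+        # _get_paid_provider() lazily created the SYSTEM/PAID provider row above
+        # (quota_limit=0 on first purchase); fulfill_provider_order() raises
+        # quota_limit only after Stripe confirms payment.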
model_provider_class = ModelProviderFactory.get_model_provider_class(provider_name) + model_provider = model_provider_class(provider=paid_provider) + payment_info = model_provider.get_payment_info() + if not payment_info: + raise ValueError(f'provider name {provider_name} not support payment') + + payment_product_id = payment_info['product_id'] + + # create provider order + provider_order = ProviderOrder( + tenant_id=tenant_id, + provider_name=provider_name, + account_id=account.id, + payment_product_id=payment_product_id, + quantity=1, + payment_status=ProviderOrderPaymentStatus.WAIT_PAY.value + ) + + db.session.add(provider_order) + db.session.flush() + + try: + # create stripe checkout session + checkout_session = stripe.checkout.Session.create( + line_items=[ + { + 'price': f'{payment_product_id}', + 'quantity': 1, + }, + ], + mode='payment', + success_url=current_app.config.get("CONSOLE_WEB_URL") + '?provider_payment=succeeded', + cancel_url=current_app.config.get("CONSOLE_WEB_URL") + '?provider_payment=cancelled', + automatic_tax={'enabled': True}, + ) + except Exception as e: + logging.exception(e) + raise ValueError(f'provider name {provider_name} create checkout session failed, please try again later') + + provider_order.payment_id = checkout_session.id + db.session.commit() + + return ProviderCheckout(checkout_session) + + def fulfill_provider_order(self, event): + provider_order = db.session.query(ProviderOrder) \ + .filter(ProviderOrder.payment_id == event['data']['object']['id']) \ + .first() + + if not provider_order: + raise ValueError(f'provider order not found, payment id: {event["data"]["object"]["id"]}') + + if provider_order.payment_status != ProviderOrderPaymentStatus.WAIT_PAY.value: + raise ValueError(f'provider order payment status is not wait pay, payment id: {event["data"]["object"]["id"]}') + + provider_order.transaction_id = event['data']['object']['payment_intent'] + provider_order.currency = event['data']['object']['currency'] + provider_order.total_amount = event['data']['object']['amount_subtotal'] + provider_order.payment_status = ProviderOrderPaymentStatus.PAID.value + provider_order.paid_at = datetime.datetime.utcnow() + provider_order.updated_at = provider_order.paid_at + + # update provider quota + provider = db.session.query(Provider).filter( + Provider.tenant_id == provider_order.tenant_id, + Provider.provider_name == provider_order.provider_name, + Provider.provider_type == ProviderType.SYSTEM.value, + Provider.quota_type == ProviderQuotaType.PAID.value + ).first() + + if not provider: + raise ValueError(f'provider not found, tenant id: {provider_order.tenant_id}, ' + f'provider name: {provider_order.provider_name}') + + model_provider_class = ModelProviderFactory.get_model_provider_class(provider_order.provider_name) + model_provider = model_provider_class(provider=provider) + payment_info = model_provider.get_payment_info() + + if not payment_info: + increase_quota = 0 + else: + increase_quota = int(payment_info['increase_quota']) + + if increase_quota > 0: + provider.quota_limit += increase_quota + provider.is_valid = True + + db.session.commit() + + def _check_provider_payable(self, provider_name: str, model_provider_rule: dict): + if ProviderType.SYSTEM.value not in model_provider_rule['support_provider_types']: + raise ValueError(f'provider name {provider_name} not support payment') + + if 'system_config' not in model_provider_rule: + raise ValueError(f'provider name {provider_name} not support payment') + + if 'supported_quota_types' not in 
model_provider_rule['system_config']: + raise ValueError(f'provider name {provider_name} not support payment') + + if 'paid' not in model_provider_rule['system_config']['supported_quota_types']: + raise ValueError(f'provider name {provider_name} not support payment') + + def _get_paid_provider(self, tenant_id: str, provider_name: str): + paid_provider = db.session.query(Provider) \ + .filter( + Provider.tenant_id == tenant_id, + Provider.provider_name == provider_name, + Provider.provider_type == ProviderType.SYSTEM.value, + Provider.quota_type == ProviderQuotaType.PAID.value, + ).first() + + if not paid_provider: + paid_provider = Provider( + tenant_id=tenant_id, + provider_name=provider_name, + provider_type=ProviderType.SYSTEM.value, + quota_type=ProviderQuotaType.PAID.value, + quota_limit=0, + quota_used=0, + ) + db.session.add(paid_provider) + db.session.commit() + + return paid_provider diff --git a/api/services/provider_service.py b/api/services/provider_service.py index fffd3fbd5..de8f53d8f 100644 --- a/api/services/provider_service.py +++ b/api/services/provider_service.py @@ -1,88 +1,503 @@ -from typing import Union +import datetime +import json +from collections import defaultdict +from typing import Optional -from flask import current_app - -from core.llm.provider.llm_provider_service import LLMProviderService -from models.account import Tenant -from models.provider import * +from core.model_providers.model_factory import ModelFactory +from extensions.ext_database import db +from core.model_providers.model_provider_factory import ModelProviderFactory +from core.model_providers.models.entity.model_params import ModelType, ModelKwargsRules +from models.provider import Provider, ProviderModel, TenantPreferredModelProvider, ProviderType, ProviderQuotaType, \ + TenantDefaultModel class ProviderService: - @staticmethod - def init_supported_provider(tenant): - """Initialize the model provider, check whether the supported provider has a record""" + def get_provider_list(self, tenant_id: str): + """ + get provider list of tenant. 
- need_init_provider_names = [ProviderName.OPENAI.value, ProviderName.AZURE_OPENAI.value, ProviderName.ANTHROPIC.value] + :param tenant_id: + :return: + """ + # get rules for all providers + model_provider_rules = ModelProviderFactory.get_provider_rules() + model_provider_names = [model_provider_name for model_provider_name, _ in model_provider_rules.items()] + configurable_model_provider_names = [ + model_provider_name + for model_provider_name, model_provider_rules in model_provider_rules.items() + if 'custom' in model_provider_rules['support_provider_types'] + and model_provider_rules['model_flexibility'] == 'configurable' + ] - providers = db.session.query(Provider).filter( - Provider.tenant_id == tenant.id, - Provider.provider_type == ProviderType.CUSTOM.value, - Provider.provider_name.in_(need_init_provider_names) + # get all providers for the tenant + providers = db.session.query(Provider) \ + .filter( + Provider.tenant_id == tenant_id, + Provider.provider_name.in_(model_provider_names), + Provider.is_valid == True + ).order_by(Provider.created_at.desc()).all() + + provider_name_to_provider_dict = defaultdict(list) + for provider in providers: + provider_name_to_provider_dict[provider.provider_name].append(provider) + + # get all configurable provider models for the tenant + provider_models = db.session.query(ProviderModel) \ + .filter( + ProviderModel.tenant_id == tenant_id, + ProviderModel.provider_name.in_(configurable_model_provider_names), + ProviderModel.is_valid == True + ).order_by(ProviderModel.created_at.desc()).all() + + provider_name_to_provider_model_dict = defaultdict(list) + for provider_model in provider_models: + provider_name_to_provider_model_dict[provider_model.provider_name].append(provider_model) + + # get all preferred provider type for the tenant + preferred_provider_types = db.session.query(TenantPreferredModelProvider) \ + .filter( + TenantPreferredModelProvider.tenant_id == tenant_id, + TenantPreferredModelProvider.provider_name.in_(model_provider_names) ).all() - exists_provider_names = [] - for provider in providers: - exists_provider_names.append(provider.provider_name) + provider_name_to_preferred_provider_type_dict = {preferred_provider_type.provider_name: preferred_provider_type + for preferred_provider_type in preferred_provider_types} - not_exists_provider_names = list(set(need_init_provider_names) - set(exists_provider_names)) + providers_list = {} - if not_exists_provider_names: - # Initialize the model provider, check whether the supported provider has a record - for provider_name in not_exists_provider_names: - provider = Provider( - tenant_id=tenant.id, - provider_name=provider_name, - provider_type=ProviderType.CUSTOM.value, - is_valid=False - ) - db.session.add(provider) + for model_provider_name, model_provider_rule in model_provider_rules.items(): + # get preferred provider type + preferred_model_provider = provider_name_to_preferred_provider_type_dict.get(model_provider_name) + preferred_provider_type = ModelProviderFactory.get_preferred_type_by_preferred_model_provider( + tenant_id, + model_provider_name, + preferred_model_provider + ) - db.session.commit() + provider_config_dict = { + "preferred_provider_type": preferred_provider_type, + "model_flexibility": model_provider_rule['model_flexibility'], + } - @staticmethod - def get_obfuscated_api_key(tenant, provider_name: ProviderName, only_custom: bool = False): - llm_provider_service = LLMProviderService(tenant.id, provider_name.value) - return 
llm_provider_service.get_provider_configs(obfuscated=True, only_custom=only_custom) + provider_parameter_dict = {} + if ProviderType.SYSTEM.value in model_provider_rule['support_provider_types']: + for quota_type_enum in ProviderQuotaType: + quota_type = quota_type_enum.value + if quota_type in model_provider_rule['system_config']['supported_quota_types']: + key = ProviderType.SYSTEM.value + ':' + quota_type + provider_parameter_dict[key] = { + "provider_name": model_provider_name, + "provider_type": ProviderType.SYSTEM.value, + "config": None, + "is_valid": False, # need update + "quota_type": quota_type, + "quota_unit": model_provider_rule['system_config']['quota_unit'], # need update + "quota_limit": 0 if quota_type != ProviderQuotaType.TRIAL.value else + model_provider_rule['system_config']['quota_limit'], # need update + "quota_used": 0, # need update + "last_used": None # need update + } - @staticmethod - def get_token_type(tenant, provider_name: ProviderName): - llm_provider_service = LLMProviderService(tenant.id, provider_name.value) - return llm_provider_service.get_token_type() + if ProviderType.CUSTOM.value in model_provider_rule['support_provider_types']: + provider_parameter_dict[ProviderType.CUSTOM.value] = { + "provider_name": model_provider_name, + "provider_type": ProviderType.CUSTOM.value, + "config": None, # need update + "models": [], # need update + "is_valid": False, + "last_used": None # need update + } - @staticmethod - def validate_provider_configs(tenant, provider_name: ProviderName, configs: Union[dict | str]): - if current_app.config['DISABLE_PROVIDER_CONFIG_VALIDATION']: - return - llm_provider_service = LLMProviderService(tenant.id, provider_name.value) - return llm_provider_service.config_validate(configs) + model_provider_class = ModelProviderFactory.get_model_provider_class(model_provider_name) - @staticmethod - def get_encrypted_token(tenant, provider_name: ProviderName, configs: Union[dict | str]): - llm_provider_service = LLMProviderService(tenant.id, provider_name.value) - return llm_provider_service.get_encrypted_token(configs) + current_providers = provider_name_to_provider_dict[model_provider_name] + for provider in current_providers: + if provider.provider_type == ProviderType.SYSTEM.value: + quota_type = provider.quota_type + key = f'{ProviderType.SYSTEM.value}:{quota_type}' - @staticmethod - def create_system_provider(tenant: Tenant, provider_name: str = ProviderName.OPENAI.value, quota_limit: int = 200, - is_valid: bool = True): - if current_app.config['EDITION'] != 'CLOUD': - return + if key in provider_parameter_dict: + provider_parameter_dict[key]['is_valid'] = provider.is_valid + provider_parameter_dict[key]['quota_used'] = provider.quota_used + provider_parameter_dict[key]['quota_limit'] = provider.quota_limit + provider_parameter_dict[key]['last_used'] = provider.last_used + elif provider.provider_type == ProviderType.CUSTOM.value \ + and ProviderType.CUSTOM.value in provider_parameter_dict: + # if custom + key = ProviderType.CUSTOM.value + provider_parameter_dict[key]['last_used'] = provider.last_used + provider_parameter_dict[key]['is_valid'] = provider.is_valid - provider = db.session.query(Provider).filter( - Provider.tenant_id == tenant.id, + if model_provider_rule['model_flexibility'] == 'fixed': + provider_parameter_dict[key]['config'] = model_provider_class(provider=provider) \ + .get_provider_credentials(obfuscated=True) + else: + models = [] + provider_models = provider_name_to_provider_model_dict[model_provider_name] + for 
provider_model in provider_models: + models.append({ + "model_name": provider_model.model_name, + "model_type": provider_model.model_type, + "config": model_provider_class(provider=provider) \ + .get_model_credentials(provider_model.model_name, + ModelType.value_of(provider_model.model_type), + obfuscated=True), + "is_valid": provider_model.is_valid + }) + provider_parameter_dict[key]['models'] = models + + provider_config_dict['providers'] = list(provider_parameter_dict.values()) + providers_list[model_provider_name] = provider_config_dict + + return providers_list + + def custom_provider_config_validate(self, provider_name: str, config: dict) -> None: + """ + validate custom provider config. + + :param provider_name: + :param config: + :return: + :raises CredentialsValidateFailedError: When the config credential verification fails. + """ + # get model provider rules + model_provider_rules = ModelProviderFactory.get_provider_rule(provider_name) + + if model_provider_rules['model_flexibility'] != 'fixed': + raise ValueError('Only support fixed model provider') + + # only support provider type CUSTOM + if ProviderType.CUSTOM.value not in model_provider_rules['support_provider_types']: + raise ValueError('Only support provider type CUSTOM') + + # validate provider config + model_provider_class = ModelProviderFactory.get_model_provider_class(provider_name) + model_provider_class.is_provider_credentials_valid_or_raise(config) + + def save_custom_provider_config(self, tenant_id: str, provider_name: str, config: dict) -> None: + """ + save custom provider config. + + :param tenant_id: + :param provider_name: + :param config: + :return: + """ + # validate custom provider config + self.custom_provider_config_validate(provider_name, config) + + # get provider + provider = db.session.query(Provider) \ + .filter( + Provider.tenant_id == tenant_id, Provider.provider_name == provider_name, - Provider.provider_type == ProviderType.SYSTEM.value - ).one_or_none() + Provider.provider_type == ProviderType.CUSTOM.value + ).first() - if not provider: + model_provider_class = ModelProviderFactory.get_model_provider_class(provider_name) + encrypted_config = model_provider_class.encrypt_provider_credentials(tenant_id, config) + + # save provider + if provider: + provider.encrypted_config = json.dumps(encrypted_config) + provider.is_valid = True + provider.updated_at = datetime.datetime.utcnow() + db.session.commit() + else: provider = Provider( - tenant_id=tenant.id, + tenant_id=tenant_id, provider_name=provider_name, - provider_type=ProviderType.SYSTEM.value, - quota_type=ProviderQuotaType.TRIAL.value, - quota_limit=quota_limit, - encrypted_config='', - is_valid=is_valid, + provider_type=ProviderType.CUSTOM.value, + encrypted_config=json.dumps(encrypted_config), + is_valid=True ) db.session.add(provider) db.session.commit() + + def delete_custom_provider(self, tenant_id: str, provider_name: str) -> None: + """ + delete custom provider. 
+ + :param tenant_id: + :param provider_name: + :return: + """ + # get provider + provider = db.session.query(Provider) \ + .filter( + Provider.tenant_id == tenant_id, + Provider.provider_name == provider_name, + Provider.provider_type == ProviderType.CUSTOM.value + ).first() + + if provider: + try: + self.switch_preferred_provider(tenant_id, provider_name, ProviderType.SYSTEM.value) + except ValueError: + pass + + db.session.delete(provider) + db.session.commit() + + def custom_provider_model_config_validate(self, + provider_name: str, + model_name: str, + model_type: str, + config: dict) -> None: + """ + validate custom provider model config. + + :param provider_name: + :param model_name: + :param model_type: + :param config: + :return: + :raises CredentialsValidateFailedError: When the config credential verification fails. + """ + # get model provider rules + model_provider_rules = ModelProviderFactory.get_provider_rule(provider_name) + + if model_provider_rules['model_flexibility'] != 'configurable': + raise ValueError('Only support configurable model provider') + + # only support provider type CUSTOM + if ProviderType.CUSTOM.value not in model_provider_rules['support_provider_types']: + raise ValueError('Only support provider type CUSTOM') + + # validate provider model config + model_type = ModelType.value_of(model_type) + model_provider_class = ModelProviderFactory.get_model_provider_class(provider_name) + model_provider_class.is_model_credentials_valid_or_raise(model_name, model_type, config) + + def add_or_save_custom_provider_model_config(self, + tenant_id: str, + provider_name: str, + model_name: str, + model_type: str, + config: dict) -> None: + """ + Add or save custom provider model config. + + :param tenant_id: + :param provider_name: + :param model_name: + :param model_type: + :param config: + :return: + """ + # validate custom provider model config + self.custom_provider_model_config_validate(provider_name, model_name, model_type, config) + + # get provider + provider = db.session.query(Provider) \ + .filter( + Provider.tenant_id == tenant_id, + Provider.provider_name == provider_name, + Provider.provider_type == ProviderType.CUSTOM.value + ).first() + + if not provider: + provider = Provider( + tenant_id=tenant_id, + provider_name=provider_name, + provider_type=ProviderType.CUSTOM.value, + is_valid=True + ) + db.session.add(provider) + db.session.commit() + elif not provider.is_valid: + provider.is_valid = True + provider.encrypted_config = None + db.session.commit() + + model_provider_class = ModelProviderFactory.get_model_provider_class(provider_name) + encrypted_config = model_provider_class.encrypt_model_credentials( + tenant_id, + model_name, + ModelType.value_of(model_type), + config + ) + + # get provider model + provider_model = db.session.query(ProviderModel) \ + .filter( + ProviderModel.tenant_id == tenant_id, + ProviderModel.provider_name == provider_name, + ProviderModel.model_name == model_name, + ProviderModel.model_type == model_type + ).first() + + if provider_model: + provider_model.encrypted_config = json.dumps(encrypted_config) + provider_model.is_valid = True + db.session.commit() + else: + provider_model = ProviderModel( + tenant_id=tenant_id, + provider_name=provider_name, + model_name=model_name, + model_type=model_type, + encrypted_config=json.dumps(encrypted_config), + is_valid=True + ) + db.session.add(provider_model) + db.session.commit() + + def delete_custom_provider_model(self, + tenant_id: str, + provider_name: str, + model_name: str, + 
model_type: str) -> None: + """ + delete custom provider model. + + :param tenant_id: + :param provider_name: + :param model_name: + :param model_type: + :return: + """ + # get provider model + provider_model = db.session.query(ProviderModel) \ + .filter( + ProviderModel.tenant_id == tenant_id, + ProviderModel.provider_name == provider_name, + ProviderModel.model_name == model_name, + ProviderModel.model_type == model_type + ).first() + + if provider_model: + db.session.delete(provider_model) + db.session.commit() + + def switch_preferred_provider(self, tenant_id: str, provider_name: str, preferred_provider_type: str) -> None: + """ + switch preferred provider. + + :param tenant_id: + :param provider_name: + :param preferred_provider_type: + :return: + """ + provider_type = ProviderType.value_of(preferred_provider_type) + if not provider_type: + raise ValueError(f'Invalid preferred provider type: {preferred_provider_type}') + + model_provider_rules = ModelProviderFactory.get_provider_rule(provider_name) + if preferred_provider_type not in model_provider_rules['support_provider_types']: + raise ValueError(f'Not support provider type: {preferred_provider_type}') + + model_provider = ModelProviderFactory.get_model_provider_class(provider_name) + if not model_provider.is_provider_type_system_supported(): + return + + # get preferred provider + preferred_model_provider = db.session.query(TenantPreferredModelProvider) \ + .filter( + TenantPreferredModelProvider.tenant_id == tenant_id, + TenantPreferredModelProvider.provider_name == provider_name + ).first() + + if preferred_model_provider: + preferred_model_provider.preferred_provider_type = preferred_provider_type + else: + preferred_model_provider = TenantPreferredModelProvider( + tenant_id=tenant_id, + provider_name=provider_name, + preferred_provider_type=preferred_provider_type + ) + db.session.add(preferred_model_provider) + + db.session.commit() + + def get_default_model_of_model_type(self, tenant_id: str, model_type: str) -> Optional[TenantDefaultModel]: + """ + get default model of model type. + + :param tenant_id: + :param model_type: + :return: + """ + return ModelFactory.get_default_model(tenant_id, ModelType.value_of(model_type)) + + def update_default_model_of_model_type(self, + tenant_id: str, + model_type: str, + provider_name: str, + model_name: str) -> TenantDefaultModel: + """ + update default model of model type. + + :param tenant_id: + :param model_type: + :param provider_name: + :param model_name: + :return: + """ + return ModelFactory.update_default_model(tenant_id, ModelType.value_of(model_type), provider_name, model_name) + + def get_valid_model_list(self, tenant_id: str, model_type: str) -> list: + """ + get valid model list. 
+ + :param tenant_id: + :param model_type: + :return: + """ + valid_model_list = [] + + # get model provider rules + model_provider_rules = ModelProviderFactory.get_provider_rules() + for model_provider_name, model_provider_rule in model_provider_rules.items(): + model_provider = ModelProviderFactory.get_preferred_model_provider(tenant_id, model_provider_name) + if not model_provider: + continue + + model_list = model_provider.get_supported_model_list(ModelType.value_of(model_type)) + provider = model_provider.provider + for model in model_list: + valid_model_dict = { + "model_name": model['id'], + "model_type": model_type, + "model_provider": { + "provider_name": provider.provider_name, + "provider_type": provider.provider_type + }, + 'features': [] + } + + if 'features' in model: + valid_model_dict['features'] = model['features'] + + if provider.provider_type == ProviderType.SYSTEM.value: + valid_model_dict['model_provider']['quota_type'] = provider.quota_type + valid_model_dict['model_provider']['quota_unit'] = model_provider_rule['system_config']['quota_unit'] + valid_model_dict['model_provider']['quota_limit'] = provider.quota_limit + valid_model_dict['model_provider']['quota_used'] = provider.quota_used + + valid_model_list.append(valid_model_dict) + + return valid_model_list + + def get_model_parameter_rules(self, tenant_id: str, model_provider_name: str, model_name: str, model_type: str) \ + -> ModelKwargsRules: + """ + get model parameter rules. + It depends on preferred provider in use. + + :param tenant_id: + :param model_provider_name: + :param model_name: + :param model_type: + :return: + """ + # get model provider + model_provider = ModelProviderFactory.get_preferred_model_provider(tenant_id, model_provider_name) + if not model_provider: + # get empty model provider + return ModelKwargsRules() + + # get model parameter rules + return model_provider.get_model_parameter_rules(model_name, ModelType.value_of(model_type)) + diff --git a/api/services/workspace_service.py b/api/services/workspace_service.py index abd1f7f3f..96319818c 100644 --- a/api/services/workspace_service.py +++ b/api/services/workspace_service.py @@ -1,6 +1,6 @@ from extensions.ext_database import db from models.account import Tenant -from models.provider import Provider, ProviderType, ProviderName +from models.provider import Provider class WorkspaceService: @@ -13,8 +13,8 @@ class WorkspaceService: 'status': tenant.status, 'created_at': tenant.created_at, 'providers': [], - 'in_trail': False, - 'trial_end_reason': 'using_custom' + 'in_trial': True, + 'trial_end_reason': None } # Get providers @@ -25,25 +25,4 @@ class WorkspaceService: # Add providers to the tenant info tenant_info['providers'] = providers - custom_provider = None - system_provider = None - - for provider in providers: - if provider.provider_type == ProviderType.CUSTOM.value: - if provider.is_valid and provider.encrypted_config: - custom_provider = provider - elif provider.provider_type == ProviderType.SYSTEM.value: - if provider.provider_name == ProviderName.OPENAI.value and provider.is_valid: - system_provider = provider - - if system_provider and not custom_provider: - quota_used = system_provider.quota_used if system_provider.quota_used is not None else 0 - quota_limit = system_provider.quota_limit if system_provider.quota_limit is not None else 0 - - if quota_used >= quota_limit: - tenant_info['trial_end_reason'] = 'trial_exceeded' - else: - tenant_info['in_trail'] = True - tenant_info['trial_end_reason'] = None - return tenant_info diff 
diff --git a/api/tests/conftest.py b/api/tests/conftest.py
deleted file mode 100644
index 48de03784..000000000
--- a/api/tests/conftest.py
+++ /dev/null
@@ -1,50 +0,0 @@
-# -*- coding:utf-8 -*-
-
-import pytest
-import flask_migrate
-
-from app import create_app
-from extensions.ext_database import db
-
-
-@pytest.fixture(scope='module')
-def test_client():
-    # Create a Flask app configured for testing
-    from config import TestConfig
-    flask_app = create_app(TestConfig())
-    flask_app.config.from_object('config.TestingConfig')
-
-    # Create a test client using the Flask application configured for testing
-    with flask_app.test_client() as testing_client:
-        # Establish an application context
-        with flask_app.app_context():
-            yield testing_client  # this is where the testing happens!
-
-
-@pytest.fixture(scope='module')
-def init_database(test_client):
-    # Initialize the database
-    with test_client.application.app_context():
-        flask_migrate.upgrade()
-
-    yield  # this is where the testing happens!
-
-    # Clean up the database
-    with test_client.application.app_context():
-        flask_migrate.downgrade()
-
-
-@pytest.fixture(scope='module')
-def db_session(test_client):
-    with test_client.application.app_context():
-        yield db.session
-
-
-@pytest.fixture(scope='function')
-def login_default_user(test_client):
-
-    # todo
-
-    yield  # this is where the testing happens!
-
-    test_client.get('/logout', follow_redirects=True)
\ No newline at end of file
diff --git a/api/tests/integration_tests/.env.example b/api/tests/integration_tests/.env.example
new file mode 100644
index 000000000..f1ee23941
--- /dev/null
+++ b/api/tests/integration_tests/.env.example
@@ -0,0 +1,35 @@
+# OpenAI API Key
+OPENAI_API_KEY=
+
+# Azure OpenAI API Base Endpoint & API Key
+AZURE_OPENAI_API_BASE=
+AZURE_OPENAI_API_KEY=
+
+# Anthropic API Key
+ANTHROPIC_API_KEY=
+
+# Replicate API Key
+REPLICATE_API_TOKEN=
+
+# Hugging Face API Key
+HUGGINGFACE_API_KEY=
+HUGGINGFACE_ENDPOINT_URL=
+
+# Minimax Credentials
+MINIMAX_API_KEY=
+MINIMAX_GROUP_ID=
+
+# Spark Credentials
+SPARK_APP_ID=
+SPARK_API_KEY=
+SPARK_API_SECRET=
+
+# Tongyi Credentials
+TONGYI_DASHSCOPE_API_KEY=
+
+# Wenxin Credentials
+WENXIN_API_KEY=
+WENXIN_SECRET_KEY=
+
+# ChatGLM Credentials
+CHATGLM_API_BASE=
\ No newline at end of file
diff --git a/api/tests/integration_tests/__init__.py b/api/tests/integration_tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/api/tests/integration_tests/conftest.py b/api/tests/integration_tests/conftest.py
new file mode 100644
index 000000000..6e3ab4b74
--- /dev/null
+++ b/api/tests/integration_tests/conftest.py
@@ -0,0 +1,19 @@
+import os
+
+# Getting the absolute path of the current file's directory
+ABS_PATH = os.path.dirname(os.path.abspath(__file__))
+
+# Getting the absolute path of the project's root directory
+PROJECT_DIR = os.path.abspath(os.path.join(ABS_PATH, os.pardir, os.pardir))
+
+
+# Loading the .env file if it exists
+def _load_env() -> None:
+    dotenv_path = os.path.join(PROJECT_DIR, "tests", "integration_tests", ".env")
+    if os.path.exists(dotenv_path):
+        from dotenv import load_dotenv
+
+        load_dotenv(dotenv_path)
+
+
+_load_env()
diff --git a/api/tests/integration_tests/models/__init__.py b/api/tests/integration_tests/models/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/api/tests/integration_tests/models/embedding/__init__.py b/api/tests/integration_tests/models/embedding/__init__.py
new file mode 100644
index 000000000..e69de29bb
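Note that the new conftest.py only loads `.env` when the file exists; nothing verifies that the credentials a given test needs were actually filled in. A small guard along these lines could be layered on top, assuming the variable names from .env.example above — the `require_env` helper is hypothetical, not part of the patch:

    import os

    import pytest

    # Hypothetical helper: skip a provider's tests when its credentials
    # were left blank in api/tests/integration_tests/.env.
    def require_env(*names):
        missing = [name for name in names if not os.environ.get(name)]
        return pytest.mark.skipif(
            bool(missing),
            reason=f"missing integration credentials: {', '.join(missing)}"
        )

    @require_env('SPARK_APP_ID', 'SPARK_API_KEY', 'SPARK_API_SECRET')
    def test_spark_smoke():
        ...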
diff --git a/api/tests/integration_tests/models/embedding/test_azure_openai_embedding.py b/api/tests/integration_tests/models/embedding/test_azure_openai_embedding.py
new file mode 100644
index 000000000..9ea202a6f
--- /dev/null
+++ b/api/tests/integration_tests/models/embedding/test_azure_openai_embedding.py
@@ -0,0 +1,57 @@
+import json
+import os
+from unittest.mock import patch, MagicMock
+
+from core.model_providers.models.entity.model_params import ModelType
+from core.model_providers.providers.azure_openai_provider import AzureOpenAIProvider
+from core.model_providers.models.embedding.azure_openai_embedding import AzureOpenAIEmbedding
+from models.provider import Provider, ProviderType, ProviderModel
+
+
+def get_mock_provider():
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='azure_openai',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config='',
+        is_valid=True,
+    )
+
+
+def get_mock_azure_openai_embedding_model(mocker):
+    model_name = 'text-embedding-ada-002'
+    valid_openai_api_base = os.environ['AZURE_OPENAI_API_BASE']
+    valid_openai_api_key = os.environ['AZURE_OPENAI_API_KEY']
+    openai_provider = AzureOpenAIProvider(provider=get_mock_provider())
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = ProviderModel(
+        provider_name='azure_openai',
+        model_name=model_name,
+        model_type=ModelType.EMBEDDINGS.value,
+        encrypted_config=json.dumps({
+            'openai_api_base': valid_openai_api_base,
+            'openai_api_key': valid_openai_api_key,
+            'base_model_name': model_name
+        }),
+        is_valid=True,
+    )
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    return AzureOpenAIEmbedding(
+        model_provider=openai_provider,
+        name=model_name
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_openai_api_key):
+    return encrypted_openai_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_embedding(mock_decrypt, mocker):
+    embedding_model = get_mock_azure_openai_embedding_model(mocker)
+    rst = embedding_model.client.embed_query('test')
+    assert isinstance(rst, list)
+    assert len(rst) == 1536
diff --git a/api/tests/integration_tests/models/embedding/test_minimax_embedding.py b/api/tests/integration_tests/models/embedding/test_minimax_embedding.py
new file mode 100644
index 000000000..feaad6bb1
--- /dev/null
+++ b/api/tests/integration_tests/models/embedding/test_minimax_embedding.py
@@ -0,0 +1,44 @@
+import json
+import os
+from unittest.mock import patch
+
+from core.model_providers.models.embedding.minimax_embedding import MinimaxEmbedding
+from core.model_providers.providers.minimax_provider import MinimaxProvider
+from models.provider import Provider, ProviderType
+
+
+def get_mock_provider(valid_group_id, valid_api_key):
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='minimax',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({
+            'minimax_group_id': valid_group_id,
+            'minimax_api_key': valid_api_key
+        }),
+        is_valid=True,
+    )
+
+
+def get_mock_embedding_model():
+    model_name = 'embo-01'
+    valid_api_key = os.environ['MINIMAX_API_KEY']
+    valid_group_id = os.environ['MINIMAX_GROUP_ID']
+    provider = MinimaxProvider(provider=get_mock_provider(valid_group_id, valid_api_key))
+    return MinimaxEmbedding(
+        model_provider=provider,
+        name=model_name
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_api_key):
+    return encrypted_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_embedding(mock_decrypt):
+    embedding_model = get_mock_embedding_model()
+    rst = embedding_model.client.embed_query('test')
+    assert isinstance(rst, list)
+    assert len(rst) == 1536
diff --git a/api/tests/integration_tests/models/embedding/test_openai_embedding.py b/api/tests/integration_tests/models/embedding/test_openai_embedding.py
new file mode 100644
index 000000000..14e613349
--- /dev/null
+++ b/api/tests/integration_tests/models/embedding/test_openai_embedding.py
@@ -0,0 +1,40 @@
+import json
+import os
+from unittest.mock import patch
+
+from core.model_providers.providers.openai_provider import OpenAIProvider
+from core.model_providers.models.embedding.openai_embedding import OpenAIEmbedding
+from models.provider import Provider, ProviderType
+
+
+def get_mock_provider(valid_openai_api_key):
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='openai',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({'openai_api_key': valid_openai_api_key}),
+        is_valid=True,
+    )
+
+
+def get_mock_openai_embedding_model():
+    model_name = 'text-embedding-ada-002'
+    valid_openai_api_key = os.environ['OPENAI_API_KEY']
+    openai_provider = OpenAIProvider(provider=get_mock_provider(valid_openai_api_key))
+    return OpenAIEmbedding(
+        model_provider=openai_provider,
+        name=model_name
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_openai_api_key):
+    return encrypted_openai_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_embedding(mock_decrypt):
+    embedding_model = get_mock_openai_embedding_model()
+    rst = embedding_model.client.embed_query('test')
+    assert isinstance(rst, list)
+    assert len(rst) == 1536
diff --git a/api/tests/integration_tests/models/embedding/test_replicate_embedding.py b/api/tests/integration_tests/models/embedding/test_replicate_embedding.py
new file mode 100644
index 000000000..16531574c
--- /dev/null
+++ b/api/tests/integration_tests/models/embedding/test_replicate_embedding.py
@@ -0,0 +1,64 @@
+import json
+import os
+from unittest.mock import patch, MagicMock
+
+from core.model_providers.models.embedding.replicate_embedding import ReplicateEmbedding
+from core.model_providers.models.entity.model_params import ModelType
+from core.model_providers.providers.replicate_provider import ReplicateProvider
+from models.provider import Provider, ProviderType, ProviderModel
+
+
+def get_mock_provider():
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='replicate',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config='',
+        is_valid=True,
+    )
+
+
+def get_mock_embedding_model(mocker):
+    model_name = 'replicate/all-mpnet-base-v2'
+    valid_api_key = os.environ['REPLICATE_API_TOKEN']
+    model_provider = ReplicateProvider(provider=get_mock_provider())
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = ProviderModel(
+        provider_name='replicate',
+        model_name=model_name,
+        model_type=ModelType.EMBEDDINGS.value,
+        encrypted_config=json.dumps({
+            'replicate_api_token': valid_api_key,
+            'model_version': 'b6b7585c9640cd7a9572c6e129c9549d79c9c31f0d3fdce7baac7c67ca38f305'
+        }),
+        is_valid=True,
+    )
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    return ReplicateEmbedding(
+        model_provider=model_provider,
+        name=model_name
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_api_key):
+    return encrypted_api_key
+
+
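Each embedding test here follows the same recipe: credentials come from the integration-test `.env`, `decrypt_token` is patched to an identity function so the plaintext keys survive the provider round trip, and the final assertion pins the expected vector width (1536 for text-embedding-ada-002 and embo-01, 768 for all-mpnet-base-v2). As a quick sanity check beyond raw length, two vectors returned by `embed_documents` can be compared directly; the `cosine` helper below is illustrative, not part of the patch:

    import math

    # Illustrative only: cosine similarity of two equal-length vectors,
    # e.g. vecs = embedding_model.client.embed_documents(['test', 'test1'])
    def cosine(a, b):
        dot = sum(x * y for x, y in zip(a, b))
        norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
        return dot / norm if norm else 0.0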
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_embed_documents(mock_decrypt, mocker):
+    embedding_model = get_mock_embedding_model(mocker)
+    rst = embedding_model.client.embed_documents(['test', 'test1'])
+    assert isinstance(rst, list)
+    assert len(rst) == 2
+    assert len(rst[0]) == 768
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_embed_query(mock_decrypt, mocker):
+    embedding_model = get_mock_embedding_model(mocker)
+    rst = embedding_model.client.embed_query('test')
+    assert isinstance(rst, list)
+    assert len(rst) == 768
diff --git a/api/tests/integration_tests/models/llm/__init__.py b/api/tests/integration_tests/models/llm/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/api/tests/integration_tests/models/llm/test_anthropic_model.py b/api/tests/integration_tests/models/llm/test_anthropic_model.py
new file mode 100644
index 000000000..86cfe9922
--- /dev/null
+++ b/api/tests/integration_tests/models/llm/test_anthropic_model.py
@@ -0,0 +1,61 @@
+import json
+import os
+from unittest.mock import patch
+
+from langchain.schema import ChatGeneration, AIMessage
+
+from core.model_providers.models.entity.message import PromptMessage, MessageType
+from core.model_providers.models.entity.model_params import ModelKwargs
+from core.model_providers.models.llm.anthropic_model import AnthropicModel
+from core.model_providers.providers.anthropic_provider import AnthropicProvider
+from models.provider import Provider, ProviderType
+
+
+def get_mock_provider(valid_api_key):
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='anthropic',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({'anthropic_api_key': valid_api_key}),
+        is_valid=True,
+    )
+
+
+def get_mock_model(model_name):
+    model_kwargs = ModelKwargs(
+        max_tokens=10,
+        temperature=0
+    )
+    valid_api_key = os.environ['ANTHROPIC_API_KEY']
+    model_provider = AnthropicProvider(provider=get_mock_provider(valid_api_key))
+    return AnthropicModel(
+        model_provider=model_provider,
+        name=model_name,
+        model_kwargs=model_kwargs
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_api_key):
+    return encrypted_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_num_tokens(mock_decrypt):
+    model = get_mock_model('claude-2')
+    rst = model.get_num_tokens([
+        PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
+    ])
+    assert rst == 6
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt):
+    model = get_mock_model('claude-2')
+    messages = [PromptMessage(content='Human: 1 + 1=? \nAssistant: ')]
+    rst = model.run(
+        messages,
+        stop=['\nHuman:'],
+    )
+    assert len(rst.content) > 0
+    assert rst.content.strip() == '2'
diff --git a/api/tests/integration_tests/models/llm/test_azure_openai_model.py b/api/tests/integration_tests/models/llm/test_azure_openai_model.py
new file mode 100644
index 000000000..112dcd684
--- /dev/null
+++ b/api/tests/integration_tests/models/llm/test_azure_openai_model.py
@@ -0,0 +1,86 @@
+import json
+import os
+from unittest.mock import patch, MagicMock
+
+import pytest
+from langchain.schema import ChatGeneration, AIMessage
+
+from core.model_providers.models.entity.model_params import ModelKwargs, ModelType
+from core.model_providers.models.llm.azure_openai_model import AzureOpenAIModel
+from core.model_providers.models.entity.message import PromptMessage, MessageType
+from core.model_providers.providers.azure_openai_provider import AzureOpenAIProvider
+from models.provider import Provider, ProviderType, ProviderModel
+
+
+def get_mock_provider():
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='azure_openai',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config='',
+        is_valid=True,
+    )
+
+
+def get_mock_azure_openai_model(model_name, mocker):
+    model_kwargs = ModelKwargs(
+        max_tokens=10,
+        temperature=0
+    )
+    valid_openai_api_base = os.environ['AZURE_OPENAI_API_BASE']
+    valid_openai_api_key = os.environ['AZURE_OPENAI_API_KEY']
+    provider = AzureOpenAIProvider(provider=get_mock_provider())
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = ProviderModel(
+        provider_name='azure_openai',
+        model_name=model_name,
+        model_type=ModelType.TEXT_GENERATION.value,
+        encrypted_config=json.dumps({
+            'openai_api_base': valid_openai_api_base,
+            'openai_api_key': valid_openai_api_key,
+            'base_model_name': model_name
+        }),
+        is_valid=True,
+    )
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    return AzureOpenAIModel(
+        model_provider=provider,
+        name=model_name,
+        model_kwargs=model_kwargs
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_openai_api_key):
+    return encrypted_openai_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_num_tokens(mock_decrypt, mocker):
+    openai_model = get_mock_azure_openai_model('text-davinci-003', mocker)
+    rst = openai_model.get_num_tokens([PromptMessage(content='you are a kindness Assistant.')])
+    assert rst == 6
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_chat_get_num_tokens(mock_decrypt, mocker):
+    openai_model = get_mock_azure_openai_model('gpt-35-turbo', mocker)
+    rst = openai_model.get_num_tokens([
+        PromptMessage(type=MessageType.SYSTEM, content='you are a kindness Assistant.'),
+        PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
+    ])
+    assert rst == 22
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt, mocker):
+    openai_model = get_mock_azure_openai_model('gpt-35-turbo', mocker)
+    messages = [PromptMessage(content='Human: Are you Human? you MUST only answer `y` or `n`? \nAssistant: ')]
+    rst = openai_model.run(
+        messages,
+        stop=['\nHuman:'],
+    )
+    assert len(rst.content) > 0
+    assert rst.content.strip() == 'n'
diff --git a/api/tests/integration_tests/models/llm/test_huggingface_hub_model.py b/api/tests/integration_tests/models/llm/test_huggingface_hub_model.py
new file mode 100644
index 000000000..d55d6e93f
--- /dev/null
+++ b/api/tests/integration_tests/models/llm/test_huggingface_hub_model.py
@@ -0,0 +1,124 @@
+import json
+import os
+from unittest.mock import patch, MagicMock
+
+from langchain.schema import Generation
+
+from core.model_providers.models.entity.message import PromptMessage, MessageType
+from core.model_providers.models.entity.model_params import ModelKwargs, ModelType
+from core.model_providers.models.llm.huggingface_hub_model import HuggingfaceHubModel
+from core.model_providers.providers.huggingface_hub_provider import HuggingfaceHubProvider
+from models.provider import Provider, ProviderType, ProviderModel
+
+
+def get_mock_provider():
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='huggingface_hub',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config='',
+        is_valid=True,
+    )
+
+
+def get_mock_model(model_name, huggingfacehub_api_type, mocker):
+    model_kwargs = ModelKwargs(
+        max_tokens=10,
+        temperature=0.01
+    )
+    valid_api_key = os.environ['HUGGINGFACE_API_KEY']
+    endpoint_url = os.environ['HUGGINGFACE_ENDPOINT_URL']
+    model_provider = HuggingfaceHubProvider(provider=get_mock_provider())
+
+    credentials = {
+        'huggingfacehub_api_type': huggingfacehub_api_type,
+        'huggingfacehub_api_token': valid_api_key
+    }
+
+    if huggingfacehub_api_type == 'inference_endpoints':
+        credentials['huggingfacehub_endpoint_url'] = endpoint_url
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = ProviderModel(
+        provider_name='huggingface_hub',
+        model_name=model_name,
+        model_type=ModelType.TEXT_GENERATION.value,
+        encrypted_config=json.dumps(credentials),
+        is_valid=True,
+    )
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    return HuggingfaceHubModel(
+        model_provider=model_provider,
+        name=model_name,
+        model_kwargs=model_kwargs
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_api_key):
+    return encrypted_api_key
+
+@patch('huggingface_hub.hf_api.ModelInfo')
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_hosted_inference_api_get_num_tokens(mock_decrypt, mock_model_info, mocker):
+    mock_model_info.return_value = MagicMock(pipeline_tag='text2text-generation')
+    mocker.patch('langchain.llms.huggingface_hub.HuggingFaceHub._call', return_value="abc")
+
+    model = get_mock_model(
+        'tiiuae/falcon-40b',
+        'hosted_inference_api',
+        mocker
+    )
+    rst = model.get_num_tokens([
+        PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
+    ])
+    assert rst == 5
+
+
+@patch('huggingface_hub.hf_api.ModelInfo')
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_inference_endpoints_get_num_tokens(mock_decrypt, mock_model_info, mocker):
+    mock_model_info.return_value = MagicMock(pipeline_tag='text2text-generation')
+    mocker.patch('langchain.llms.huggingface_hub.HuggingFaceHub._call', return_value="abc")
+
+    model = get_mock_model(
+        '',
+        'inference_endpoints',
+        mocker
+    )
+    rst = model.get_num_tokens([
+        PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
+    ])
+    assert rst == 5
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_hosted_inference_api_run(mock_decrypt, mocker):
+    model = get_mock_model(
+        'google/flan-t5-base',
+        'hosted_inference_api',
+        mocker
+    )
+
+    rst = model.run(
+        [PromptMessage(content='Human: Are you Really Human? you MUST only answer `y` or `n`? \nAssistant: ')],
+        stop=['\nHuman:'],
+    )
+    assert len(rst.content) > 0
+    assert rst.content.strip() == 'n'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_inference_endpoints_run(mock_decrypt, mocker):
+    model = get_mock_model(
+        '',
+        'inference_endpoints',
+        mocker
+    )
+
+    rst = model.run(
+        [PromptMessage(content='Answer the following yes/no question. Can you write a whole Haiku in a single tweet?')],
+    )
+    assert len(rst.content) > 0
+    assert rst.content.strip() == 'no'
diff --git a/api/tests/integration_tests/models/llm/test_minimax_model.py b/api/tests/integration_tests/models/llm/test_minimax_model.py
new file mode 100644
index 000000000..79a05bc27
--- /dev/null
+++ b/api/tests/integration_tests/models/llm/test_minimax_model.py
@@ -0,0 +1,64 @@
+import json
+import os
+from unittest.mock import patch
+
+from langchain.schema import ChatGeneration, AIMessage, Generation
+
+from core.model_providers.models.entity.message import PromptMessage, MessageType
+from core.model_providers.models.entity.model_params import ModelKwargs
+from core.model_providers.models.llm.minimax_model import MinimaxModel
+from core.model_providers.providers.minimax_provider import MinimaxProvider
+from models.provider import Provider, ProviderType
+
+
+def get_mock_provider(valid_group_id, valid_api_key):
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='minimax',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({
+            'minimax_group_id': valid_group_id,
+            'minimax_api_key': valid_api_key
+        }),
+        is_valid=True,
+    )
+
+
+def get_mock_model(model_name):
+    model_kwargs = ModelKwargs(
+        max_tokens=10,
+        temperature=0.01
+    )
+    valid_api_key = os.environ['MINIMAX_API_KEY']
+    valid_group_id = os.environ['MINIMAX_GROUP_ID']
+    model_provider = MinimaxProvider(provider=get_mock_provider(valid_group_id, valid_api_key))
+    return MinimaxModel(
+        model_provider=model_provider,
+        name=model_name,
+        model_kwargs=model_kwargs
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_api_key):
+    return encrypted_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_num_tokens(mock_decrypt):
+    model = get_mock_model('abab5.5-chat')
+    rst = model.get_num_tokens([
+        PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
+    ])
+    assert rst == 5
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt):
+    model = get_mock_model('abab5.5-chat')
+    rst = model.run(
+        [PromptMessage(content='Human: Are you a real Human? you MUST only answer `y` or `n`? \nAssistant: ')],
+        stop=['\nHuman:'],
+    )
+    assert len(rst.content) > 0
+    assert rst.content.strip() == 'n'
diff --git a/api/tests/integration_tests/models/llm/test_openai_model.py b/api/tests/integration_tests/models/llm/test_openai_model.py
new file mode 100644
index 000000000..ebc40fd52
--- /dev/null
+++ b/api/tests/integration_tests/models/llm/test_openai_model.py
@@ -0,0 +1,79 @@
+import json
+import os
+from unittest.mock import patch
+
+from langchain.schema import Generation, ChatGeneration, AIMessage
+
+from core.model_providers.providers.openai_provider import OpenAIProvider
+from core.model_providers.models.entity.message import PromptMessage, MessageType
+from core.model_providers.models.entity.model_params import ModelKwargs
+from core.model_providers.models.llm.openai_model import OpenAIModel
+from models.provider import Provider, ProviderType
+
+
+def get_mock_provider(valid_openai_api_key):
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='openai',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({'openai_api_key': valid_openai_api_key}),
+        is_valid=True,
+    )
+
+
+def get_mock_openai_model(model_name):
+    model_kwargs = ModelKwargs(
+        max_tokens=10,
+        temperature=0
+    )
+    valid_openai_api_key = os.environ['OPENAI_API_KEY']
+    openai_provider = OpenAIProvider(provider=get_mock_provider(valid_openai_api_key))
+    return OpenAIModel(
+        model_provider=openai_provider,
+        name=model_name,
+        model_kwargs=model_kwargs
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_openai_api_key):
+    return encrypted_openai_api_key
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_num_tokens(mock_decrypt):
+    openai_model = get_mock_openai_model('text-davinci-003')
+    rst = openai_model.get_num_tokens([PromptMessage(content='you are a kindness Assistant.')])
+    assert rst == 6
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_chat_get_num_tokens(mock_decrypt):
+    openai_model = get_mock_openai_model('gpt-3.5-turbo')
+    rst = openai_model.get_num_tokens([
+        PromptMessage(type=MessageType.SYSTEM, content='you are a kindness Assistant.'),
+        PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
+    ])
+    assert rst == 22
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt):
+    openai_model = get_mock_openai_model('text-davinci-003')
+    rst = openai_model.run(
+        [PromptMessage(content='Human: Are you Human? you MUST only answer `y` or `n`? \nAssistant: ')],
+        stop=['\nHuman:'],
+    )
+    assert len(rst.content) > 0
+    assert rst.content.strip() == 'n'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_chat_run(mock_decrypt):
+    openai_model = get_mock_openai_model('gpt-3.5-turbo')
+    messages = [PromptMessage(content='Human: Are you Human? you MUST only answer `y` or `n`? \nAssistant: ')]
+    rst = openai_model.run(
+        messages,
+        stop=['\nHuman:'],
+    )
+    assert len(rst.content) > 0
+    assert rst.content.strip() == 'n'
diff --git a/api/tests/integration_tests/models/llm/test_replicate_model.py b/api/tests/integration_tests/models/llm/test_replicate_model.py
new file mode 100644
index 000000000..7689a3c0f
--- /dev/null
+++ b/api/tests/integration_tests/models/llm/test_replicate_model.py
@@ -0,0 +1,73 @@
+import json
+import os
+from unittest.mock import patch, MagicMock
+
+from langchain.schema import Generation
+
+from core.model_providers.models.entity.message import PromptMessage, MessageType
+from core.model_providers.models.entity.model_params import ModelKwargs, ModelType
+from core.model_providers.models.llm.replicate_model import ReplicateModel
+from core.model_providers.providers.replicate_provider import ReplicateProvider
+from models.provider import Provider, ProviderType, ProviderModel
+
+
+def get_mock_provider():
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='replicate',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config='',
+        is_valid=True,
+    )
+
+
+def get_mock_model(model_name, model_version, mocker):
+    model_kwargs = ModelKwargs(
+        max_tokens=10,
+        temperature=0.01
+    )
+    valid_api_key = os.environ['REPLICATE_API_TOKEN']
+    model_provider = ReplicateProvider(provider=get_mock_provider())
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = ProviderModel(
+        provider_name='replicate',
+        model_name=model_name,
+        model_type=ModelType.TEXT_GENERATION.value,
+        encrypted_config=json.dumps({
+            'replicate_api_token': valid_api_key,
+            'model_version': model_version
+        }),
+        is_valid=True,
+    )
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    return ReplicateModel(
+        model_provider=model_provider,
+        name=model_name,
+        model_kwargs=model_kwargs
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_api_key):
+    return encrypted_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_num_tokens(mock_decrypt, mocker):
+    model = get_mock_model('a16z-infra/llama-2-13b-chat', '2a7f981751ec7fdf87b5b91ad4db53683a98082e9ff7bfd12c8cd5ea85980a52', mocker)
+    rst = model.get_num_tokens([
+        PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
+    ])
+    assert rst == 7
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt, mocker):
+    model = get_mock_model('a16z-infra/llama-2-13b-chat', '2a7f981751ec7fdf87b5b91ad4db53683a98082e9ff7bfd12c8cd5ea85980a52', mocker)
+    messages = [PromptMessage(content='Human: 1+1=? \nAnswer: ')]
+    rst = model.run(
+        messages
+    )
+    assert len(rst.content) > 0
diff --git a/api/tests/integration_tests/models/llm/test_spark_model.py b/api/tests/integration_tests/models/llm/test_spark_model.py
new file mode 100644
index 000000000..4e62aeb2c
--- /dev/null
+++ b/api/tests/integration_tests/models/llm/test_spark_model.py
@@ -0,0 +1,67 @@
+import json
+import os
+from unittest.mock import patch
+
+from langchain.schema import ChatGeneration, AIMessage, Generation
+
+from core.model_providers.models.entity.message import PromptMessage, MessageType
+from core.model_providers.models.entity.model_params import ModelKwargs
+from core.model_providers.models.llm.spark_model import SparkModel
+from core.model_providers.providers.spark_provider import SparkProvider
+from models.provider import Provider, ProviderType
+
+
+def get_mock_provider(valid_app_id, valid_api_key, valid_api_secret):
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='spark',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({
+            'app_id': valid_app_id,
+            'api_key': valid_api_key,
+            'api_secret': valid_api_secret,
+        }),
+        is_valid=True,
+    )
+
+
+def get_mock_model(model_name):
+    model_kwargs = ModelKwargs(
+        max_tokens=10,
+        temperature=0.01
+    )
+    valid_app_id = os.environ['SPARK_APP_ID']
+    valid_api_key = os.environ['SPARK_API_KEY']
+    valid_api_secret = os.environ['SPARK_API_SECRET']
+    model_provider = SparkProvider(provider=get_mock_provider(valid_app_id, valid_api_key, valid_api_secret))
+    return SparkModel(
+        model_provider=model_provider,
+        name=model_name,
+        model_kwargs=model_kwargs
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_api_key):
+    return encrypted_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_num_tokens(mock_decrypt):
+    model = get_mock_model('spark')
+    rst = model.get_num_tokens([
+        PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
+    ])
+    assert rst == 6
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt):
+    model = get_mock_model('spark')
+    messages = [PromptMessage(content='Human: 1 + 1=? \nAssistant: Integer answer is:')]
+    rst = model.run(
+        messages,
+        stop=['\nHuman:'],
+    )
+    assert len(rst.content) > 0
+    assert rst.content.strip() == '2'
diff --git a/api/tests/integration_tests/models/llm/test_tongyi_model.py b/api/tests/integration_tests/models/llm/test_tongyi_model.py
new file mode 100644
index 000000000..2f9e33992
--- /dev/null
+++ b/api/tests/integration_tests/models/llm/test_tongyi_model.py
@@ -0,0 +1,61 @@
+import json
+import os
+from unittest.mock import patch
+
+from langchain.schema import ChatGeneration, AIMessage, Generation
+
+from core.model_providers.models.entity.message import PromptMessage, MessageType
+from core.model_providers.models.entity.model_params import ModelKwargs
+from core.model_providers.models.llm.tongyi_model import TongyiModel
+from core.model_providers.providers.tongyi_provider import TongyiProvider
+from models.provider import Provider, ProviderType
+
+
+def get_mock_provider(valid_api_key):
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='tongyi',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({
+            'dashscope_api_key': valid_api_key,
+        }),
+        is_valid=True,
+    )
+
+
+def get_mock_model(model_name):
+    model_kwargs = ModelKwargs(
+        max_tokens=10,
+        temperature=0.01
+    )
+    valid_api_key = os.environ['TONGYI_DASHSCOPE_API_KEY']
+    model_provider = TongyiProvider(provider=get_mock_provider(valid_api_key))
+    return TongyiModel(
+        model_provider=model_provider,
+        name=model_name,
+        model_kwargs=model_kwargs
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_api_key):
+    return encrypted_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_num_tokens(mock_decrypt):
+    model = get_mock_model('qwen-v1')
+    rst = model.get_num_tokens([
+        PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
+    ])
+    assert rst == 5
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt):
+    model = get_mock_model('qwen-v1')
+    rst = model.run(
+        [PromptMessage(content='Human: Are you Human? you MUST only answer `y` or `n`? \nAssistant: ')],
+        stop=['\nHuman:'],
+    )
+    assert len(rst.content) > 0
diff --git a/api/tests/integration_tests/models/llm/test_wenxin_model.py b/api/tests/integration_tests/models/llm/test_wenxin_model.py
new file mode 100644
index 000000000..f517d05c2
--- /dev/null
+++ b/api/tests/integration_tests/models/llm/test_wenxin_model.py
@@ -0,0 +1,63 @@
+import json
+import os
+from unittest.mock import patch
+
+
+from core.model_providers.models.entity.message import PromptMessage, MessageType
+from core.model_providers.models.entity.model_params import ModelKwargs
+from core.model_providers.models.llm.wenxin_model import WenxinModel
+from core.model_providers.providers.wenxin_provider import WenxinProvider
+from models.provider import Provider, ProviderType
+
+
+def get_mock_provider(valid_api_key, valid_secret_key):
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='wenxin',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({
+            'api_key': valid_api_key,
+            'secret_key': valid_secret_key,
+        }),
+        is_valid=True,
+    )
+
+
+def get_mock_model(model_name):
+    model_kwargs = ModelKwargs(
+        temperature=0.01
+    )
+    valid_api_key = os.environ['WENXIN_API_KEY']
+    valid_secret_key = os.environ['WENXIN_SECRET_KEY']
+    model_provider = WenxinProvider(provider=get_mock_provider(valid_api_key, valid_secret_key))
+    return WenxinModel(
+        model_provider=model_provider,
+        name=model_name,
+        model_kwargs=model_kwargs
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_api_key):
+    return encrypted_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_num_tokens(mock_decrypt):
+    model = get_mock_model('ernie-bot')
+    rst = model.get_num_tokens([
+        PromptMessage(type=MessageType.HUMAN, content='Who is your manufacturer?')
+    ])
+    assert rst == 5
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt):
+    model = get_mock_model('ernie-bot')
+    messages = [PromptMessage(content='Human: 1 + 1=? \nAssistant: Integer answer is:')]
+    rst = model.run(
+        messages,
+        stop=['\nHuman:'],
+    )
+    assert len(rst.content) > 0
+    assert rst.content.strip() == '2'
diff --git a/api/tests/integration_tests/models/moderation/__init__.py b/api/tests/integration_tests/models/moderation/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/api/tests/integration_tests/models/moderation/test_openai_moderation.py b/api/tests/integration_tests/models/moderation/test_openai_moderation.py
new file mode 100644
index 000000000..c27f43e14
--- /dev/null
+++ b/api/tests/integration_tests/models/moderation/test_openai_moderation.py
@@ -0,0 +1,40 @@
+import json
+import os
+from unittest.mock import patch
+
+from core.model_providers.models.moderation.openai_moderation import OpenAIModeration, DEFAULT_AUDIO_MODEL
+from core.model_providers.providers.openai_provider import OpenAIProvider
+from models.provider import Provider, ProviderType
+
+
+def get_mock_provider(valid_openai_api_key):
+    return Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name='openai',
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({'openai_api_key': valid_openai_api_key}),
+        is_valid=True,
+    )
+
+
+def get_mock_openai_moderation_model():
+    valid_openai_api_key = os.environ['OPENAI_API_KEY']
+    openai_provider = OpenAIProvider(provider=get_mock_provider(valid_openai_api_key))
+    return OpenAIModeration(
+        model_provider=openai_provider,
+        name=DEFAULT_AUDIO_MODEL
+    )
+
+
+def decrypt_side_effect(tenant_id, encrypted_openai_api_key):
+    return encrypted_openai_api_key
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt):
+    model = get_mock_openai_moderation_model()
+    rst = model.run('hello')
+
+    assert isinstance(rst, dict)
+    assert 'id' in rst
diff --git a/api/tests/integration_tests/models/speech2text/__init__.py b/api/tests/integration_tests/models/speech2text/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/api/tests/integration_tests/models/speech2text/audio.mp3 b/api/tests/integration_tests/models/speech2text/audio.mp3
new file mode 100644
index 0000000000000000000000000000000000000000..7c86e02e160909223668c7b21a60b68afc74ef98
GIT binary patch
literal 218880
zcmcG0cU%)$`|qR>0t85?2}MIdYQTghO-T?C6%_@#E?|uKe_w%`b+?x!iKJ%Q(e9v>vGv|B}LC1Iq0`6m9MKwl
z3jOUN>@f|MElBKdXcnX|snN2q20S>vp2|Actxa7xcz&pe!Ul?J*AfV-I<*&ovm%Eovod%yZd*iwRE=Z zUi$0i2XT}h=C!meGQN!AkoCZ!e{lI&PGmhe+1)}G$r))8!#2BxYEa~K8&Xg_ccZOOEq>SYkXwtp6!?B~ z*$#gO;hXXwe4&~oMYX~6TSeF8jNPr*mK%iHo@fhseWT^t;o0m~(^$l`---4wF}L`(BfhT$q2qdqK4rM5v945flZ-qY`GGWua)1(Hp3NCpV4{$r&q)u^O_I_6*)SLU13~E6p|8hC52Sek;lTu*aeSq{4ax@w z<$`2XS{4)pWCfvoQgE4q(4jC7(jd2KbivTI0ucqZHSKeLxs~_-oAH^4o;Mt9T6d=D z1KMe<^(4em5}NO?E^BD%`p)T^_)veC+8Rs!;}o^!hqKXl8$)OiYG@s-YUvtp z?fTTx^{Ew-=^AgTkF`%O>*>3=S39$(YwO-(mC3Y+%<_oQMx?9hjOw zd9VNZVX|TW%UFk`mVv2>HHMi3laUi^N`?5N?dz7rkX}e-V^dP;Kqo(5ab8kr%K$W1 zLMMY{!`^|e*fk}$zbWwj;5>((VkH#*7vHJFYl=#j#D{Ix5L#b`{2MXWES3(ujGT`T zZK+p8r2|tx*65Zl5$>W6b3Z=EL0{#&i4JiNaU1^qCa(DBxoL~$@!6c&wSP&z9Y)Ux z66mGUQgP&1 z|Kd9*)jQ|z4C@XVtoZz_>SCk2QM1nIjf^FOI_s*7cbO%6rnh&9w{hY=57ftGw?1y| zJyiVw5tr3_0D-A~67Wn~kCCfJ502NXJ{;+87OHlOyBmxLjJuobpJjH5>SOw2y2=LX zpY=jvZHy4HHS($B)GxkI5A6FTB5W(exoN`@Te9ixP`#o&7E|caEf%UFcQH1Ekscp+ zJidY5NVeHrmUc1#(=4%wYz#ZL9}M zrQj;O9A;fVgNpo;sVCoJIYJM%*r%cg7kpFs7PH|$6xtvye2XF1C2+wv1!{nA@e)@N zr>GiHNLKcI)=2e4ZId~?pVyu%by&2z(0trxL)eD!1cwEyChgbfprVV;RW;P=SkjHv zZb`8YvCi|<>g^M@S7r9kY#Up89lxJ*J6!5^pw#huspsobvc>%^HC+`JQ_vwl-<9B5 zBTyXBeA{{YbDuLbPXc|UerUGrv+q|V><4>?C$uKHx093& z{+Qu7*QEvMm&qJv!@uJyxPvv4ql>df-;*&FEW(n;Nl!u4@(n|6tL*uTQPeNbzpe*V zUqa*ap8t%`V?DF>$+(I!XzG21zFT?t+cDBdl)L1`gIQrKG{r(i_v5J06!`DlNaVbU zCT5~Zd<2~+{-0Qy4wa}Yo1!ac*%~N59f4&~mC%({Iy!jXazx^BD|6#AF(e)zn}VUU zbnpf$%6u%XLk~xn3W?Ax+{D*QR!qgS>_K#WWoIG22})=7&9b{mjpI6a9~6cT$w!-E zsYo1@O2+e-8^h7PEQxG1iH9Q2W~!kp-;I$6&;>s6lwbo|`mFWilM_+02O z^lR>J5O4J=1>V?-HN5@_c=*D2K#Y6-a1=7kbg|gyGCMiK*`hPTnVn+MW$`*W1PUh| z*(pJhP$ulV@sua`C1a&bk|0XZ96z0ypoeH_Z*>>KUdS05a}7&mbysFgR?MlgE>TSX zvs24P4u#KpJC-^Ys)b69466&$E%=JU(Ts%(J=N%rrERTHqdYP+)Ieq)s;6gW304_r zD+USSmYQe1&yM_2D;@@m4UXItU8`PurN;Su&3w$Wqp4d#$gTRA>H#@!;)F%W&kB9c zaL-;!^`BATD|h}KshEE|Rs`)?z@P_yRrq6}&qER_K5Z$gnX0In6fC`KUb^NvCp!rx zuX);R%Nz#D7EoGTkt&Fi^eo%tHnHmxWOLF2sBd8)la=(yF%`(D5$a1!g3NAu<)`PE z1bWUi^@Yq1a*XgF7kpkqz847cjivgjOUwO+^Wx*Q;UBN0Im?B|q3Q+d3pp-<_Ts?| z1^Tiv-sldtG_YsQx|qD;UB+! 
z{5Lo=O-Cl3xDg+@Ri>6EQ9{_)TmVQWLAQT%zwfviuFU~hd%1Mj_$gxi-UhXGZ?)+A++AvKanFSO zaHc=gt6$bn?kCIoy8v=Ol!KyPS-03x9=6p{f$uq|T_}b9cHomPTf))aFDALwHU>u; zYHSQrZ46@P6p7e5$#5}B1kEi2o3rb?M9(t&?Aw8m&{DWW53kkOy*w-pf_)fRu}{ga zWX85$MGxpy^oBWEO3e3@a>ll7&us&q+xm1Kvd{gesu={qe?>wnKnQRg)B|S2?qL#` zhu=bj2IQ(AetR5~kb+x{P?N|O2+8+wtGQ=|64|UGd5Etj)%uKS9YyI&`X3#Yw00{}`~ z|1iV;BSC)mY5x#;01+qtR#E?Q{rsOZBMzL^?E|{-9Df`UbNBfEhHRouD6h*nL%>|Y%52%sElBpE3Lj&u1eVI9 zvM2?<47(Sv-vFsM4;x&RKJG;l;iDB{@#6M)+3APb{jQf}@n^l_8RhK6GIm@UJN{T$ z-1Cq%qL*v%%Ln1}nxgE^$oP_}_|WrHQG`iFGAXn?TuPntm!yRc7OU{24wK#hQXx@bQ3=!t)Ltl5+*&_~AYBTS?8j1&ewZ3CA`iIj0Y9Pz%4jAypP9W`WW@l=g)}fFF^p5NQjfob)6SU8U!gOBlY4>&mM|<5zI082}nz zhAXMINGTWG%&1(fDak(VDyS6UX#!YUt(pQ~x@EaC8aVMwj^T-_a17e_IsUHF6``d_ zlqP(a?UyT}^Qr{c6(Tg(l}j@&1{f5z;J+WwJZ5o{P~2}TgLr58}O zBy}v!(k07i!`MS5e4qfH`aP>w;^>VhUN}Ixu*n_@FRr)8ecr}f*!O=)CX~!nBi`Ep*_68j|5$9dF#aCyGcORVin!@B|5wT)oR3@i){yv|K zF6+)+j4zkQEW`_7KIoY&rd9Vwk)B0+i>IifX|wiuj-|kv-XA`?ejlcY}&P#11yI3qktia zw@SYXMzJ*Fp1SWN!F}fn7NW<4^vC2ONZ^p^4>FbNaB0&ahmx*wT~hmcvL%b zy;vI(NhFYUkbgE7q%`vq3)f)g#6;=Ys_*2Za z7_J&QBKo7-A>#hX2cyDiuKjoC9nbIThlL+{d3SB2-IrJY z6g8#Z^m#t2e*pPsEO_(Rk-KVhqr2T^4wa7@+Jqk(D1!eYLfVCyuHqeaQuB&KcMK2s zTLFF@!_Nj@92?G72ZrjIU0!|Du)WTWM;hZT5$1c=u=r_rU3}WRwqi+supLzr*XBdOAIaSYfVAVj&0Yr~I7w@qKIuYeTH+P`$R zXQTDz@G84aJ+b#aSG~Dym0w|!;*~4<*}czm;;YyD(cmm(_Zje==+(&IPTM2h`dQvx zo^F@=f8}I{P@f*S+!S))D&eZ}j;z0ockoI=bLzwDpZ##c++X_J`7)uFX}Tlpz`zv; z<47mQ{ic9Ayh<%A80in!4b8F?4RlqKytfspAbj}EMRMN_H$Vcg%5X>UjzCi-+Cj>5 zO;hCTUNSe_A4#Jb5vx@z6WKZ@zAGZ+F)i7?PzG{^2vX1e5@y?qY&GO{oXMQa8yvYwWB~>jvX6NwiMISGO z-SO!T>b`=i6VtE~nhA%%_nOjI6QCaM^{t#xOZUgqsA9CVau}WQIJ{$A8;w+b8TlM- zXq*!%PNzH$38qp$8~LM(FtvOK2F)mu`o48!PCHH3PufAr#Ei4UdOWfo^Bud(WF#gl zpf_#3ldk_(kwcKtK(y&$UCdMdnli!o(SoF_{Ui^n`>~E0)18`I_FYGOf1}rriXKv8 z0JSu%ivd>$r;C7lV4h(e-${nf5KIclc}MI*ZHv|REBXNlE=J#whuVU+$xZTI-xF5( zj+wn9!&Wf13N8|{c3f>Zz=wNHtdPxX3+C-A=FgRwKJ=cGRDhElGfCcbe_?R1bMy55 z=7C2o{Y;S*>jU(fKT$@QJc{mgoE@Z3ODZvm%Bew?uC;MA-=>hP#4CI~55iS^!zNLu zp#yC&*CDcA=loq7`C8H0k=$I9xO_hkKnwP@Z(4hG)yd7D%>ol{vhX*av9m-)Qzr4h z?_nd0GUg=t4N7TUEzt5b+q+o57+H)o_@2gf=}_P+anevH0SS9W+0++@bkNF_PcD58 zE**0pIzE?)_ln9b2*)f+jujNEHEk5UZWf965{~V~msvoEsH~tEId4~Dn)>MIM&ZTq zsU|3q+Sg>)*Jh)g*)b<6j~G23@$gv4vT(T~EP-?#-%B{YS5J#A+Zr)a7BN!J8nL0eqlwxnM{UQrjt{YJMxXlduaXA*H%>SV zuXI>b3+Vrp?|*RcH8GX*bGSzLP313>2tPt)x{=1g))M8v{wZ@lG8(sts#>42M>FK; zy0Ind6R@Z5cEXbbrk?Uq4$#8N)sPT%4o!ZgiO?8-Ed7{7r0?&-6Ue3y_V1)0L#jtG zbw)Rnm=qKTP^Hi`we|CV*GfCOf>uc(!190CJ?a)IaRNEr26&><=qa%Uz7;Lyi)k1JFYm`_ z{qcmG0oY(b4N&)|(5P3CU6-(0>IudEzK-_sF3;X*yI;yy{bWBq)W_xEYWF$Nod2b>7V=%`~Rr?#-oOwMO?Ic?;q`#QNB_(O$x z=X$#C1aMlWXm$h-J$76mk54kGRe`f|C4kQU#--mUpYN~42743sPJX5^A`;loD(`cy33RRt&XMInr~H z(wAT6PyV*z93h`z)%SxsIb6T>RMv%zhkc%B-(7yL>A0aNy?Z#wv_Jcs@Y{xF%tCR< ziE~@Rwna9&E*zx6 zZGQ9?U4Gb3ETDRcf&&n6!1F*C97ti3l&9Qf=_W9}-=FLt#^FQ&EQy1*z++Eu6y}~Y z=tn-}#-Wt~0tW!}03{}N3@sqC$GCS6M|rPAq%kF|6yY=s1pwVgDJ;oAUFBOTho&A{ zhXz)0;Frk&27pnM#4cV>+Md{iDDcgvz?Cc;P88%A6f}s&st?ex996}>QUX>9U!b@2O=t4?$PDaP5yf9~n$uSD-~ zWweM*sGpi;i20Sm>|gb%NTaDd=G4P8X#>GBn6?E6lMJJ%c=WxT7iwpba9A|b^mP-9 zL38n9VY%r@5>Dl8-Kj$>MFN_MzLSq18~2H`VQL?`B$;@yVIdyyf_@)LV9@LoU8hUO zo~nmcLeIu<6o4QhWQZ|@7XpBBI>TUN>U#`YoD%Eg*8GrE1@eRQrw11o<#yZTUdbsd zJNxan>j=0io35i@7Tem08zSpTyczV_@`de~;;Yx3saN0WJ@V4w>Mzggm;(Y6Ey znKEyQ3!-0x<01JV$24?@OA85Vz{*LcQ3$M)qe0O`T#)#Q`l<-rAwLn(?DBz2?r`csm)6-C%qR{OKHE3m2l+r($Tb$D<(lc@&=CD|-dL zYnavx(YN321aDiaUpdK1;&Z#)?2Q(B!z!WsR}7O!23cLlRJ$qb!KO5nQ$^Fg%+Fjt zcPhN1qRB2YAfCA*M1zv(Qg0Mi_3+Y$$lSiNZ6nOFq!MLccj%NZcjKg-)4mbLq7S`? 
zxue&i0_7rpmXRhU&f`AqwZx;|=wRo&UEX3%8)ftR#RWg%9NA+vTWZWtXSGeGO4VC( zt*o4(>sV5y9stJxmupb+?qdI^;kOP21qE+cwGWp}Ty9%reqJGSUb|GsSnCa1uV{ZV zm9%Rz-07qI?16a~(Pp9Q+G&5^sVqy&^`>4aiJNTBWh}7hDk|r~S8J-=D9G$d^KbyX z`UCNjt`FOrvU&|0Itn7;(J9tWmv?NF```1YUWtsVnpt8kBB9g)3=~(ci@^nBW34)jQCGegP%IrNWPAH-ny!xbY%q_}PiF@R z1~!Vx(s6t{vA-uCPt*-M^TkD>Bz=k?jNE@J(iC>U@#6k*nx(a-D3TzGRKh>sA<9y! zeN;wjp^1RkNJir@6kWgmZgS4m47{Ty=le-8f!J9K!?`xZ>Uj=lZ%_b^V7gE83x$EFkTW-~!9W!&*g<)y(`yn^J9?mtpbpMHu z>9fM-wyT|XE5Rm%v-5xL2f-()?|v&Bt^4rjRn3V5cdhGVuWsa&>W-=(nAJtfdhJ~(Bc;oTXTUySBiAIJOQdH-nKYlas=urc7Q;=^ zGJA1WJ!wtR8_)CMlq@!dW+sNV46KX%Yd<=S5PK{0#T$xvYCr{r9w612U(18lQsUCQ zTk+UH4ioy}zySrmC#_#Y_M1>x_8@QL&UA=mry zCR6IhExvEoSPOeQ_D`AOL|I*tR9tOg1Ruxr4Iw$;h5g&=P8I6Q&??@}F+74e++`06Q!#A0OhAG;G8 zh`0aw+m~3dsjhlkT|w~OnvKV|zq?~unD^h^r2iA?>i@oc!Ks&i+U&M`R~WKVWN)jy z|7-n~XElqw#Z@I?yidH@)nK*kP;p`NlnS#qoMCOQRJn_lqSOY;Sz^~N#`K|xOtmR% z&OL_GoS00pg%86eP^31GjaRlO!yk$7QGSKOV;~t8!)5Nr^Dz$#T59onuo#A3u~LPc zp;z)GCCbnByizq;Q2@$AM(pRq>;I64?3KbM^_Nr9`M*~2z>?%YCL`uopY2ncUww0H?LPZ2!F>by zH>FHStRzVsF(qY*8FhMUlMyrapOX7JCzeGI-P|G`b?!;5zZbHftS?lWSgoJg*BLQi zcXmL$ACeRAgJ%JVe`T7hpGpQONYaL!hbq%hOr-2-8ub`h7JPeW$k-ovqpN{}9ptV*j<5 zse@3pXOiG3I6z`*!B9#PEL)~Db&}N0^hQP^ncfT-HV%v6oas0B@#tJU1uF#pA%i5b;QjI>L|<0iNsxqbqv&<&-=z}4jRz$*^Iuz(skUS3l*<@o8)S+!N$kqVQ|1h z?|_HF!Pj)jes5d-Iu+}cHa?pk2Khkim5@SQ(96J}mum`qaN8AX=VX@3<^>3Sx>g*) zqYP&u*a+Pr6dDRZ{r%t)J(-Q!pKjIpa#K#?+l`F3S{KVUX}zi59Dd7a=!vY+0yelx zg}wLx2foWd^IyRN@OOL)>|z&l@?y?Z0&#E#K>X7REri9K{S@BKb;uuSXS>EG+#j5snt!J9Mt@kZ$QJ_9Am1HE|Rqt4$rMt68Pib%=5Kz!m2H zImtbSe*b9xAU*#KZD`Ab9cu7T!q?x5gj0uH?}xc|)d{;+mnyMAP}I5_-?~~?*vU=o z{B#2BgT%U|rSMOsq;Ez4+ON4fF(9G&zv~+cN%87pr3wAC31{8)JCx>~x9}R0>-T{+ z`mF=p#DRkF0e)2fKK(xUB6B}AdDJ0%bbg1=ytB`|{s7`cOU0eKz_+_+qK* z6`^dg6v~f4g4E=FYuq+DL~dT=GHRorwNJl)L=h_F%zK2rRl?VwnET0UFWHNY?1g*m z+1CFF7saT5hVbK3+-&+RbBsCxJt(Gw;HM+G0u!~|HfUlED^}AQl&PxKoFbxEv6UCoSE(>~Aw&^}DB=a+p^!t=O<%=teK64i@vk0yn|;-9 zRzCxhjXR^_&(FMrHs9pM8sMDtd5DW|nVBS9Viv)Iwt+n7(IzCS%*a%W+D;{*DQpgj z&PM~`x@bfBJ>~tWUS@#33KCz5&ZTv$0&7k{yRje95Z?)6J-I_%C}?-~`3(2&i=pLh zT8I=yuaJ07v}!Iu1SY6@SsvQGl|oxI3dvbv%d&q3uHL66Swkb9DTt?j@|HQ&kHng<~lZ)U!FWXSdUFVgMH#p|6}VQL)|FG8Dj@h(-|&q-p1KZ zk#!~jz7g{)H2*a>L(ezsc7>^FWfg9+T?0)h;ItmshxuCKB6g!BPK!06Cj_yELuIp= z3{qLcOJXuL`d0t0V-8n_>KP$q8$D6;`-5LTo!{AI>sB(fTh5lUuD@TE;-Gh32$wa) zg2ieWu~@t-WLHF5+tjXI5tlC~O!8jMNux?)V(#22Ian1M6B-4Uuvo=z#-CWluLv=$ zP*!m<6fwnpYN{_VH-`GuwhZ0sQ)-)3*l+);?`qb-(PLc(h+mGuvA$r$<`NjY zlfD9%1p36=8z6@jGNq%?!ouS4X$wzKShi>4+rp9^3!FAFSl=-C!l3AF>e9oXujT>7 zK;;pihbvM%=AZhm1G#kk;EnXM783{b@8R!Js$wp-jg6>6yPo$b*xW0xkr(Fc@7X=| zuEeF;%y83hIFpzISnsg=jqAoDik4oTYiPe8owR2gv&2eGJvc_D zj`jDGVI(>R{wf?DF;LEcU67=dN2(k8vqjEUMS@N)UKWR~M^gQNx~+z<$Fx75$fyU!N5&+=0#`4Fjcq778oXvHgtpC|#_hSns^ zLbf}up9mR|^3Mb=Xbh#PFC;HMLX1Y!pD1^p-5A6!gsgj6I(1fa;UlC#nzKauDA)@z z8;|1>Af98G0$-4t{lx&9f435a0|NN(b?g?BVn9&vHP#$$Rj?PwS59wOP+o#87_2<} zHqr35!kl5CCkM)`{Gy;pE*Ll&pbXSu7ZJ2W!AyMgCDe9x1cJsAk5JagOez&Sa;IjQ z2cSJUsL0!KO()zEA2$&fKyAyG(o#Y2&84GrjKcis`8g( zqGrQ9@Pp$n7cb>+G}FpV`R9uu^dFQ1*jVED*Wo97-~V#+_uL(Ke?pB!y@wcmU+PH@ zAVMBQ_47nS!@ImPE7(+5z^+IIWzJtToAM>=bd)pOoeU_444g$BHadL@xL=7>;Ucs) zerr@&Ws0a;y%=^u?5BgeXO7@^Ia*gI6rF+#CRv`fnH%6R=+%WVWl7}GN#=G5R>C_X z!FInnstr?vwNXZ*#Rm3ye!Z*iP_4g>$EUp zzN=CCkROzPu%Uh3N=!Er5m1Za(d<~^AQkxDwX;_v z!4isc@bIKWXn*i0pG*Y7-l0eyy{NKn0L6DS%6gM*e;@t3Z?qY%J2yh0UsTyU4JeyB8ekesZRo{V zNISO{ZK8$t=HZA^0X9P!T1*yL;%Hsd&kZYwtVwENV* zaZ;^j&Bw>-*G<2?2_;RRxs4{-(^%_|1wT9K+M3c`9m5LF^SGf6n`wSq`=%0gCVUcjo>6O_iQy`;R6;a_yMbfD#V~3WH^DtUK zrB3D)^i02FrSBa4Krsp6xo{~k2jq)`M##CTZL?Z&dcMKFdRdy@XqSG{0Qbl0iIrrOqXGZnKBF%3SmeV 
z5*um8_6?W0M5e=#A!f`Dtbz(98S6-FJ!N91)&^NEle5=4F)gSb#e8IbC69es#g zl>!l|(uoRr6vVX}Vn#dSsVEGjZ-^Fc2eGXnW&~7Bi(ZE}%0?>7#ua2LM?Dk|;%d<% z>_r4WXOtl{ueU440B=WVY(TD0^DP3y7Z(+%aifWE=@?5%We=jEs%gHu@Z8&9+F3Yr#*)((Nr5Xm{*Uu)N_(M%J7g%CX^BVMw;`={Qlz+)S(a3u{p(BAG9#YSKxb$ zd_*z-{?ctwI@t^qOqrdIbgvNoLp<2C$iE*E2E8$3<4*ayzE#tq>4GF-@pNOUWztJy zQT*t6<6=Z_d)C^*(e)2zT}IbuwuAj19uL|Fa$Uqmtnp6POV*#KO^8P%JFlVPy#AtQ1Z8Z-Mq^$x!WD5bJkQ|@ zBG1j0fN;-qxOmYm+@Yx{BEq3^UPF_tOblf-D#3&Smd*E+^lU8_V_KWj^(U)sR~MHk z)+z5d?1#Wtd^+QIj_q1fBf_4m0ih732)L<~rAB1HNAzfoN@b=)XCy=*-Ev_!;EPE> z9mc%GjciDjmdf0h@C*u93+abj4G}74BjvbM6olml=;_R~WdqANnb0N@tA8C{sk$gJHHDb13oZI5-L9&;p@oHTU%u=@5X z-!x{kivBnpdaKUQOlolv39Vr98j=_m{rgsvwOi}YgHziqid8xfHcH(U_{Q7$szLjM zg*k*fi-#cpYVVSQvK^kY1%x(;dOBXqro~&y+WqC*oE}Y!x6Mi2`;hZ{3kHtewU74@ zZ?&;lEa;iKYjN)V)(|L@#4KZ27`!{iu+S}NTM}=D&iONZ0%CfX|Eq&+2c@8MLfygs zvaKLlySJy4mjLa8g@1Wi(9^XIIxVYgE7b6x2ixp_n_h1I&+ykBqBng_Jz&n(J5Hl6 z_2nU3?USIk5gW49wOo9#24=LxhY;x(m2IT1MPaW-Ul{Ay-|I;JCn6yH#i5#z-*31n zP@bJrF2~@cyi|l15#uMK(7gR9eiRy>OOz7X-h8Bt1h*MudkehFX9ur@cdz&UoEBwa z;;Lq~?dO*m#*%bZX~JO2x=X$T_QLy9@u|V<2=a2DuCObZc`$3}!(m3~#k1Yk_T9&W zN}9hqCSToL`L-StWU$(1^W|HaDmz9rbkrCAsyS-beDc$eEw^jG$-e7~C+YP-x2Wsf zp5odk>p2)7k6~dJQXm0#xYfYb40hxiE;Ns^Uk_JHV}MiUZ1S#DkX8kt<21n=0b>q& z`db7epVe$ln0w*9?V0Uukae}U3t?R=maVIkGvTJdH;l|vw+3S4%JtTV9>SnE7`xfz zQ5Ll%@r~#8G_qVg-h}(RxIAbVDyM|3U}#Xj2D!OATx=A-7SBO{ubY}QG;N@-gLtmp z>hyU%ME~@M`oI6ROe5C)NTU@D&1LbMK^pEh;WqA&?P+RIW!9OLOFm z;WNAI5cFVZpDC`Mo32ijI7MG|51t?4Z`&pG5HPK(F={icdYeto3AvY>}=hXxJp9!WDPaEiL%*7 z51|jcyyYi(DoOn(dGx6Ivi(|>E1LnaTe=M|saFD(R>Yr{Vmk);&=GTKOy8ymA-5OS zP{Mv|d7F7VhBLB-uWiRrmDINwVhK!KMrfEF1KoiUH+Sox_HdcLR^kXDY%^C(U{CG2 z9g}qzLEa1JjCf#fJFEF|efNk7iP$@b^opD@x0xnK9u=R54{+m5nC9zOlhp+p!K#dr zUVG?0Nl71YTV1G44u$tOu!mvA)e>Ar4DE6@UuT4@Z$n1y+7%9UTw3$Umi$K$28+jL z%0Up??T4n~e$zk#D7gV2(&Hd1VG>|gZwxkM3kz2ZAOp&rCcecuVOE%e8okBG#YKW+ zJwxF$5M4YhP8iEKI__o1iNFq%-L*q|7Et6Cg#tpss`m&ZZCtTX9?~4_e*4;h?DvwnASL~ zR|4l;=+@(I6z__qT~eiqiH%CdBFw%>lsTEq(BD#!Dk9?v>9-q2-2DTVdpgqw_}Ybx z<@bL%Rw-&LmhtU-cStP$Mbc8=0^QEdo47HJT%XIfCTE$uw%JmP?U3wUUXP$|1XD|` z?ui*9i}#=z>REja@J-IE`kOCEk20YG2<$(cH!JmRDz((%`qUt7;l;LAkZ^qivjOTL zK-v&lRnpfx>v7?xxinpuJIQ8rM~o1Tj73!X0ugBuLuB-x=F1V1KGhNKSz=&JW?!+I zxlE~Y`=P~{f+z^V!L4uQ$#3O};NYas{-s+>R~Nn(r7mnh=VqX#Mu^{VFg(eN;)~{} zk7b~9v(U;2i;f?vyLX7GdS2$GDAJ47C{jNurm}s@EUN}(kUG|}8{$LWT%hVLk)Rhb znyw21W5Kb`o4DQk^<|fP`+G4yxoq-mrs7a^AerT@ah*^(&LZ?D${LKHc1bSWz8X}GcnBV>LVC3FTjtT<-E{S0j0FV$7 z0LTA0Kmp<;ChfqbKIJfgqXuZZ>um-Q!0Am7bniNxR+ZopmzViLTWBDp|;?F1V$l3TJ)osu=4yS?4?#+}3Qfp*7#CmwOrT^)iRrJ9l5p2-@JA#5376bi zLxWmDFduLniZ@9h?0bguvt&962E2YBIj1*mcKYWwxIu}5!A&HlZ(CJZ}pW$-99b?(o#8aw@% zuRLGaJxe(Bm2m44;O2Y%Mw?a6f{r(aL|-s zrS&9iH^Ax6wNO!dD1;jlHlpbHF4lN!fXm$_P|vuY0BEBvqNpG7WQ7f)!UI;j4VpG!A){k zaZ$vL*RcnGo*P%5-uU=)N}a)%u0P^4v>5w-U4H(}gXUii;6!kav5|^2zwAwGQ92oN z!CS?q{eEZ2Dpa`trU|TBM+Q}`_fO+MCHAHcJ!BG-&ZIMV4JhI^QJxYT<5z%f{d2Vq zmZ8PoiO*XBalX=RuGN7<^&6Lk&nPZNiMx*Nt(&gUIYT9>{~EN%tX@G1LyC%^ zx0qMOU&pe&xfxj0syK?Dw?B!9H^b0!(>Y8L#TTOuLEF7i<;=1D!AJ?%BQm zyUUzP`oQkchhkS%FD!2|w*gu^Bky@VX%cSdsrfv0Wq06G+O?@wtYS=}L*fS7fZ#ah zIA#QLe+B!|cQCI|E+oh~9yf-bg2K2t&|9dG|0{S8W~85Q6dE9vUIfq z3y9tZW!DH_{Ll4pRW^Vb}{%L9i`Wxl?wb8cbIYW{vIn!LxR3%Ocw+6MIW znG6jx?;KMw1Dz+0!O6V+=m$EE`F%3Qz1g?@ueCa-mEFsqb_yH>J~xkaRbN@W!O(Ku zld+Bba@6!%Z^of_qM$UV=8gLz{)l|obvyjeD2fr*^hoRK%PUFhCb@2#>hA`rN~%i2 zH#N$?qUJ4nyc;|^ukifB=k(v4*E)jBXv~*Qy^j<1_0A7Ig@1qR6Ih6-)RBmE{V&4N z_ZhkvBt2M$_UaF>ML+aAFXh@Z6OmJE1(B?M%BR$6TcNdz2#z`BIt&2=aDgN-RvD%W zTM|PLeW}fnGDtF+|aY3lRbx|7$u3J`VLjb1lT! 
z5R7qLX6Y7FhMqE>C?3SoxLOQ76+AJCi)ruWY8e=RA>AC*iLrYvx~V9=8*ls>x96tt z<>vvXxND3r#6@DXeg5LKfj~ueKau>qd|fQ`REX+aY437orW%f0TDoN(%|l64n#+L= zJ3N&O`0h+qb#5@cmofoP5L-CgDUnL0y<6BJREBsg17UDUxkef!_aq!Mmzzw>z#DT& zut9PcQ<<1t5)!5XiID2dG20I1*Ei$76*)HdKPJ)DPqu20#aN51KYEvoU_4 zlP<{`nhS()pyPbwUZFFq@_;f|eGMNE>Z4TMRn-{kr`C;M+LM+Yu+I^;hxIOOQ<%dfOwM zTAOTcdt*mmM7AGx3Qll=ij&!fKs}}}#cu0*wSN&IW$2bz{#799qNwyrBpU#%my!aH zFY|vT^#@t?eGA&W_*Bk#Yf*oH0W0&Qzv z_-hU?sc3ricIti27I@UHI(gs>JPI_wu;vU${rw}@_1l_bt^PCp1PlgXtRQZ$g>J^X0c|ayn#1V|H~`%`+AkCkzOX2-heN*d z?+lCfcL&W(5+nfd3He$dGIks7glzM(VDRuNWb%$;xMG4b#@>JmM#K76otdC0AS(n_ z6NASnC?Y@@3<^&L6-NyCNMZ}To&St4r#6yPG2XIwr< zQ_@n$#wuZ-)$Yci`Scnui>TsQayPw!-rHJKI^ z{bpvB3a8ZR2lD!@rq34@Hw9}aRaiP7H}x2f=Te1Sdu62ERMRKdOYIFa*Ncp8o~#va z)c{(YnOJ$_R6Abd3MQDt_tCzSEiSdkO-i_nhY$2_S@Sa)EnmgLS-+rtF5dC)@*VY2 z6(PMot3~*WNH0xQq|}1}k~8cOCriXU@s@eB<1L6uob{Y77KC`hmN{IvEu=XdF(0x! z=8-kI92RqLDKo$HBo?zlc>`vH5)^>~c+i+&d>y=mC>Hb2e&bHh)53A!)RRhIgYgm} zpQG41f*1`-LulGwf3E+vF7Y3p!teHzlXtEJ`Q4eW-f-ibUuWEhxDQSrVlqF(P6Mj; z&UE?KUF9q1PgjyIOjnX)Kg24gD=p{4AI0>{Zp)Y%rY+1w#=)J&M7%(pt5O)ET%A$n zS?D8HDpEDRp56DgFe&ASfI_|6J<#!KeYb_jf&W3jspE3-OfbrJ;4F}2jR$tkC@BDGGlpqXQ)X@jRaX=Q_T%CyeOj>>(u z=lA`-?mzeSy7!M8LYvK=VC}v3TF-hu?~nAoma^1AR;b#8dCsd*b{A*^wb$_SP=}wS zgCptiYi@CDgs8jfsBVoPl0Qy4q?}r&T9V43 z78<7=d^C#ch7Z(H4bC>xn{0lRGo5DpUP-4CY7$`~1E%S3AI zF|zLCneVy8J=Q%9sUH_XKmjKvxpXE~GwMs2#BF<(;sdvce!ev_!zm9MKB6}km5t3Y+@-A1YZ_rITg#5HYP&z! zr0dX;$JCE{LCst4ku%)bZY^uP;So1{(suKG>Irr~u#ub0@H0;<8r#0Tt!q=E-2bU< zeU#n3ZcxR@=WGu9cXqc=pi25ogKb>@Kw(Kuj>ieBEPnZLm8(a98EwToTNQ1#ZE*Y2 zSGH|2MrM@z+JQ`uQXhM+N~!1BxW@)n{^_%k+P1ba)&)rUep|j~7KJ(x7N?s%t$-qX z*cE2Q@LSjeK5fp8l7aNLh&jIGNilY_jtVs$Autbc6B_LK$R{zWrRe}^_9%(7jbRU@ zXQhk@V8KINm7?@p5*_x_tv%>VOnB34t1X{W!S~wgFjbOy_QBe6_R}~?AbimV+xZRs z593)IvS0x{u|EOS{3)O>rX7QX+I%LY*?~D5ALJ=Ogz%D{;c=GCBcN6ezWX)tK$lEYky^2sK#ghgWJ36$G$DQwf6bjq6cfI(&kQVf5-lW@0}qnRu_*i z2G1wDvTaeuTU(ROdza#Eb4}c-H5je8e~ztIEpX=<2r>&wUoFcuv_^LbMiO=TYy#hb z5NuQ=9*O++6suVyr8g^Ef`A8fIanGD%!DJ#uj)w1b5`c!rW9~&zd!{OHpdl*V_}NX zUNWDOQ(c)eT{1_ceD_=TsVI>5CU0MGOvw@SM~h@@awsJ-XA+q#LlYI+1%W1utrJ}p zR=9Q-t;H5^{ zCs=u&lm)CiLL?#BEUaBf#Lz2?vgd*gcrY*6jOTLMtDQ-{tJ&*J#LyJ1aIAG>ktL}m zmBk8jmNI-ENDjE{*bu=|8sGK)T$X%1v1&^qILuUeCDjrds++JD^g>n01?jDq8*%UYS3cbuZ`NFn^j?p360C`I#%P zd$u9yk4Nj5Kz`99>QYDm%=JGnm!E^$BlI`<&zdc*luPn6@-Jg^kG#B z<-?bVXSMR!f)M)p7dM=SRV|AReu-s1wWcRroL$X|jNAF`F^1sQwUxVm^_44~`v^Ht ze|QeXOrcPu-7Bm+6D7-64m@uYHA+9V)kdKjNSF}zNMc8fXvL9U)GsVWBDM-8pnT<7 z)(%ZLxK%Mmf!iQ(*FdyDS{{@&9)5&MFB5^g*7pUR#iC08B^*O^mh4}V@&Dvs{iTFK z?P8zNTZAh?)x?!o3zTJ33L1MJN{x9my`_4f1$GEV^p0qr2Ku37Z3hWO(j=-DT? z;)41JG{#CiH8zCDrHb8fOzdzbB2qpmQ7i^g7J~XA=mYX+fh1_HM~bX}e~yUkUqqMt zi?JXw3W{;-Ckfma_=_?7Gm{01a=Zb>(8bhBybd(TczwJilOQA;ipaJE0T00>iBN2U z;4Xbcs*=bT*13T`S%15J;?%xm{#!McW5qidwsPs@kEtyo8mY0}Tg0)vE}7O3P$?{? 
ze&#;?>zVB2sNNrlJ@Y}Wo1-94-8-t+zj-1@E?SITELz-ZBRg542M~VHmO{q5c<;oK zPg*w-7iT1Io$zIj^b$0vUHq^bWtglC|-vH)g;MYZQ@!Fr1$QXK7`xbkuKOZiScnRW&WdK?fVN5on zKluy|RrDVlqCsD(4B8hUEF5f}mj`}(I@z8Nj2=Kf`#>hL{z>C(7;~-xWDY{hA+AMe z4@7}MB7k=xkXGGtdT_`;2f8mpnU$q{0-_(&@ILwS zBr67a&6yHm1Zb7va3bXHep&UcOvB$m)|vA26i^gQALjmK)_3O`HZp(aR^BkLds5e* zyu-Vlq8}TQvzbboNgWMoN*&C2e&i2xUi2@;{;e-sUAK2-6245fy;RrB)3x0*{N>=I z+aaOhw5#F6ch3y5WwZX@H|b-I!WR7H5CGzdA@%-eI=a^3n5QzeN#8XODks**0IktW7E;a{Tl+?HSTH z1^Vjhg1wfIx9a{!2o9^b*MIA%%JZZOKiqGM8@B)PJ#P&N9CGo8R|9@(zDF{7ZDJBWUf!$u=FV7t_=O_O+JYwg9SQeO zH`(ujMBO9>;^fUIkqRdlUk&bSn3y}4_Bx<+Nz|6WznlU#wauO!i}(7L%P_8|o-oVW9j(Lap*@SR>f@jwjnT&8Ib!k*Yss&CDX9+RDH+KQ=s94Yq#=xXu-x=4X2z zS;_{L@%H6BL9o(|4}uT;2a|0zC=--1k9v)l8aygc0`)MOyZEUI3jZ;kfV@@IfxZhR2#mzZFs)DK*JE=q>n4@oE=G7*)`!~CW!TdK?44czarr5w1aDl}-|1`{>j zFJz2Hwqc>742Vfylr zBs3;1EvAOch0Tn^;?}KgtUW7UJz&vjOL|mc3o&VDw*U6_JvVrqyT`iqp$xY4Vn?>- zTW)Afby`ey%o$|;>X@2wIFPjMHKFj|nV5?;??YpbRL4}OwpmzMBjr20Ok3eIYS-@n z`b#34DqgM?pP<#RvPHV;ybF`__?w8P5bT_d`q_ZxHXBOl%|?%5EXX}hyS;*%pCyg# zbdN(G+K+_#75YMI`BoZ)rz53yurfQV*YGMSl*CmJAzYYKhfyXMrUCU9U7`{x&FatU zbdLrPtE@lNL+6K=+~cg~HwzJLs|0!z;R0cPkIir-cIVOA&ag7_L_M|;!|{OW5j-)l zkC)g`I4;9x!zOVsVu!U4HWW?2JlRws8(-|VoQ6xkBP#W@1#6x3vUFTX}a_iAB$eXjP2(p;62rvto%z9YS#9NI*^@4?WEKYC9eUfN?l zbanaAz^z)Y#f<-oo~(x{Eq;7)F_@0rpwaXkL9@G? zxo5@Sflz84E;2q3`9&h7kx#WBmA98~4HnVF@RUAoK!W}l_>Fi_GUZ75%I&R{C!l<{ z>mHk!AkjTe!%&@ID-Aop0u<|hF?fn81Pd>ODh`%gMH@EL;i0mn-|oC~Hs>b``4me_ z%<8m)!p?_1!h{viY6o&cq!2F1CnBH2?|v05L*04qywwdVa4ztm=zc>AP6DTj1hy5~ zANdqk;#t8$@L3~rGKgp*S_+;;XuwNrnq?}5GOF3OEWG{5{01XzN^f_d9foaT$+yo*Tlj>ulg+Z+J(4_!If9&k-CO= zk-7=tn7JuW4pJ}p290_Y1R2WQi(X)|J-L^|p*0Dy*qE~`pkoXAy+1)aTgKulMGgv| z{&FLMwq&dZbz9LJi9v4}4KAU(KmMfv@8A#=;5sC(`o(Iq^jz!bb=9gXXpzaxg=HJP1CfR{*9X zJ^wFWWey?b;LZYe1!UilC#(tTxt+`qW=cd2+ms$J4j{~`4?&2Kw*oPU+_Zg!=TuQP+exaa%n zjL8-KinVhs_wJa^-FEQ%#2H`mcg?j(YwBQEbo2ov6{J>9T2tJ#5!^p&V=c~q^->(3lsrak(EHp zZ%+f?N6WQkSWTcG`vWfq&QVP?5dbSdG>UoomSP}Q=DW~2>C|PAEE?!j(qINNFeehL zRAI8g>QA)I}OdptR!H znfKW)&0~TzSR$PQa%|-Ky=2vg2mOA~5uFu(-p(%^V~Ks@Ul(Ie?QR#3@x9T?ttEcx zWV2f3U`hk5kw)MW_%75mWUK3@2;^2VTj-5G*qX-l4WLi z?&H1hX+RNuPouERr$apJva)QmI?q+b)n%QbT3`fEYPv%2-Dp1xXeG!N-UQuLAq^WU z5Z9Gt&?I{{a~ zQrV}zD+G|F2JwE(SztPZuA2&v9tIjy(nDbbrzrySkXDL75W5$c zsUA(lGz4Fd@9_b;4-e?GV`mWtkN7F(qA&vbsK9iBj%&>WIH&!V01oFvBW`1lHyoWZ zaFh^M4DsI=H|OL5(N>_wpt}1;f|?Ay0sqLLE}>S5v<>opOCol#{dDSnV?9`VyY*`q)b zVhuT+>g>^0SuZ4!q>!zMO^kNtAuao8tm7 zN4i_0Si(5{rgV{(X7Lhh!qlBFQz(oC8qx63xdA|&ezWw zER6C@^H%m5tt!bk4B>Dg3U`=@I(dYKW}}v(nTj(K0Q+!FuxZ-9W1i@-Wf?y(u0pobbEEwd?P_`1bR+`(~t~Wtn?_-F@6*T)uhhQ}dfw z9-aQQ9=}LQocXRUJwK=GTTFMmLA~Xd_?E692HxvjZh_A7hw4c!Ki`nmf}39Jj_hNn zO-gq*e$#p`?6juu9B4nLWo=beMNiy$*qWw#3#m<2*#XUODQ1@~g&Yq81u9%lHB_i+ z;!YZV(@7LGsISLkQinzw0&;)2j-;mLq(xP>vD@7QsKrDoNZ$82n?(qGY| z+TBrfgy|&j{y7i8IXxG=w|KdnwQwx9?APQo$-LLd^_ye$=iFbw&snAIs6s%QK5%n< z)s(H6e0n&l3Z{BL=X}n2od-Z2)-SQ&-zL9JhCIv%Hb>yynP7x=b0$_lIGH!)063c! 
zRUeyHRLbrpzk9O!rMsv{FZmQac=E}n-q+p#05azK&~YYZ zFUMk2O5Y)%A7s-keXdiFV(B}1u^Mv)LdKE9KY;l-_5quxyAKFGGZJKRpaJE)zSZ!> zCJ)d)3;$=$6@ch@S5)@zVYWPIngJvaFol3BBnRRS9LNm9>4Ckzc6cQl6W!`VwhK5O zn+LG3Dfo{1!70URld`_&9EW9>%V*Yp@gKGN&RF0@V592ag-ROF?vDNa!>@4>b#m~emPfcSYC_Yj0>vYgP*5JmwHQr1>H1|nrO@sn z2ut?pE3+=W*BtSNXdGCpasBx;|7I9fTOV*P#wU1z^jCX7ro6{XF)0E4hax{5YH&}e zCV@LUIDh?aV!gs3{AXX?W+fjKiiMKJYs5ekX9I5q(DINP^(;0l`0^&>v}_`T{L20L z(Fo%PMld>Qa3=ujLu+e$EjARn0SL^c`@HGU^qOg|28fC>RH{ARV`nX_0DjAPLVXpw zs{=DwbP*UZ#hO4zSt>8qT$=}VC*be^>D&SBI-oH?wc7|j5x{PNO(NLmWE;m}Tm##vyj^fA=U za9e;zC=p!q0QdlPH?kbuv;ejT6m@`#LoC%v4na(4=%&bFKDIH~d!)y>)k>q)-b?-c zCQ{=eQbvA`8{w{}haq2Q@sHINzy!bMVVIcXPPiKZH)sH4CdRtJz4x>#>3sy4KUbt6 zsLL6(irp-&eqdX|@^Hg`p8|}`G+0UvED7glM8LM>tt(aBql0W-^%qMHqWoXn_-Jd!TW*(Yb z`CHx7plTieYHei-`FgQm&=m*PDhjLH%okH_c+R}O1L9fK!YaP9VQt-0Aoj)8ktHs* z1zYU1gaJ8?xs!!szCpl#+D3KJwEjb>l$zC}>{yF-Y1JfzT^LF852p^rQ&X$ul`WOE zKXX$c1bnH!GpM{C*3a5Go|tu}R@uJF^IWadzlxS&}+Kr(%IRJ5S8ZAdKo7 zy0BsBU^Uopc;Y8kBc2>T@bBGFJ6s&#O&;7_UZ9=}wjVSa)kfby-%*z?E!Vkah_N*i zBdhQ-rAsj;>g*hKaxOu(0DTh-Cf#fz_aQ3iwMBh!zBMzD@QRAYUuQzzN+}Ae!{#VKnPyS=zwD+@0$$*+%n>ZI&@?|3b z`xGXz@8$~C`!Ay!Q7`nCZa>nAMZNEXQ6ZIz;yyHmcL6jcDkJ&6BEHBj^8gIg{7 z5Plu`j`}}i3ifFCm91D5QIGk7hvP9vxoSeTxB(}iVS+Io>u#?SH<*AZL-tTof`OXB z2Q~zoA);5oewGcMm~2Wwsa`yL9krD zW}|#AJ>tH_uiTbQqM%_BXX>}Qf@gB3)@ON2#f{`bg^ZuiK1XJ*mUk1@tGtiVc>9rT zQp7?Pqe!29I%q0<-iyaxT|F2RA4V+41heApFD$w~U7KXWad60>8&>iLP&+YazO@rY zCd`@iwY1`X80rOrz^OT(E!7J32jM6R*GM-#fn zvdOk|g7IM=O>ZbNOC17ny7Ux5kEAP}QO0WuMV|duzHO#!X#U@P%KylUjGnPkp{J3+@f-8=1$gI+F4ccel0?sMgl_ELeQa3a z#F}sZHwaH=7EWv}z{@V2ZUidR$18iU?(X>RSi9y(t=F%2{0G=_K2BPlr^**&qPluMIYayAVpPaXy+EtmsK$uZWw83ugTwn z8Oguf;-0ULkt)1)Inn!gF)CBK2rGTs|8ZrUN9rl<>ZIIIqsW}WiXbC$HqR$3JBfpZr z$k4qz$=|<6bgFp+L3PZze}C@vEgg#2$-A}3u6tf=NIZ~FQHVk_u?7DvPe^G0TV{l* za8^tcG}&!)*y~40PR|vk_mAje;-_Nm&n%}uj6U-SE1)`D&sd{GQG9b}ndpS~!C)LO zn0;e39jq*+zewy=Ktz)cMwa~1^X{9>Te2R=!MaHfH*O6cG*u#BwVwq!()9~zB>`4d zrA2vZnC_0=gJ#qoqL)3FI8h#wXHi<>=P_D^(;g`CkCPJbE;8se<{4rpUSU^7ICS*C zvU&f(iaM2mw#H1YI%zwC^Nk(GJKJ+}*n~kRUu&JFO}+O`1TA#*Mp%>M$u3T42z(i!JKO| z4Jau``&uGO;3i=}rUjQs$YEDZtxmOJON6eYls}>!_bz6FQoIUoc zV5EG2cQ=oMk2U6v2zZ{$GDSo@ae<8lB>d(mLqp1vN(B_h1BJP3q=+`l>TjfTQC$MU zVt}Fs6VQ|KkbRvLB1Fj&R9ef;c?89KXn`^>0d1nh`$ZT3C;wEy@l(=# zR4*RtQ$@7Yw|?Pf%>9v}l+9l+g}UDl^-v_0^t;)TM31jYbJ$nus#g4rK0%avp1>IE zxll@qTBz=0?PV+Mceqv-g|f=und~eyQc}oa;4M?5mK_>9?s)xU_`3S4*{R6v@V-suV@Nozd)l4iC++dZnc97~52i>}{Z)C`VV7 zz0avz*3N@%Ht zNQ0L)nx0o*O-n@S5EtCkv)$7zAVG9|>+6ba-aEwzNEn5=M`Ul+tV=#2hbTI{i8qlf z624VcEE4tVs<9J4Nmj71w_x8>ilQ8S*iVs2-m9w$2@Dk~_zgx5zpTr7pRWts=+ zpA09KuEM_)-tLbQ<_T|0J4Q7{x}<&XGst|>Ui^yIC4}depiv`UlFKAJ@Un_MT~ErU zt7g)pth!P#+w84KIjc^6xcK+z_XmspqX!edmfdNSzxwO*1CvLtgWVT= zkMb=HHQu|9T)I%{!H@90{m)4u^23Mo%53Bp*8jEpDB4e-y-;Cy5DT4|75}%80|I>y z%m_;X%`+v^omA?N`IVlSyjgmT5K)3deT@eR|6Lgim(Pk*bWMFuN2?clZyC7af*}tO z(1u72tgv1XpsO4;XTH?&SinMsh3kuL^FRLOgo0%9au7-%m)0H^>ELmdMpjAEz23zT9w<;~b1f=^JbjLDyaoy~gf*JKr zbxXygTr3Nf)W0mj#aN)l3*Q%>sK5f?=F{VdMzKxphrOyCo+Jlw2bQ;fbe8OWwM3nB zjjyThPgCd9)IG!O4KLH=@{GFdSC(Y)w+gn`a<$={7$IlZ5<9b{95-6A|XhR(v6;@ECvyZkMR%(n#bnBX)j z?HJpX2p*jdU5{;oXsZ-#jDR*SEh`c2>jwEbPfE|7RaXvd8GY8 zwq$2FnNST6zL=?29~ws2_V4`DBWSJL5KngO@z29f( zpT*&G>U*(zPaetVu9*F@&*Rriv)+3+<@ z#E`ss1J^Pgxqki4`-q09y@$_eMV0O*fxqM%9Li%W!>}qQA-8mIh6ulSP049x5HuA# zV~DpTX;#&Vzfaqcjw|FxvO`kum7`=l+9IC?do>3d?OlGBb7aoK1(Fs>cR3KdbiiEkP zpc$U7407YmM3PcUw=72BmoN2DhIp*cVk>Jwe`=D{0)uhh+_#90$|v2hvqSaY1U!Db>t;&K&y#XV24-#mGEbY=P4snvHRYuJD_tVKb!(D$8{#h zx;@h5X7aIYh)ejzF{KK06})rxxpc1q;w!t9?CF-p({oGy2$_WeV=E(4NKDsZ+ks|( zjRlncyCvp{oloq~4X)q(@^Cu!AmNI<^tW?b_Ge>GojSax*x=&5#M@qa27Unzo4W&^ 
zmBfuZpGaPR=z0D0uFbuERjXaZnWhg5E0jbA#@`I)Qj0NnvyEx?sCxhf)35JnBp4RJLvsMj_>18)N zkt|$ML&I{8#MBZgUmi0ODc{&mTIIlQ3Y0HD^F(wfFPK;!A%uIzT+E3R+YiL?GdqF6 z%;wUtB)oM_P1NR;w*-)fjMZB0BHM>l(}75Q1_?Jc{x)KIjSMs)Pn@O=I?AiW-q&wg zW;UI6s=b#}gYV=;PT>o?@jR^6G%u{z$LDTXdzqldSQt47Bak#)Zy0wN*WKQ~MB3hd z)&`4OIx4S5yGwiBYcND~wsN~5Z!Gg@GYnXW*fdOumIy3M36Xd{*h3(Jud|pE7^E}- zBa#ApPCCOsT6y08L9Ri9qWqf23hbtgS-)H|7aE99+>Y`K(n6^!rwXJPJl5F{(vyNH zv@H#SCi&E%l&MQo^3SUkwjZ$EHlicATW(cVP#MSdE{buA_#x8tvbiIV1l#iV_y{9amTt7Fd5AOXxL*udj@mz2PZ;R0L8Mmk|M4G4`;+)L@Ib#!{VU(J06@v|7!2)(CIt?T6nzFPJ zK1dMpnu4CO3Hz=TY0uav`X>1+2}I=O^87oNYn1YjB7$Q7pH6Sgi1}p(xNet7P@hJ; zYElVh8l0~89jSGsb{_A`s~oi0@;#sR;v{!I*`yF zW<-r>wf{NNlQNMLD3=v*@(NNa=BV3wJ%`kP`}5D=uUY@m0-QqmqfwodRxtZGKO3gC zgYXP0jbgbxVqA{jcom?zEpzHwBE#~|1}%yJtu|v$JZf8bfu#<9&rFS`)HfKHn8;o3 zD|(4R%Osr=Ynvp(^mK1ZN$Xr+$gSQEV8WAF2TdKx`-4|S$(#d?k@9slTduO`@~+*R z^n7(zWKI)+wKLa@Yml0nd<;{mk6$Y_^2_WODD6AAM= zlzzXmNu~aNJrz(L@e=N(XWi?*hdl3lCftLGnS1xLvF=fc(c~PuE`jYp#;{Q%&SD;x zA?EQCdFVkqtbH75pw8_j)_yDDB;f?Sr6SkP zCK2#tQ#Tk)7Vu#VStusLLWUF)gOkBU=1MX+>ZBvPeTd4ahSPI%LOO z-yS->C3HHfIE6*aKIEPpcQusdSs$HM5NRHwYxI>?>{-7pBw)*Ger{h;P7*)Y%c!@_ zsl-KdRUE0dcs&z;K52K(3iC^!wyt3rb$PWdo7vW6vjF>U;+%>%uYSXD|E?jKTJ#po z1+BELUft0fM$awf`s?Mool9}>(efM#s!|Uin{}%NRjIl^?BB1PzNG&zpCf;)d~kA$ z^1X6Rr+wRsqK{J44KNj=Jpst&cWu9 zn6#Q@F)-wnLuU18TDL>~@+@N06b69wPKfKnpzSbXmJ^DDOb=Qq__UC&gS$>04IxaQ zLM&n|L)U>L&r&(~SHe=l9TOJ9Sd^+TniwG;ODRgxzaBBx*l-}`)PdC(gV(efSeRm@_F~{iQ|u!gtt7|a(Uk# zep+lTE-c9Xe-D}bUts|LpBECWdUO{>poMa*;(1)sT^I^kdsKRzyYsN3Zsu^Kr`FS* zje13gO9#zPD{A#8Qdx*IVBW-tvG@4Nktddn@$jPE}6zn2S;^7S%oBPw(5>dfz*?iNLbyGPt^0 z13e#X8GON&DjCN45Oa;3*S)Xvs&b}sUc-Q5A9!!{c!Fgq(HDk&LZhLGfej80uRjxh zXulfVkC+mcLz<%9ywd{WLfDE7_{ie3*UIOZS+6cKvkD@pKQ6)bXKpc;)LOr3BdeI2 z7xB8k`BL$8Y+gwGj-t6xZqWLoqK@nFn?pm)^(P-bZ2$jX82_izle0}rddpwy9qGYV zDVCD-q37SD=mIyaP%cCid{s_34i7!~_?njOvk@ly`>tSV*(&>0Z3Pe{U?(~@=mx7X zLB`Q89NBoB7$Rg2f)CuL83_a|Xt*=6 z>%V6E*7_smn`VwzBtU+h^grb*f&|jwJ67@i?X`id7V8dIBxNrGQUn_z544yz&1;6# z&T2?;gWN~n!cBs$%dyMF`_Yhp_lO7ica4w-8TXX^)U98uiPPrCzeO6Zb9*8@X@}Q= zTsaou0-p8nD>hW3b`G*4QI8>o4if0-SJgMp=qR?SrR%b#PaQ0;@yiKemXdH@o2v@R z2?|!7ckQ6N!UpwCh69w0W_N{9(0O0Wb_+FZwM1m!aoLe(>Ty~7vNC=nrhla(pP8${ zF9h+NPm&lGw%6B$%{V0$xM~QMJKF_jhh@%aSEJ$_`vB|wxVCj|%P$?NL=q_*-Umld zUY`G1r-*4;tt0$63M<$O>KtUK5wVwVV1*`{avLn6MF(?>Pf65R*lg64BD~0{3rxv$N% z4UTIV;0=dV%Bs#LTr3EwY}&LrH>G5B494~+rLXlJBrpUn@9rp8oaTOkV8qV^P>Ddf z3wmxfkDY~q*2PGcTmX{DMm8nxqoL;cIdU@(BA&c!?LEWSo!=BV_jM&KG>_;&6JVbc zQ5%I(@`(&cRyt_th)p4ks*Bf+gkU-_4`xs+Fi(Wrab<{qy~gEWrQTqw&|&0M)~mA` zm%}ztt+*Q38kjk^|G5)+;Q5}JVx5AA@3e0H?zeITeV4o`bFE{=o_t~1tkbGR=DHiJ zKXKDZGW$tx#*-gU};s9j(O+)$^nHgbTw7ip9%_muj{3ZbotGj#Ox#P)=0xvYR@m*)?m>^& z<~No>I4&4Uum^kgDV9jGq(No1|5T-f6DCm(EL_jTQH2Mk&`6<36j*3w>78KhNy#wJ zmJeZ5+hA=Lex}X2N+rF!%_GnZ6qvXrwvo0Mw1Wd|`95ti){-Yhh!8Ybo87+ybe8j? z%PI#X8{w4+%oxCBsD7%6nnd}$AsP(EW=vN z^MF5vXw^<@7q%8V8t&BT%JPeT?Vj3|GE|H7|ck#y2J+~}>n`ldWLUI!ECfa`RCt^mm6`=()(({T5tL)1H zZtrEyyPFIvO${p&fwS;Z-MO*NDV5>L5mVe{EY!`2eT-W@2_F`I$leoox#7wE1K((? 
zpJ09Wg*;z@R5E zVBS|9zRPa*kZo*v=Io5E&x2ogKYy}!;guGv6p^U%>GbWxF`54Dv6GGHx*(G|h8B7z zxSX{lXZ#LOy4nxBLKVr~ZT(N|eaR;4??KOuUngkx-g;`^b+D&>vT+*1u)AqRkkQGq zDVIF08w8zeO|I=0+Xm3NvamV&5O#HJ*v-`|E!-Vdp3F^*<5{g$E@G+UF8;^yqX z-)2$pDHL4nS00B&FkzG-L6>eoXLK?Wc_WED zSw3gPxs}aJWMCO023W?}Y?j_XiM8KOvd72uvtLR-XcOS=4v~sT<%io#IpO34U&3z@ zua(|EK{$cTMJ9FdI>M8->bC0kq@}GUsV1`YAMD5K_UiCPm=4>M%hD$@ZyD~8rB`V9 zWtL}N60bGT3@~wat!On_Wc`?AWuk8UK)l*Dj2`K|c%(I~yluhmN2BWf@YjN0@P=gk z;3L=VZqfaKsgw)4hLIlE{4+IzhDr2DkLPsn(-(f8GCgj1IzsH@`R5Eha-AGcKiYgT zRrAk?AMCqj@*VKLK$gBJWj5k_ikwiAFDGw`A|1ut%W!6Mf~4w?vx~#ZJ*Ahj-m#QE zuI&^R{xkOF#F|y!8=enzWW>vV%dH{TH$D{<2;YtmT zrIr{hlq4{mXJr&I<;?9E9dJ-d3oIx2m9E8a4M#eD0tn&%P_2OEY}qoWK8!sP)0f4>nKOY=zIk+j^$EmOmU2 zc@VS&?Y)+fkCP_D12rN4ZA5zD{&kAsGfw40xk zHws;kl-u7Ub617$*d6qy(|)B{9N%>l!;dQ}_^Vq;336X;cGTJgXi!UiKlCrpy8Fhw zN^X4AeD$QaanM)~OL&MlF}&eG%$^rZ!-rx?ud>nxb39CqR{a1liPO(AxGgi4cnjZx z9sGM7#}JYN>~R@&59*NXccX=`l0N7|Un#QHoAKoem=4JJn9tV$V=ckI$sD6RV?J99 zQD7ei6D?RC47r*;D3B!;qmqRV!9cJ@Jl{_?r9mDAPQXy#9J8#3f;waD;8~~8`k7VA z<0|tC?-B&rTE`4cYEH>Yt=_U^hq5OlcIuNCZSDS?kXzCca}1E+jVN|Rp1N6+z5B5I z&$4GuS3{WcMaK+o&edn#opZ_?O*(PY#5Iuqdt>!c+M>C}Ap7Wbum2fsceUqq4J+tV zw78E1=61`HbW3bzyOUg0e_3$Ou-wnpNNa2HIiUi5fuz3mvW|sf4k7MHDATO%XnNF> zpz@|Xy#i{+zOEB<6(u{VE9B#6yp509#ZJ#{nRNG@TNC{{Zz3uMe!%&Ev$pKXH%rgEM?CK(_C?@-nXb!zGi`HRcKXhe<99GsS)_t`W00ZOAj!Z>dWb?R zPI($)XZL!>z@>3$%wy>F&})zd*sqO|W)~N#Y=H(AqJgcK0G$TO`)90gfsU`A1>y^D zT>XuRl<%1(dU84{He`anKin3>@wg4069#ud$Z=kRH~P2^-ne>MfV50HiRxK<`1NrX zf%{Z~k+9gS_?>1*e4tfagJw=pub0XaSx?dgI_d9}UTF^M*rng zK$Ni)#0<)ffK#)Ruvl9!a>JT4XrV#FfRRSL^5Tg^?T`9Q=t-Av8a*3 z@B>VG;vOe>olmi7@R3gruRGl7dMs-!Y$z^mYg>BahXdXZoL;Y=tqq(TGTO9%{p5<) zURS-OgA+MhGh5AX2b|2_qO(1E5BJsUZ3z`AUe_PrVqABBxc&Q-fAJs8rzh*dW9tOa01>{j6)+-rJid$E${?t=4I zc93xa@}}RqPB&d4$L=`!>PJ)Y?!Gd`uG5RMYJo_kfCi%>uh#m3^}dvNU-gIq#KMa5 zZfJ}u2gP?Qt7>y~{ez#mbuYoS9lkHX+pD2J3__Bwc%*!fSf0R9P*M9QsIF995~2Og zZ4AUJ%J-iB^WA}x#?S;IF6S<{7YDiM?cydcF93btGzdj%=JssNy~vq_bjB_l8V%#5 z&Aps$Tyi}_E54W|mm>A518yh=mTprj`TYn-WKEs&fSPRH^#1vzrI90`AB#BCe@n}du`=sgIJ1H))e$VuS1Keeb<(|r^vr+2bQ_VEc35?m{Pt$UR*XGNa_ZyEh4j(;exh-tkwfmtc7tBGmjQ| zcPmsgMPe3iQc_c5FwaNIv;K@D`)%Dn@t^)Lh+O3JVMuCfQB_fD(XkS){VZ0 zXmDVqW65GfHv^CxS3=34hwXvr2y$@0Tmb|za2rcU- zC07o*lVaJ6yG#hn2a^1Fjh3Mg51l$bJQ+(G{i`xZ*62|-ck@)n3*x7L{0HvU-BHW& zDF53m-uZkmDT((oY2=Xo|{RUJXNE3{OfUEQg&%%2h;30D}fG4!1Mmt7W4X}R_h}~xPCW&`%n4C z_u%e}n^@3{ss@b|_v9+*#)D#{Y(80-2rx3UNC;TM^(Jt+7b4J#6%x_JaxjuEfD<;` zE0kxp`>FMqwE7sYd8`Jz#NK~bj5j8mc8Xw$UxLYoeZ$rGp)oXy#%6k9z37r_^yXgo z-tc@$)QDRZtpr0X&A3Jm>ka4)yL*AvTt|zK-^>H^T`U`&om&{z-s_FIS6sZ~Q~73_ zLo^tn()7l9>9+c}^a`^D>5Rx%mw8r630kTC+{@M3)A5iNRo^J|lG~X&;8BX10p>2( zGBEYn)JoQe(-xX$gy%(PYm2iT{z~k7NjVz3F8kH=fn$mmo>rZqeh1cX887QW1>IkN z?wq^tBW;OTB|PxT9$(1MJ^#J|vW}>sxu*Sz+BeSrbz!`OgN7DJebI8n&>OMGXbyr@ zX2>KCEYfk`;eW*eOpi7qQb?xrlrw`Ty2_Nj1B0a{v|sfi#JSXeHCI9n5u3rxvfxXzQUO7NB8tECjl;Tyu26L6hjBjs>p z)5trZq#@tPSWGz-rXiP&_~-1`nL`=TVC*NZ&LDdXd(;O_M{S$00||89;%dpp-NMVfGCKd5CgV^F1m_hUsjUPs|bQ% z0}&~T8bH^!D@iC)6%D=!= zL2s?%PF?xS;h|*L+t5!f1?Lr;+B;(3iC&d-Zatp5{A%dD!<&8I?^>L0{`KbWw{cca z8Y0sEO8Mu~qxJf${;UCW((H=m>$cYR)SOsdR~mn0bzPy^BY3rOaCKqnich0w z2Q;6=)D0}FaC*446|_%=$urZr!OU>#S#O6-Os20n(yNIp)JNZ*_kOvb^e=G*?g-N) zlo@Uk4pTD|7S+1~5H*1uKNB;^e&M4Jw0`TX9F%ks!KWPt!NzQ?PWD7Gzc{BD5+z_g zCW~o7SEtiSWpZ<4^=i3LUMvVU31@QYmQ-u14XKQ%nNF6QqnpQMa$wO~x+UEzos3_V zEC?0Ap7_A7@#DxDt3=1Fsn&G(L^?U0jDrz^4}pEQ0)xqL#e=FRzaIJCa%Rh-+9Q8Xmpqh-hWcYnA{*}SyqeW@ zrB~_RFG{pmYlCl__r*FmeyX?287(*x&(Haq=H(io5|_Af$ri8bd#Uub4iDx`>T6iu zvEj3>(Pw(LJX#3u)_EE?eee|VxccvhUs0680^U5=wYL8G>Zj}P*ZpFkVt`K)XFsiQ 
zP))z1lCFRJ^Fo^0%u2JP-_xE?IaMx{wzaJPc(kXsW&g+TvG>9aUo|}X@96ZlwmFuw+2VP{C^fk)8-{Cc$hW$rt+~;lKg0{H3~?WaU$zk`+JvyIU;ZDlw6m$RdLR%pUn)Nl01gqz@r>?&!f{A3iv9 z(|BeQAAU)Tr(|XE8pRx6+>+MY;}EE@S=2A~ge6rrtfo>kv5X*2n{PT|W7Z^=EsH%m zAtv`I*z`#suLT~f2?8msDp*tkYo$UtD@)K4RcC_mI!hSJ4$2CLl~7y>1)o-lFIV>H zwlV8&>%(i6PQ>b%rNK5!T0*VWygYhmWoJUD*QX1@RsY%{U$!OVr^ohHxCF;}wVqdB z1OEEz*pu!lDQwI5X)x|KxW)Z2iAyLQH(K;H;3@rj8i+w&flh5j517fNdkXKH=}cs# zR)I;}kNIPLhpE@oh#PJMJcolSJF`WRk$O!*UO}O8-V_&Nw*|MvmF^xf!E>8*{Enzy zeD&3=^wqULZsGrEG8}z%$$p{4t77}Tp(i!6Z*1<4 z_0e_LfFlxhJ3MJ1T#8BR1vZHj;7HCTUalvn07=Ap*AIisq`b0=&TGOK8q2&c?96;L zo(&N~PdkZVeD5@WI%k2ZPccqFaVU*JQnB8GS6~E>1~$*8{i5)x?$Q zslo}(@s38X-!`jd6e^Nb`GygTVN+@NiDZrl^tBu|)zUjJ*-7SW%t41zSpw`=&LSwq z*HmsgCYz@oc*8lsNoJ0Z)5m1bMMn(AVzSef#_5np z!GF#k6IY2buiiDwGK<}1nHgP9fbJHpL$l*l({e_1Gn^l5o74KR72?UG@Cdx0JIAUJ_{-he0x%>9`i<1`rx>9ff+Lx8~n@cJBFB3VG zc2R}W`13*%?eEU}!6~Zxatxy7n%seL6-w2my0^ns>3sIMX^HdcT5-Uk^E6yP3*1T} zun(g-B*sbQ7JM`R4A1IV)><9q2{D5lXT?7sYhxTq0X~~%)A=}i|CweAwk#()kEhwR zh8@h)e0{3fKF2uMKF^B3wmAr(Zfr77v$Lu{4A5`lIsO`W3xQ@hFh7`Y%$kZCSVehZ zy*=+vbbm@7kJhxDhEm^Fbpj2J#H;8y)k$kYF>!9M0Tib>32!@q>iR=pHfdXKT>o82UpfF$mbx0{G zoqM8Z+rJ7)Bc=MWmZMK?!TrFT!s$=XrrNsLy!|5e8XblD_w^|yONW5aF!s)!^zVlC zGxF%)E&dteAG`&Q(iLX~LGi;<(xDNOB`ws_D#VvkPG}hA^L+f>P^q%SVbI~htGXvE z9P9R#kl}ibZ!$)}^p*fUwa#tOH=Wx$-!u!V%z0G8i0U-4X%*swIaa*Plpl3WYx)3- zWs?U%968&2Z>AL#laBWlHg8~kg~AbZn>HTv_g$NoYyM#Vp=S#zI)|sx?-PWrs6m;3 zT6riC5Coff_hw|z(Chb(#f&oO%N)*V{r%DMSNgy^9y)e0xeMfz5)gMf9zhkDJWt8N z$ma_wIdBGUfwN1aw_}#J1@7;fYi7_Bkf2~IE5Dzm6LULfd@qXSCdx$FRL-X4TVFRFW~R{Uo|aCj&fycEPql5i z{{7F_KAJnHzUwL87_V5UpZmOrIAgG7DM?qc;du|KV zFU`xx5z8`+hsn%z-QE@;3tF0ToyGBt|(y_gP)*m8#9>GtU=gX4W+ewybf!Nd8vV z#I|+Lr%=d$Fc?}*HXA0d2`)u(uU^_&j=qiBc1U+5t9k1sD1pH^(Rl}!Y>jD*dd_A$ z=dX@#k7^x=Zs$oZZ53>dx<4a-AeUc*k_B6vc<6Sw_+HD*JfE3`&JpaKyMz`lfUf|P>UgBn zf%yl3zc>DxIv(S?8DTg&K1^WnjQZDD2V^{bkb6`Q4CXDK&RYz41t9+c^KYzU3(<6} zBiSi%I(jh?l-|`H0?o8>%`NFm6moPZHGRo+(FYU{sK7nHJvJa60J5M$mNS-kl;05= z&B^2BS?W3^N4xrKndFw^sVDf-6%^IUeR~pOGGj9N#E>yzETBJCg!2Q zMyE7oe;EdWQ*KORD5ZX@rGg?98W>@Je{^2uO6$lemkRf~_ z4+70vK;7aI0k#D~GF-0|bZJuLDNZpjgVLZZtQ1|s#=PBEkz2si=ZtuA-OsE8FX&Rb zy$0R>yZy1Rf~u+9g^IWP;aJM4NV_bHj>xG@9U8}2o$l&)!0!O)@%;cBsgPwb`7-tD zPFBD9E*p3@NwPSbR}*d_PBf2BGT&w0uBjZp8sFrn`@#MeE-6>Tf6dTCn>P#3zoeD< z4JqnBr70SYX?-W8K5;KQKe;D7Y~A(AgeTJY*7;;bOTS`1aUjP_@y~aBYT}*~g;;T+ zxshTzf|P7$St;y(HLI`hcpK-= zUG!%iLfFAT0O$!gF(HOWq|{L|0cx;*R0s!n%;Im9X=d?;kR1ff1txNkN7zwer}c?{ zi!bKi0H`M@_QaSQP^)@HdY54hRp689+u-_XL8{QxVO5|H)gP!cH4erm`6+_d??c-= zN(kr&&uITW{RvOXe)rcKs5nG2#YerJ%Y-{zxsV0H#`48G+yNm9=g5EWWFTmhCO6>_ zbvdhpMixSTy0ebAJ7y`WRrR~1#q>4-QEI`VT$a)BN+gG{Kus1*#{eTiZpO^;jv)!R zk&5}m-%RkqCiBeXXkv92l_doMp?`)qAJl;CBIx}om1dt*Gh=7VyVD0KCYip1qMyPp<{)@&l679rWdQMMYJ&0@9fRs^H#_lDO-*5=G&IT@ zuNhWLnUMinQx4>ygJ9!Gu!H7TTwghp6{?e{BPd5>sl2Pz0ag-1xsu&>UiqoV$8j;Z z#SWbXx78n3M;>Prqqv)}14-c8YsY(B^yj{%t_WoZ2l+FHZ-{dC#Z>(CC(rddc{0F7+-2KHemH(_~VBFQ|gq!`jtK}F#CHM67 zpSX(WM8@s&Q4vFYB@~mXE^^YSF`+tU@RVnsvHpPAsl; zwGxZx#H9>OuqN=9;YYnk7Obh=hwl{n%ItZ@r6{07jIFS=D%D_MsC0c<6jk$-%tu8> zzej`)^@;%IEb{@T^Xcj)S`%=agEciFt+jTNOpfj73MR!$v&dw=5NOOi0>|4|vK$XI z=XC)VFmfz&L+iKGc>xF4@39#jO*$3J^!tAGoQ}Jj_?(V|+f@yNZ7U)F#m%ksT%{X8 zo@G81q>cGXBMI*OAIFDm( z*;EuQus>Qz+=@N9jI}?^?RIxpqDDnUQFLr1KZI!il^G@4EQ+f~!8$!#Auyj0#M*i^ z-|4<6^Z8UObh$KSA_1+9h#wPRGkkrmX~{m0KFOl|cJBNHog zhtkov4Le4bU3BmHMekb3@ANYZ_aFIMTU_(I({$L?YX6%Y^;gg$Ux2uIurZ_U3&nWNqtylLeQme$Wvo00V9U*D$xgspS~e?4;Nm5p#G(r8fR=)4K2gN9VTk3Yy$?a z-xWTeAn4j>$lbfZ-N!W)yvr2Mw8L0q)4S1Fs;fu;yLb2Q^?RaQF^meek0z9(EMjT& zgNNU?19xo|FrdN#vw>`?JWyp#E>SM4qzm@8$s~LH@tYw}55Iv9iFws|BuRcwoykWAWFe8N^D++o 
zx$=Q!sD&_Ovehhd$KS^v|FhkW?b5z2&N5*+^`CX`nqJMEs3Gs2vemaVNv^w4Z0Rpu zp15(=+7GY4j^ckl-{A3)6;pSP-W$f}OpEL6-13T|Gx6~@o|*-;jgfQNswGys_xd?0 zEWlpZhW``l?Q@|_l=Qapg7v(entSA}*D*EzJx!V)j=G`DwVf|GS&Sw5NUpQ4%C6jrs2Hg7zBS2N+ClP*4ilf6TC z89yJLjMiVdjqH!i}ALe(GRIgg^tUlc4<(vT0w8~f!kT$Sh zB$**J=a3!VOHg7*5na3poinX6U7$R8!CK~;_oAW6ChueD`j1=0`>fFyTEenI!MAIx zincsBRuq3}?b!+a%8|DBbL`J&wu97fE_7;*|Bi{8>*7lVt!r;Ro7<2qu>2cOUFCRL zv$~pR6m7e=?={(|^JrJnhK;`}G3ysDMX%jvrT$hB=TgCcs8O$z7NU3&xwrNw=qw|6U9o6{f~O* zCZ)Z!5n0^P7tJ)`yWh#6J6?|?u{oy}75 zLF)1hsh{jMZm#3)CeXw&=t_RVluC_CEsg0Uqyy8y(ijfDqzM-B4aXZ~0s_;k~8$sc^rEBW$2P|CqGB3(3N>X*W(stqa-@~(w%d@<`oDFaN_mXb-?`_sB_5)hZ z`uS<$MDI?u*;|&R$Zn4ppC_aiW!@6w7~8LpCF~&2%!1!q9-jJ@*gYM(wh#kj)shW; zgo5T2hNU&TDU!b~JX|EzVkKiG*2kKZMFV<8LJIkOgPyTHA*U33|kNu>#cY*c1O z_CB{MmV)w;+@iWQiB})-fLjTpjF7jn#fC>}20)(V=2l_;0SzqbT$I}i6nMzjU;7ZU z%!H2=U~^P#g@vLPP8y1~0-0S)A-yhbR!CbAeQ1OsQDc~SNZl1GNq+@hG#@j_o`zN$ zc8~$>w`#U1njm2-U|}n^n*nR&Fp_PdK^G-Tm}axplQ~C{6UOe7KIx3zgE5!`D{ici z92qxMklxcDk=o>Tz{w}Q)48G$eMpAR_PptILb?a6m-f}M4Nyi2__T~P2x}|g^Uvth z32E~>Kq#Eoc|Ps+VeGyl<7c_o^brN_gM1nNgRE+S{$|C9cZ>99%cvf9Z1-emZsdxi z3FQG_$3!Kr5^OCE;YQi$9MpTsd}&r}?b}@9X8OQ2jFmaGDa2QEIuls!yPd)Ul5nFTon_+W|265Ut-| zY*neiSBXS)f`(X{q@rO+)X40kSjZvWg(x96Zv}wzPRPX&t%tHhohNE>Q6~+#*Q>*z zrOr5w1vC|CNG{uYi7wfipm`d%KzG0>1NBP-id3A)gh;3Gkwv%en8}zwxY+zKs45k> z1O-RMqDeT=m4HSSs!tIeMwy`tFy6cprX+1}sv_`9(%P&@y(3}a zQ?K@#>aT;RcQyPm_35qW%tU?I$Y=dU3!&Rr&Nm)E^i~!AdYr zZ+Z_+u$7?Ivut`aWT50zdwU@hO?|9AHPPRHAXTEtm%iw&Tc#Q#_pTm~^ILB}J}vT# zz%f{%%+|hDc>C2t*7fpYiCxql6xN0ISvv0|=&yfN`5j`5HkuNvd&bebn}fqt1rVVW z8zoe});hz5P~+smXMmnV>-UgN9q}u&)8UMI(oH=0r+DvMKN>laP<9GplDn!#`7?L= zU*;QBLVERjH(s{qgeW+&PXpr0@JK;}ookyzb_HRz@^~l-?uD!~K@0eC&leI56Z8}I zPXJ^N6e_d%N@b_GyHI4FD~ha}NC13Zq~Zq*@7^rTc*gVS4-585?S$~Bjb^hOug)@y zl^M+xGsP8RtsJf~?+o`xTDL;_-yJGY!j9lvbq1G>9r39meWAaD_n3e2siq5bMY;zu zdBwQV*nMKE+HJWtH8gsGzEMS!d56l=W$Ci~ee&qu(SDI1Chznt^lHo~W}_B!74x^_ zKgTTNmbIO2`&RjwG-B;^X~ngv=e=38I*M<{$-LYdUn|enNu1B0#;kYFq0BETmR}+s z^m*!DvbBOlf_}r;p0<{gUvK6!MHPj`Jc(Vra)2E_9&R8}DN%*1$lqPOKY09wEwFdZ z01lKcSUWs9MPNYdIIlAk2>5x!bpC}tZ8q=n8%;PZqxOv2x*D1R9ZrxE2}x-{9CqIh z=03+H&07oO@&ivI{-uzHCWyXP3@S2-M2io&yWT$lFUpSoAEyg8o4}K_Y_CS^_l)f~ z;9u?3S@nPU8-u0{fQ+*D2(QL&--V{|;Q}(!Onnj6BG{ z172__B*uS7Q-BGzfCL8jBf_`s(3J+_-2Jl4qLmW3yW1{-ggr1pf>b@YU8A&4#kc@E{GLW-3Q_$8%*0hX-|zJj$mp{22*=Dg77jEIPVAP+^E zwaZD4!D(a!n=Il8Py6-WjdxdN;$5SseZ)Xj!86F`MXO?%8>!m3I#(W5kbEnq9GNQO z;TQ-JgH+%VK251CI~a?x0f!lRvs?nbO#49%3`}q&Wwv&O&%un0iolf_*5%k>vm5fD zUb&~-H&sFtV9bSL>BGRm<*7(;nl5`TuW-+I@%4V^RvJJ_G#KxCn(ZY=ldGJYQ;7Z=g zj34<|bI#>({idXxG3qyah3Kp(KzCB_J=1557f9ijeaCdX3E!{v!b zZocWu9|&iSSQfrx$)TG9dn`M6Jfp%J_O~4C1z)8Jb~y-RE`YsSCct=22+W05Fv_$d zokkC!`>BxRmS6o@9XNGjhUh#-6Q@q~wr6!rMgQWrre5D ztD-Eo8ta(mBVVaZeI_j@nUkL3li>@1G(@0ECnS5Tsud+>w&yBtK4gi>CTqNM)(>9& zD(<}V;;+tQ^mBeXc8|8|7o;Vmyl(Ds`|`eE*d&|zVcpDW`Q2Nb-TL|;mrGKvsLL0T zI4Spkak4W#dHZ%pz2bc+vb>|7R!G0UCR0A{@$hLR3&B8c^xzrVu1_!4JFQS9rrmr* zG6_?@kb^J@lYW@u&D$2Pb{!K8pImGH&`!WZ#t1^Sl0b1uL;e`q@aX;0$*;$a%{p;r z$6K>|P8~X*G%h_+Q>%*SBT6{v|B9q!sS+k!jKE;Aj#Af+ToJAF&Q-f=OJUKHilH{zBPEl1NHHB9>IpY{OjxjFB8~@-bh8 zhTwz*J_hmj#@O$2?Yv)FdNN4qVSub~ud_qy#-5>=#U#9Hx(gEgcr!;R9PB(tXA1q+ zN||^Tm8iYc{*RiNOlcPpC(H<7;(VkuSN_tF>PNP{=^6}bI{&^R>B7;AIX-Y4l#oQR zG2Yz~qp|RjjHML4qH0Fi3vm%ibtH)Kf=yv>u`404u^*u#TBhMiXIPlE&T%0lBa1Gi zeGVMay&)t7LhL}xlpFr`^hN76;r~Qjyb#|sqI+?D_=J#xT-$eLq4CK@F!5MFvgq0t z#pj@fkrVaTwn!&mr~f+SY(oJ$JxvDf`?pT7U}s^NZs65c6e zZEm*CrxlVj{QiKnPt(~w(_WvyJPq+Epig_=y;tNxJ|gXR`4AVEwrHmd|KJD5;zgf> zN7k>0`qSz2PE%$^@1usJwieZw zIj&cwjBt_#vTN}d8b^$I7DDpri`VuBH~xMBrXyO85$FN4-n)81pAwOT-k)Rab`VsN 
zgrC0X1#6ihb6JE5{xvWFJaKz{c&2daTBzj46X&=|7s@V>kR<4W=hcV5fWx#t{O!pR zI9Ll@QkI4{@p7Vg1dous;nY%upOpr%u(1e6YT=7S+uU_Jl@d4UeTVnUlKaJD8rv7! zAohfDWlWd0;i@?yiJ>vziMgNykS)U6YXSsYXM>%j)NM#M`Y!@352LSWbtWrI zLVP^(UQ}OtaXs&gOOST?xeVbV&6EEePclC*+4FbP9?6I0o8#6zJ1Rf8Q+1t-@6vOr zUYmA%y;eL`EX@79Ua|Ne-zn|?V%X{+<+TTSQdZSVc;+k5zr{+V~SxsMB8{a04x zPH+49zq|LXc3$Cb37gqB_$1VK*5^FQPUB*6bJrm5#MR`wLPFB*onfz)7c#7m7rkyO z4E|7=S}@bR(sSni;>s7ZQ_n-rCoXSuSzY$!$T!8Cgunl8AIv&?$<}iBjNYT;$B!4I z^}E_KOPMA0ik#NpW5vO80!|M*&aQMGFg)R;kt(Pg;0$A1XNFrb8&`w!{d@%(u@qKE zDOZy%*iqJ?F3o)5aiXD4 zIqH(AixEi+jZ4>E3xG@Rc96OgemCRSh^Je8-fHh}D|Dl1p2$A_1~1PpZ^BD+WY)pE zoz~McwdckDJ6k@u z$A~#1(*Af7Uk6}x)UbVvqaB79v`e4fIC%V_Y42X=@woc9RRPA>%3EUhZSTugJ4er7 zDO-OIOA`wuXoC2 zbk%@O&(Rr^*vk5|7Y7VfCQ7Db&t_De%_vkg%kMaP{ui~aU)@#GS&wX#A<;Kweznm7whnf6#hq$nKJ9Ge*-`>RT>X^YWzaud@i4dGH2!bj0Z%qI1bd2J zdNR!Z7sR(3sk?`DSX}eZVi`YH-sKl#;yY$G2JXDEce;WZ*sb2VnS|pbt-l7`8$D^0 zz)nw~Ya*5_eZLDMF??<)((KsLzxXi)+x=^d?NJTF++z8=#eE5jCCa7tO6tUN53-7n?E9xZc^Bz^*Y=+>%zpfy zflK{rP%k#TbEh_9CuN{4#FG1mWBty_vfz(1xBhwDx3f~Q_Vkq~!~W0CZs(tcz>RJG zv5U&}U!pE?ZB9HXYr73|TyA_;TZ3M|YpfP1ESgv3%>T8YT~tJG=efy_N(c5Y*!c7J zE2RAfFCxsona%Cq5?6LCX zJ<_enp%R&fNrl8)KQkbdtP(8b3pBAOV&7WCYLpc_eNgcjVLEjz1&>7J&N(tfupETp zSln4}w?8_AUK7BC1IFX zSeg1~jj)oN`J`%^uFzBAZH@UI1}51os=r@8iNzfB_Ztp0pt)rVzDy+8cpK>Eg0Eq*>X!zdT6zXDM)gIUm-AY2FHhmDDDCPU4t(TVfRb{6 zw{q8#>Pb;vohgwZbd^rOYf=ML{!6v=mlQEFhI{U^@7{x@r(16r(T;c^MS7qE@QxCH59hsx^xQNMCMi7t+S4?H^q` zOs>xC4(#=I`u67An=4l~jp?h!`Mj}mIaK(^I$fEbEAXI zah%@RpZ(IjAehepU0F~to>E!nu(gDzQw&69>_ok-ab{1vJw<1ZlkkG4Q#w({rh4a_ zRtBhWDjmObM)MuL0R3l5p;GOF*hYoj{mw@%yNvgcr!6;LMX%r2&YM+$kL1bIZ(Zqx zV2QR}90wgsK9RxP2%r-MTf6_x7EmNhHp4kG)1sI=p5G~+`;`KdNKin(kPV|m2`2b2 zF)9b&B=`W6;C%E0>D4Bf0RT(X!yG1QKTa@|+B`2&gHsTFUjdLyr3zOgc)0-DdDI)D zSKiZtrX)hJa?2+&YSa#$@nDE(L6j1#aiX^T@ZdnraQ-Kn1rbz`sE^)2++3%ngTm46W5-;e|dLKUGD6nv@6OKzfUVtW~Ux4TNLc^`8&07`poIW*@h-L z&k*0q$&lC9>vFby`nGT&Z(Mm)d0a`G^l5-H{;*7u6J^@5KSO_`>BFH-q>H^%!E$YV z%*hWYJNOM539pCli4^+t&*_8Ik=paSD1lLaqik#!(>ov@+ogt+Vg7Bwujg=-9lxK% z3F|7clSW7v4w15?5?p3SjD3lep2)$;h5|zFVXL91lqw{z;4a>v0EHOV&M_c34~3IRdu7DBULFL?k!HbXVkdnP3(X25$^; z9?7|Xq}1g|o|KH%?=gF^Ht@{~&(ISwISi8Djk1du){#!G-uH|1;EkS_AHvV@X*t5* z5@gDyLs>u3nDfx{s_;XiF_@E^%nLC>lh+m}M44S_Oo&Z#mRKO)DW+*v1;DGfNZt z*$jK3w22R%P2N`&ir4p!s-E&po_ja-vc71|nbY@PhZM9o6sVr#{9OQN6#UKO4rAJo zr^s`Z=?GSSXimv6yQ{rpLBf0 zF+8bL=-2V_fP75pI|N~|k#*iTn9-Mo((I7ry@IyO*%Gn{=D4~>z$Gmyu0^JH` zMK6Vc)^8!_iMAQC|I!(qZ9nb7N>(fdXdN4DtN1tAF&`cHH+rK(?O^ylP?KQP9~VIG zIW~$FcY-9ag6+WUZZ+_%JJBCm!KYN%IcSNyHMvZf$)K0A3m#H&IQ4WIhZn`SwBs@v z71UMCXo{A2s)az1%65r6da{rk%*8lDwIJhaX8r2);66un*-jgRv|VRwKZ8&Z8(i>e zUGS^m{=)?6**taA8!$gORL)q@2KSz}zM#Y04Rbr^BG&`WZnI6E{00U=2>4fA{40&KJPpyt zj<5DVxh3BaJ86Bpa;fyzjtf>lJ-B>uaRA|7oLT=*oH@?Wily9?0Nq7wM%TPyBvR2m zM8~;0rd;-my~C~C^T%?vv#ArbLgiC4ySwe5tbE$|CY99qPRH-%l}{Jv^1rWr`un`Y z?-A!e?T@&dNJ zHULGdjPL%*O=?@<;i=p03XBo25yyyGJQimyWPtm|!bu{tvW}T$&c8(fmkbdmhPq}p z8?E1M)`wLr;QH;>{`S`aEG=Mu>og}Sw)>bMhc6J?MeSr0`j00#Nu>~woOyV#Ai!cG zSceVhn7U77EHzl6<9u{pJ`UbU6cC>pW2#1{qLeH^J`KY34>iYrzdYjBkf}EG?&VNah`k7vF zx1rq8W=0-P(al$P@3@9-4?HXM{#Gb!EUF*=$is3Iw2G#{b7=U#`0bMfVipgvU@HY6 z!eteCzt~eFciiu1uae0nO2bq3K^UKk4n*Mme!W6A8J=V6y=b0!F7$x(4PT<0_Z-ZS zf-pnngj}9GS5;9av_KFGUYA9UWtKsDovP466|q2YSuhhpi27*#iaF|Z%d`8lyO}ka zeh8D8sCuH4&!>Ue_5>toBEg8h(@}oY`6@L@u*d^8GSfq}3s z5_3_fd_{o1Dg!N9OGtIa%(4v%um(1e2ZN- zEM3M0du^n>K4o9iY72cw$sP%;7KGpsAr-+mbmJukoH6*}!b%Z|K8J*FFKYKND0q+I zkoHI+fm5m9)txi-^bbNQ0--3Z1T_x$v2W&l^o~dgjn~U; zW8g6e3Q^+M)wG_&A!SR->C_k+UVP;pdKjiAzS zKHTHGu3At}q2PJ;D#Mhzn)vNWO=-udd-(EWob%-1ZHkdj-IwvN+)x^h~jid8P-~EbJscNc@OM(~Sm#%HdC@aCPXw~efyTHSG 
zDf)j7>0gW`OW}-$`c1N1rDllyd@c3=+Rw8jx#MWBiL^_=4B*nsi6&A%9%-O<>Yh2N zKblz(P0Evd*^uUY9dde2kI7jDO{Oza|B3YCUgLr;R!)-%1eqs<1-ulUs$1>Y`<(ABx4&p1iDCd~BOp_X5D(#Q9qto$z-qH306EeJi4_ka?rG!9x;N#(S#U`wS_Q3V-`E63h*nN5^v}u#Ql7HdqG1HMwBr{kPcBmK6nXZDV|z+sn<^EGAAFTN_U z*Wn4zF@H}U? z@YY-faumjQnpnR{aPefpx0pK~lRx=<h%DB;?S@3xo9YW>~k(%$OVd zeNAsLJ>;gpnV=Jst1$v)G|VSG_|FJfnIV;PEI1#NsmZ`DqS$CS^PEsz(zCwX04oGOC^<;v$ho_?Nh1KxQo z8;-SRF6}+O)wjLy`^JGMb{3z%*zalO;@kc^P%}7eYEWKhl2Mg@knp}J6oW+P?6Xmv zOU*M}y~u6weMW#HJm#$G!tOPS09Rub{`=(_^ya@dp2~}N^}*!a7a5HH_BF1cB_Lx3 z=J$Ws#I0)Vm1+>uagM~b51jd9$`{MA4`ivkwspl-@T2yCPi&nmH#+RJsg+2IDkMEP zZy*U$@w8!?ijE1pg5zBc^V&cdN#@R*jMe$a1tJI;e?F!wDt zauS}Rc(zM?AINrAZiU9bH*XPSAYu2X6zM+{gd(IYXEvKL2gbrN z;5}VlUMUdVdal0E1j7?3(6cuSvdoC3g%QT#AU)+Ej!+E{GQxRu+?O#RHiq(DlRSjO2_ECOO92TBoH8dDUo}a>7vpw#5c47xyMjN_V}kA zA8Ywh)+RTFV{Sqz$CAnkG;+RCXdkF&Lu~7Xz>2I!>cUllngyZYi)e|J2sy)3(aP#~ z(qj`90J3vX#t)M<(kxTE%`{lWD!ylJ(oP74u>?#*Akl}#&|I{crB$5P+il48nPt&} z@ER;y+I%-W6;s9DAnK)xeb|;i{H@Mg*HIw8ajeL&Nu?SYutd3SI(d~K&=-#F|i-OosvH@&SG>v^iGmWkb+i3{8vJQtfK zjt5FQe4<$|R_7DDcI@CSas1_Bu8V7Fn$42gB}-f@N?K#IG}+|2JTkMPksm6tnGmgQ zUaqM*b2aS;pP_ou6AE+f{yjT%|I`V^(*`D%IHv5D-Q+(W80dfe_{Hjir%wk4ertL@ zFWEVb3i7c(HR;XV>s(j9bg5h+ItH^9Ws$#Q0ZgJ3W(tiehr|%uSF>X z!PMFVehq%Fs3atZ#)om~G{u5%{z(WkWETIHk{b!t_}lTG-Qg+9TS{Klo16W*O8tTM zkvXz&amM1}b6*ukHLttZ(l(Z8#a;Ox<(a!$d!(#oeV;}D@ASyVmcv<%tGGiB$7{BQ zl#E<;-4Is|{#6mW4JVCAQq&Ic3o>R%3FKJ)(9*Isu-l6Mc)^by(MwF>$@|3rWE{w%eD z_Ep&aJt2A&Wiu>uMFY5`>vmRmf=4^1DN1Pw)^u#}-#OMP?3CmU=1D;_IEZI!^YoMu zt7HK@wi^I@vOSBbM1Zq{PA)1<> zcKej7AI+%X4)8`Yjy|9!iB>i!r;@{iVy4-8#4(#kT3wERg#EsG|`A_x$9y&(xggCf@ow5oC%4nP(Hq2``#cJ3k)i98Z-juDEld;&kHE=%xmdf8Fe4l86hCAbcsB8V zpoX*9Tl95Ox$K$d1o%n$0XZ%w01H#0~WA^Q0 zQ?Z@&E~+movL(G*alXVZa%OZVJMdRtxw+UnirwemGNgrWTcK!I1J9n_(|9}L#v8cB zM42p$C>wk1t{vI9USa9P!Uq~Hs41jp(%g+Vr9J8%{hs=M%~X>5aqp07s@U0MBxFzQ ztIxm8d0cz2zbf_h-)~mg&z_oNzP|T*UnJ>@X~=AK>niHD4c{zGQa(fWM!mHbeY81me0JUG)I*o<;Y5TQ!35O@B( z_#xZYhRt?Ad9m@<7UlsEk^r3>oSB-MymPKau&b zw+xUCn4&#YeWL4rsMX~68;$QS>U-|GZ;MCY#LU#o*~}M4u7@IR=dZC9Gxu$S4g&ls zuRCdMq^8OF0NSU=b1^~!??T?$Wru$$`B-yoS~XO%toM-7(%y3QiY`TLMrOTHnxm4s z+1%NSTdpkRtUX&+_OIL7&GA?Cd!{kIu0AsQ>yFzuX(o^SzW&|zAdnlG#r^%5EiLQk zZ&mvi=_N8h7pl&>wblg|`iNaS)p<+M;!<34T+^fBH~VR`dVJSdV)+~9Uo@(oES+zE zk1DaSU;O8uO$v?XyYku1e;5zU}wBx9|1Gb^Be{?RVX7*VkJ!W4sTO*Zc5#zn+iB{n2XAdn;aS zXl6*@+oyZsc51}J3-lA>Gqsp$FXk9>SFM?ff#(ddrB}qlsX{e~5C@~DqSaF|Q_}Y3 z#CwFzgzy>r8RM!s@-4DLI!CS|cKvytPjpH};e5`VWaB8=TP#x~O3iBGYzO){5#8I4 zk8E$BdBYkeeXs1#96pfiL9nHiqfm|BO1x0GCyfVreMUFxj|l#<&bKoxJQS&YxU!*- zD~7v-9(_nn7GXyHn&1HCa7g4(xS)>=)O4&g@$QhfIwnZDZNi3LX3!NI(1gU9WFi?R+r~w3s;x)P+MANshAQT5`R&jCZr?8J=OhLy zb<<0emzu8N4}GnkZhrBu5-hUQPmeh6N8PqavsEowgV3!`$~L7 z#K$+miiaGhA2vnazwLhIT*+?!3)8o^SK7UhraW)zikiKBbh@caG!A~Z{M#928l^?B z$?2xa&@DFO&21cOb?Nk-_#Gj=`Mob5H(cVBmC^c^90I4V3@OvFdkoSC^9=)>+5LXcaa*$;s^!KHhZKf zqv9up+ECT!KjFiR(n4Bc8KIA8ktqz$1doRAY9~5=dnV@%Z0NGo(%0fk}2jqmQ^|A`@Ya;q=_xR^kxQ8LUt< z6d-KCL!$xbOye}5)pSb>k%?A#vUrFOpQo^pPUwU1l%38_<54kDUe3<^g&er|gX21F zg?J5t*}nqSV%QF&(WWYXH*2Ufr!9O!&7!VfPJDihu@KMQg`zkMch>G;U9VIa96z8@ziN!gRxUd^ zM@D)qzByZxDh@X64Xsvn)u+)72PcDXv8`w&H|5T8%AvE=da1Ud55V1@m0>q+4|Lk= zjg0AQ^NI9LlE)xphuTls87e(R(8998(oIlZHOh!|#uMx^15wsgp&$8HIQIMlx58ZR zP3kf+Nwr|g;}&(9WaDG*Pe=|>*J$1$)(-5l6RNB!`}gVd`DNFaT~}jJ-<7%=d>L~^ z^2@@BS5c4{1i#l->>xO!U@3r^lW*?8CY?}zx?AkrCom7fCONy?lp7vakeBiLy56bV zmrUgyV(cKE7+dg>6%JdF#XO&7z^0HGBG~8?F=;gB&mNC90`L@(mBY-Ejj9EI5e%vq z%ramJgSM#~YJ|E%Fk%RRx02G~{#(`~1i~`oK^j$BdT7qFa8m+3q6jVI3G`+e_^d|= zprw+l%7!Gt4jo3iGdb%Kgf|RAXP$F^&+<@vUx&GS>VXi*7iyMPC} z34i^3{7&1`3x`cCjPssf-!>Rb=PWkg^KmBRc+-KCv}Je30&zwAY<(SGe%J0Dy}7i$ 
zvVY=Lo&A?`^KZu{;vOEbSzcC7WR#)SQuBwmtbi(JsDQg8lqaL-&(Sb|feo#x59&P@P=-RmFxQpf z24m=IpTCmv0gvPI*5dFS3@x(^t!-R>XQ%)-q2h+aDxpS=0M5fRZ$m%4i zzyZet2|NYakR!v&?zkRquE+gz}66ng4}K`pJ3S58RRq-pj=vRX~wq_ zfJZ%;7~MHR^ojmA!FT@GI5^T({7&;8c#A zu-1Bh(Rz2#)-Hek7T5Pj1Ii>O8=r{p_G=F(ByVlu=Ra0=m?o0CDqCx|Fs2 z+R#R0ufW4!tWH}!+5FvHV9S^D8`H)=`^*2h&M0YkR0G*rH|o0+VtrcIeM7A1)j8+A zGo*g!n#&Fu-}=pRcug+t=mJ?NyRjGXw=~{Um*ML~5&Ua?knDOI$#^}j^{9I<9d3ONoUK-! z$BQD6_ARsd=@Sn1UCA!S3pW?ww||Aifj0U9zCryq zGXpt+^wZ?Yy)G+8267%R4ENEPo?vHTyJM!K2&g&j7Q(xy0HR|=Jc(d*Mt9!DYNJm&lbx{3 zeaKH>kSnikssma#n{3I+LePHY`aStk4{JefY=V+ynRTaW9osdK>JV z+sB`*PddE(#KBOL*$1B<=TF}kWt)>VUf>7T7CyPWr~``^Md5Q^7&09!$iqIW6iZHo zuq;~HgLm-hHzeDUedwTd5a=7&aCz)Edjx(U#h@FbR*-^&KyUs962pHFJ-uS}+3+_h zln~#bF(9gi?tu_j!*u;N5yNoLZMG%d&se74)Wpe9qkBasn0)=F6C5xwAp~v7Jm34M zm5wGEwSUu*h3#V$493fb185GoN$V@al-b|U@2gi*S{E9=w2kSjSC!fB=@T(5l!m+e zdV)L0z3r!!<0O3lWs1ELrs?~oWr#Eek&Y0HrBq5Tadcrdg^C#@UQ2gA*?tp_fqJZXJqWW3tJ@KDsu-Cm#m+ix>mYBtW})fB#w}bayY%AIbjJ8G^YT@*R*?7fiQGAKWm?Ie(8|mb z8Q(sf-nrZOcFfJ*Oj)nUxHs$O&lEqU;H^qfs}wLgcRwCf87o#Xn8b1>NOSZWR4{&r z1KyFTfVj_`Zm$1i8&~Roc-YW4b8MLAJM#JTCn1QSf^iCse$)cRq6xq1QmgUIy38}d ze!V3wUF7OOY0!&5UB-3n0xg4@j9<_=`u25Ny>zWIEm{fNGB`*9^L?-k>P3#<8a4)x zL+uYdLW^F)AQRUerg30?wsmk&FO6+A9^)#n7F3rY3kAKP*&pk#-t5%w!#iq+*@)5V zq~l6+*$aY~_F+gESNUeO#Ghb6UGK@wN9Tdj$cH7&kI)x%N&*Oyj$XQlY1{#9uc=@< zr`%QaDyPy_@c}0wAI&K<&B!^Gb82iH{zdOFT_1uvuy(-IubB)pbi0y z@MA}h>E^8Bu~)hrSJikzXrE0hc4SJX>P@hPwK)E(Yi*gX$7Zy zRu~y_WCiO@Mt=(0o2U+)_AGzwLK#JowW63Dc}dwu3>WQ<7ci#KC}Y&p;Y%pW&C{8v zB_3Z;dY?_|rj;;mA!vtVZGotGNExE6PGR-m>vkVL^(p$$R8y8TubE&c8fEhzZl`6| z8G5EmFiMQR(T`gw7-K`eHs1xKvAS=I*ZA0_lf+&b4A?l07 z-Uf%I`~CVmQ46ljJ_YzHtug%R9e_gD>}AwYMwpXZG-O z%g;2#HQD9mPT(+QD484FclriH`UKAf^djJenn0s+h+0bY*8b*9gTSMn02KZC@)oWe z*DWY3GtZilnOuaMX+{jeYdY`tcJwdDw2 zl1=H-o9zTzpxsiARwOYJ47tFpbibb8_4kjF0IEdtkmgyQKB)y0%v+IKk{PQGVEN{y9z7C5KKV06S@r2m`ss6&@@Ni2A4dzF!L1H#m| zAL5SXkX^aR3>`WK+uDE2Hf3c(_?WGME;YR%_Y!q|e(t&V`h;YOH<`0g1OE`ag?N$t zJDKm$Y8OG1d^bmaNBoPZZeKQ4q$sL3QBUn`BfcR&c1U-~m^HsNW-?WDi~3AGYu@T0 zxSP?wvVGN<$sxl_Vw0%?+3g5}jH#&6+gO-Mt)41sUqyf(+TNLU8N^A$n}&6~8l^3t ztko&whJjBfVumL`xkskliYezr@*@ePjVM0P8sgA#_+f$EfP&Hva=|2I)pIc8br?)n z?FK!xa$g!iVp+%TomzS5zGvm-lE-XYJO?>00q6GLGuf?c>tyJtORChk8POx&3G`|gV;C=^os<3sGk|It4_ex zm2G=!N#M9-u*WV)JzB`~&>T|s< z3kTgA$rpxM;BE|5YV@kFm|OT@LC1PAHEpUBVo=< z80DLDg!~sjw!QPeWp#DkyUb;YVFPjN%?eWI$_D5^zETgBs(*jEM{=&>;K8}H6&E)I z^;zuLaNm8W)i<*0Z3nBgjB)YDp6PV2`{h(XL5t7QLH{icJdZu^etF=*xLY&udcVGI z<)bB*{KEZtnmSqSdQ2XFVJ zM6uF=>C&lmha*D&_S(xDV(C5|ZiL;&~`h%!?n zS$Hyt3WSx|yXY&Ha+wsRFc71O5JNw>BGYpf5>Uo54c;Af+2#4=`~i+}EST@B9+jR! 
zy!9d72gb#12q}~eKSkB3o=xlqV|>W3vBtp%Dt7}K6qeJ^+`_%HKgEqy}9_&0f?0q4(bO<^B)( z10S^jn9zJ5pbBMGfJ4w!KzFX}6r5@{G>eX$S_8;~MUp_Zs7BB^mG71h=6Nxl%3s$u zwn*$^s4|^#XmwD;s!jv;V#X7l4;N}?=Pcr^w{x*eiM(oNZ)PgVK$sW~vVk#E@hVBW z`BQUXU>rT7@zdyCx09mZcz0E&Bzwn9j*tf(GT>+>s{wT}YyNENsJkohFpiMlslmQq zy$G%gQ?YJUlJC@neblL==?-eb5kME~ewhW2|Kz&Sdvjy}L8}S-r&f=DK%?y*MSZ`D ztymt>enpHzu~Gi!R4Rd3&RroE)A6EC3m>1z#oaC|1=uX%0wv0kr(gv$&*CGs76ZjB zKM(p#JnlDk`fs~bBX5CrLqGsGxkN^8CK>|l>wQ^#!~;(4SQTP7h*6Hoi^@Q?h*Pd3 zr=%z~$Z6Pq9Bx49s?^wsBA`$LriBagfWZjNvt2N;0*jUV_$#^S`^wh>+-(gq4mD6z zfU;N#nni*ODi{?dvv6?P?*@*n5a4dUZ_XhEaRlL>&@$P{cm`E_Au-tMzh$0H(5KGhm$Q!b|r6;kHC8p$3wN7gU z7*JQIOgj~vT>5ei*>Yq}xZcRk`z7JI167yYv_8e0m}=P!?Q0Qp^NNKS-9SZ!oORoE z)rC`&^z1z28NVQIz%l324~f4~lLIadZvT19|4GKEKU0u*Ty?UV%jNw%;lru)(hrYv zrP@cJ;_djbGh`F6ICQ5sSD@??yxBW07|RlgdJk-_UsG>=Itv<~vofmQN+9Uih_$ds zqX)4R4KKbM;>~hH2`7uvHsABp5=LV!%U4nk)JYC8^7Bqw(qzKD3)s91sK_M}x53gH z-EUCBk)RD8clFUj^rimHG<=?@fk}aREl!F4_c=4EMBQal(Yk{u8bvhBGP95tLJJ##DI!0VHTI-jfZFbS9 zy4Fd=)pl#p%+=#QG!pyiF%m`}YBTSnTD4J+P+vFxF2sTGnpsd$nt z_A;_!yxk`l-CcBhr;Y~0#WbpEsOWZ#=!sY~4#O3{^*aV|A18Hq+QZBpF%dOT3TS~? z(!CrL?L*La98}oYMb0YH>>>uCWDA1^=Lq5RULM22H<7Ajb-Cj8L>ds2LWB20o~fb@ z3cO$fT3LFU%!gHw`5fep$;hE($RllvmxmsvS8ch4R05{2)MaHQx*{b3-SZu9=KV*% zI<3^2LZt>h{7-%CLmQ_bTp!Av3bd2g@>?z-DB;lHYrPL?7Z8N;rsQy>_+Zop1%~j= z9FCgJ2Y7MU89qPw+TF4S3;Vl+GHxU;ZKo3333 zQ|T$hU&eMC-#PoxW{$i~Oi%YrmINbHJ39qo4@dSx)$y8`E(vxkKs~e((?#-a1e&OqKhq@C~>(IbGE0W=A%cCdW9QI`U+#e?}zSy%p-?fYS2pi7te*8rFPz<{qD zZR3QB=h~&ujf#f~3q|GL{bW9Wph)U63AjE8IUbO@xONmt-@3@x^l8LIbeoz2a_{!e zv_<0)FgnM^ar78wD>C&MOfx}}9J0%$ad9IMSMF6?n}CeBJ32;1d7WbH;<~f%u1~z{153WpFI*ot^1^!raqi73oD+7}WtkgE!3o%UUpZ9R zSQ+=Eu+ixIV}iKu?!pc0yR!$De|)bm-R1Z`%IDklFZ88xN!=rTT3)q|!OjWZo}y;t z_?>XJ#e761r~cy~yy5^H>*^Zi69L!)V?@o$M`ikb*qyb`QER;%;lWc>(}esMHX==m zlPAgJx<~pGxUl+opZB|w4cX~%<3>jAQP;zNcBDTk8N%72z96bm4uu{ogt|j%A=nc? z6+%%&jg%qa2^55hGY;W|q@nts&y z3G1lwlh{e@OnvN-QY5>JT`ozZK7~HBn+bNg*l7l4P1&hBIScDZ_@{kAuk*k0^}*PL z!!a%&JFUaO+&L|zYmi8&Aw7yKo65n`@$Jgd_EaiGgDNxV_uw&D3?l;#Zc5B5vhMcr zV-OM8_h+!dxDYSS5-t(f13p-{Oou@+4Mr)2T-$tQ8H%n$JI!6HHL&nVDeS$W16A^{xP|}S68v~Wda3c_?WClv$&QysA8l%cFO1l|ROw6Y05Kt=7 zlz^#hXA*OWQ)CnYM@ADcRQT8eBA1)X1d0lZ;Z8<*peYhJG4w=KLJ5yb^lLY}nY z=t%dia9n|XvWn~ndY#|ioFv3AUlKv7zZDD^qa4(0=0pS&# z?N)vYNj|Nm87aJgy^F`*MdJo7NsviI5J6ZF$8UZ;Wt2^5lp-_DaY;Uw=f-whsjvH? 
z8VY#}+I7WDoIs=$NdzKQCRr>Ipp`-m3=aMh()s$)yXBF%-yy$y*MFX$y@>WGyEp{4 zZuCHSn4S{Zubt3i^gxyvtO9O(x*fb-ZNy8vE4uGUNP0?sqHypMzP?X9iFHmOJ0iia zPg%*5O`{!xjtu0@M9C(%Y(<_7f?s70UaFG$OjyY#$)+*A2n5wG)*kKPd+s7!*-2;u zygO*f<`rZ(I7^Ezg#OKXkMjg@{9_P|2|zfO1Vnhp|Ck^#mUv2Z(viBHU?kau)mCN0 zR`QTfbQh9mi6=SuTT!+Am`{hIYHEV%hDVTZBl5~}zPOxEFU_hG?4DO1gKm{W&DPG} z2_|O$`r5h)A8D`_-@;xH&e1Oa`B-CN8FJ?A2FG&&oQK*t+HfN0KLMdJpAW;iv>Tw5 z(aGkA#$3#P6B<*L{e~Y?m;Gik`1Qn_P(Fy~p8Y)!x#jZR2ZK$Af+u&?)x(S@y5E3y zIp$*BE@X+K`cOU}+PWgKR`>ki>eFuU$40J7a^fG@$rc#CkM&#q1Mi_68+w&+Sq9gn z0Z2|4)3J2;D}ltWNictrN`xOnMDkGTYe_KfAaeYwohs&jN9|ABVfLK@_(+>xiar92 zA0g<+*@3ps6IoH0EDx4~liC|q9^`Hx{on^*evcqS9&vy3Log(!wwB~>Fb_tkzP>%)5L z4*kWvl5wm~o>~7kYt{m$n~uh#XdOWcn(A3?@e0I&L%5|>KN;`&^NPFEO+3u$!8~6{ zPyhbZo3E-D$BEXTdnrZTzV z68HJY7(xnaW6^zgX(j=+Xbe*y%Sk$JF&mjA%~_bc`>dB^{mz{cZZCth8Xlev&CJ(o zc=U41$1R#~tnoqAl$w&!mb_+QiqWh3d;zc*`Qz!|fzWs`35G-9_LF|h*PW9vwI#us z=KnQ9-*fK@&*&}0?OsD+*8T$7*$MxM0O_@(OEZtMnnKoJJnNmJZ$r~hvi2|G`@gW1 zRqvpjw`R4vBkg;_wuY<*zS(N(+AG|L6cTg7f4%&iwIkn(rXTk#U9M={L8&P$Ey+Lc zNI75G@FWE6>G{B^m7m>l4(;Ba1=T86Gn>_1SaYo+MqOG)J4`G2)=OGb67SX9r&V&+ z+c7xQ`k?hecFm2#gVuO0&%Fyhw?IV79}7J*0<%{5Us>Vb7FeZL`?smp5S(%p-Uuv( zTsN1%sy~eUYqx@v`4STF29a;X?X+V^c;?8M7d(6S21>U2UorCZ2>Js|V}V%<{p(<% zKuK_5Nf5jbFaB5vYlV$?Cc~o)hls_38(g=GrI;8m^mun{_+5 zeCE;XxqWlX!oRldIH}Gt$(ZTxdXn+w^o{4(w%ScEdS}F2Wy)&}CP#jV4_aMe3W)8^ zp>aLqk|-}PD4!NwuuqE|K4sZzn9H29UL7~K(=-k4TcA9Qrte>0M~ zqtB7lW+>}wBFW4&NOZ*+hrv&{Y)C0aSD15(!LFB3}H@{tbN8< zuPF_6;fEf51PLe6!aQ!;^J7`b#qrurC0c5p!UocQmei`jd3k3$41ifR1JjXy9O!mg z^)^g8B!Ajin%rp)-P3gwBu2PqBoH)LKQ!tmfXL~=XZHyWdJHPy;S5_!fFm<27uedt~I>%T(vmxV7Y@1sxIPY;V2v@(tXJ2Du_5x6P8UM5H)0knV5 zXp&c`e-q~vDh9?~0!9PokoG-q8;SI5K6YPG91Y4Z86Th6tbqj&#KUOrVOS=rZ}WKVM+C3P z9A3LHE3s1EIN1BEjsNx~AJf7o>xah7{=!e?zWY@8psl;;{PIuXGuMDKW4Q5;x1#6$ zg<)5?>%U{tp8WPIY#k@y>pJ8^`T# z27#?@WM>rP7;$>*Z30pNzeT6()N|cHJFrZt8w}fIqE# zv2XHr5qlqh?|4)nw*R}!rY?%N@&ntNyI88`8=hj%oxOKUPa>?4v$aWgPdV(o`TC^8 z&iuP3<~tA-T>X{!(0P$u)fTu<4ZVLQt0*hc`{AdEWDv^@Aqu(rCJ5O1yM1pFrs4i~ zm%(qck7S0&PZuB!ClOM4R{umIX-7gzjzaNyuT#MxxF!BUJK-7B>RAfbRk&F4$#1B9 zc-%(3FqMz=zy^xps(KI0Z(&>}aEKS+S_+AoaIq(Ys$O)|m}*KjhA=b6*p%H1kyr)O zsOl+el}e06pedtnOJKmzNR>SWJ*10NC+HrXA}mzig^EAY8-re2LAqz0T`~ytZB)?j z;HZfgW?7e^8?mX~qFOJE0Z}_tXR0H4xnxsHge*eNN+3JaPHj&xCYfFc-PXBmY&#zB zYj(i(+0+oX+9hjj<&l7&5#@mXPz&u5hhkuh1*d zva;Jjy*X6NVJ>Rto=MysBcS#CvO6zAwGLexMd-gxbqHCsZjGU^m1fx`-DIiM{;Xl| zSe?PGezC}utB^JLXC0-VhERK@Tl7{>fov+s#&@lli>71>P=MRpxE`3BaAKo97kOqK zkA{<w&Sn=f3C?hZEdNOAQe?oe}Xo3rHvXfLTEDMymXw_>(gtIaKhVB4{O7 z7(Ks_Ho%4ftN9GOzQ=%KJsS26(!NzT58?Q|@DhdepV2%N3NuSOTHj;TJ`CB}Q1U>a zEfWq`>uAy)m_r23JotG0>Qp)XGq@8 zEf-@6)cQLWgfSbhstcYHpZ39)cCKgQjOG(NsSR3t*M`=$Ryk2tQrT{S&XIB#;v!DP z!I4&XG-D;M+=9xW*f5w_e5Sc>b_ZeY2b`Ojz)MlC*5xuN5O42h2>W#wdQgIKa9m@j zjIf&|va!;Yi+ygcBu9JPO;}07TIIP3862np(T_Q!WY;rQc?iowfW-?13YCE0?O&px zf6A(k&k`kefJaYlzfy(IV!2{rAgC9RoUQAPvPdcKNtm zSd6kh6j#Aa08myr{Srg9SuQUz~FT=DqjokKA+~z6ZKdhi%*Ktc@K3F|| zAtQ9^yAy<5ps@;3W4q7ipuY9v5gG99PCOq4`wfn#hwZqG6*o6e$#vM+S%p1gpGmo~ z$pOJi4tl7yqj=We-^(&CD#*X>tTbJyv+nVr1S?3w=oEHaPFkMzKfj2JGt)sCU6-Kp z>nWM5AB0|mOXZhRjqAP;lBu4BM63V#Ybsf#NLo0RdYo`w$HQbQ_PT%~A`-9|FzmgX zZQf?U>|#}^JJ_fe^(+rkRf)=c{f;d?r|0 zz}xD+-5s{t?+4w8BIm8kxsLj+wQJyASP4pB!BmyOd zS*ZJalpZTfWq%=7Z5ophMgS>y6Dc^e8$y z7KVPzs7{px*B#`>_`{xWZ5Z@0s>9E*A%>SHamPmr;sB5)%+LFr2Z2dCOB;G1kyTHa zH~#&yJ4gDaLhO~%IEQ6Vvez=1=ien=sMwz?@|@W&Gpjg4VLeeYFKcG^FV@20`cPJu zOp9!uY{i}j=IX^1R4J=AI;rC8_w|EQwBj!oe@$2XX#eFz^5JE_wCzV)@4znsfxJealh<=nF`85!zp z$yXM|+$jFDa#HF|J)r+uXOEMi=9i{{wBk;4-Y6C3p7@C4R$m{ z_$ZYrt#1_sJ08DXm8$K<5@HD^9q4)j3EmE%JESx2P7U*JVDa6S7Uf~Kk;3cw6pNKfC#5=! 
zw7@H_3>^}Fn=Hik1DUy6Tek4od!BeXm{j5OqsJw+1 zdM_hS|2jnY_-n^&?6Ll-qMq4bI-t|a<$gWZ zDnH9X%zu^|7E~;pYzTQ5wMiSU>($8_0b|w6EadW___EEP_wePPeStyM^&x%0 zMVo4mA5uSCw!Q<4tLS(+5WWU(%u#yteBdosztcGF;9oH8w~59O*O}n9m~TxuUva(+ zKOVWDi4+u6L&@Vylql@dm7AsxzbCo{5P24;rAGe_ey~x1e&HARQab{G@o*sVd&G3cI6>8R0!V}VYAC{&NFBaWOk-E8{6 zNG8-2p_r^V;+63Cnv~Ih(i)cXeTD>uA@=zc*(ICp0V;%5~0twSyy8gL}E* z%i|RPDXxacxsfS>>*J4@rZ>1_{I(5yxY`a4ztBovJJY&kMq^3#&&jIU{F z!9OgyVR$QQ;m739MI@61&JS6M`j$2I1r3pzt&YK;X|w6htn9n*!)U`^pc?&`C#`MP z(_P)IH@EDWT5T3S^SnLK8Y#BPUM%>4@eK4;9{q@sQIy4t+h6(p zd+KHf*K9hiB({I4h0nBRe;P{mhsz!>kyxkKfs!d3-Tw9a4aP#5!Y#4vrK-p~4~rvV zEb;zF)c|UDJ8%LzrwAt=2c-9q-TZZ*P8iTmAk_C0^L(?t%XVrk7VJ;^r@lbC*_Wvu zkUMR8&+KhqA3~Mi;(PSwEDwIN{k8*m=B8uI}-A<7lcKi z3FpHimUQn}5WIl@o$L26&IUmYa*!tX`{&Is00p1ZtXww_cxSlnK7lKhRHSSWEJld( z7CRQevO6dUSe_;fiZ zv39!9j)gABWT85V#9S<8F@wok%wodCLi87+Tq<_KVkDZLVZl_~U}R24LVLs4%yr^s zH6`32@^?EAy1`n?GP5)TA%B!VlX+I(Z8zS7M&U=E|EDzZzfpJkFSC4ky=IPyBIBt2 z7c`}Kbbu=b`$?o4%GrACLabXRQ_5(WB`z_H_9+*v5#~PIQ+DHY#nh1q)KT>n9?!7y zR`T&z?=m4vr0m3O%V7d+ePi>irh(VMi*xJ*^K$dQg>}t&z#o|wQl_(`=a&^BgK|3 zKk%5!lo!xN4lbKHv82+_=tz0JHXYrpNK$B*J6aj$krsJq8_rmj zTM~qrB`iLnKX4Pd%bYAF3~l$yl!b_&Kgk`PVn!W`h=1mI6enf#ExY%>eOIM)SZa$HbN$e30hPl}NO5Q(akjDC5vVB(j={v%zYJ(IbZGwim8$fV) zagG!tZM;ZUEQni7v4e$ieuuOPVggeLB%CpQiXD#ZCZ_OU#qeT&Hb;ghj_M}y^UK$w zy777zQ|vWSa6?s$>RF76Kmj9!K;iY^7GasiXbCC;tIq1ccM#j=8*~nI&SMlyE=j2r z@^bYEXr5BYXHc{V0P8~u1^!$Q5*CAI3j8<>{67cb|GifG|Gg&J;o%WEAUMcqy>?Xe zUza3mXJTgwF~ze21?K@7nnH+$J4!>;2OaTQtZ-Vfh(iRpJt}44RyI0eJQ9@E z=s+j03lL?O0o4c~tj;Tzb7|`FIvA7SPCW;TFi7yAeG@GnYMMg*=%4!Fs(#m1vjZJc zSJ#dX_>0aeZYKWLl{m-W;q=Elb~q>LK*XU7>F{RXcw$%Lc%ph79=ZyJu{@`rc%T0u z*ucQm{3HpUa)KI{0IEJpkYe=A6CrkK3`H|qSPICAKFZ5~48fp(6`@h)2Me>%F`|gm_#6%K zqN9$9FW@9tzs|g(mKYhi0wh8@CZ%E!c!Jf8gDGcP2`CDEeG-vIq(OEcWu!upW6G#%n2Woa9Z{q41BoaI{sRPo!+0Y(XEO0W>`;RS*Xc2Z$J2u0)hVr>fx z8LX5;m6Ff|Rb{9vga02I7$h`MCUh!*;vbAL2G0*;V)@rZaz3&POtM_bDKL&Xs#3w= zB{F=OGsAd4Mpde2JW?PJdVis+ZDG}XGXr~;mZ2tPlO$Arfk)8aa%@p1XtE^%{|(#s zf8M@H5UD*F-uS3nuKyfPgydn+k`O0=x3b&<23FO$& zbHxedz`21%i@%RuJ<}b2?P1}aJ;r(Vzh^AEm+>#Q`b|YZ4kqZX|M?8{7v%WevBVm0 zWR<$I`l~X=89@YO{ zjZ$s*BL6n6$nN*TVTD6&BHwjO{C&5#aN72Vuv2ylm*T5u5xd{rxHS$@9g6*TMruI>AWuYifK!*a`8 zuT=~lIBin4?CzlA-7h+yM*2*AnnvVGEd+vwwSziTO_xCH#LtQpDE9Oo(G|;{ot66g z5BXhBi7Fm{Q^282q7uLW5hH1|Q0yY4g1Yra`0ti@*Uj zt^5GB5EsR&S%u5(qs!Mq^Mb42^|mfY*RG>K<+<{?a=HmD{m{%RQ{tS)v#HfHi_#Yz z()~;ZdV%N01y3(?$~GPlC_-7aVOKqgjd54`Hf5wGjY(NYXC*IY)Mgr=NJpQ)GFW4_ zXMt@1MRI%c>!PflHI%b6U(N8nwmWP?NC4!yo^{u3z4w2+_JumG$h{kfsW84D9Ooc| zE@&{vK?V-Y!0=`Hk0#Q@Wd;*c?J}&_LMw(oF?cBlO~Ryz-V{Rdt>%0k8#0S7J~(;l zJjBOtaoC!W?|9BSC4Y<;PFeS-ijN5#^W$T;q!`Z$Om@=43C2TD|G1y>MZp9B!@}xjFvGj7crz#4S zZ`bTgRMY~2;B9cpyL=yLvz zX{EG3(xExTr~9k}BUdr*M3r{#3iRKw$)m=>xVSUH{QK+LUXdJ{55?9l<)(ZG-+)%o zPqDQhg?u<}@o^-(4er(@Cum60oZx0V#wRGK3$N8h_6t(5CyK23ksnwr4z)thFob&S zn4zIzh~Y768RKrQRN)mJ?d1gzNBA(qCqb^lD;Op|%ii80(wt*QCrWb*dLQ>bmP&Kw zxpHYwZhtPkP2HTy> z$~BZ47EwLOUStnK9>vI!7k3OxMb~(W&Uz6F8Fonfs_mNzACdl*$#)|k^$iO;jTur_ zuJ!`1BVRaVPGC(%T6)5gZu)|_A&I}lUzLS=K@*i)68z+h0HD~xL}sh9+?PZws3MjU z^ob>6Ehv9wnJo}4xXB1&1+n}XevDwU6+vI(J-XK7tIg;==I*PGktMGkdvV0?hAFK*ci-ofyWM%>*zZqNt8g3s}&ChemlHoh0@IplUH8#>22ywpY+{6t}fVN%E695aLf<7 ztGKWAekxu;U#ok}Iq)fsxXa>mfeg?O z{U7=?Dqhibz|iC>pS~=8Q2@>)j`4gXsG2=_&qe;iRSkwGSl~d^W!HCXmx1{S<^_9{ zi^56}s2|V?*`sb+U2M1)2S;Q;(?$#(#|H1?w6bb0c-bd`dlR}q3`w6&`97g zMEXQIOQb)QuN*=>K%XIx#!3!CvTh}La?u?1cQP1@)+ts`t$s)R2sR}(k{XItoAh!9 zWUEd$)t^~6@_RLEB)sRv>MH*7ux(}w6}ReV@6J+wX$x8%)@~g9fpTrf)0DVxPdu6% z`9U4N=-OwJ{*B7Uky|d$wsvO!6&~i-z7ne}PQS9_>%yu7aM|QnMBAPJJ&UNIvCRz` 
zMf;|zBaLErJ~_XBqQTtoX;CoEeueSBV)rPjPY-mjGcPTY?lb11QWQkH^qVTy2V5oz zb=&|$UJzuL&Qt-;JBI`%0FV(Ib0<+svkuB)MQbOK9TvHjku{#!Uz=*_*Gsd{((0}7 zo$xycXnB5DW_G~x3ioqHD?-*p=4IfAnqiekKJ-LTv$TDN_c z_m|RAmvM6STDEa-$+XHHnVPdQvx6F8IdFpa>Z>{WYQ!S`0zLz~h;JFA1)l;e=EjkA zF+PoJt^MTHCDt)o6#;dbS(&$Dyer(ZV#w8eFIg|vnk+Zx#bBI>Bk;ugjaTUINE|fx zi*a`pTa#t>RfMgol3DXGohy<=LIAbaoNq7e#U&E_%>7mdlj}9Sr(#^qcTnXzr361i zzWq(Z^(OJ6`@-II6DwHFdizSlFhjqUBZj;fbP2W&T>_g4GY9I6WWT%3O~V^-I=MGS zpNdNNRM;OfTr=afU%WI$`d0hL4DrGl`~72v;7J7!o3A8Rb$tGp>M7~y-+>(f9v!_i z7(HvaX7tXCR|Y~Qo$*RDUrjzkuB280opjZ#`A-_NL`W$>?F%%p!!G04*>@?klv3X? z+0+0-OG#l0!i*_?wLx0+{7h8nbpe`45V9yZh<R4}d;`cnw0C(HG{&uXfoyB+vRKhb%C3_=SveDWEi8Q9eXp za#Gls#eW5F`0?lqk;5W7ggd9W#%6ZaacmPggqxReTAPH{6IV4Mpf#&teWC!l{`0Bu z5Sk>~3i+?Rn)V)je)u2VhW}uO>az+(EJacan`XXrfcx~+MAo~N|-lh`d zgBP>G%w6COa?y-=fP@x^L+Djt5pNq@HfZIX@zt4vM7~BLnmrJML3|GqnDhO85N&0E zEDKVj*8{&3agM_WfCO{cLM?nF_VG*ezVS71iv?0Lh`EKJ=Yoyn^$;ctakWVNH3Zm# z1Pne~S6=hZ&34-(voNF`MXt5fX5+F$4a_prpk-mjC)(UN9jRM-zftWY5IZ0G2QT-X%y z!xeub8I_QO%5^uU`3f+VESYB?DNuNRXuC7-Er0&E5go5d?a} z=$m@J=}6G7YFW4%)^IQhZuj5?9@IEVi1H5{%=Zu-F(iE-M37Pz@E#MkguDrR{5Q^r zF~_4|hXH-!>!$H@uooRn0xc-A-vC%3`UxOJ049L!5wQD|khB6@MjZ7f4hK(6U)c60 zSlu7fSkkcTO1yNRd(+O(CGv#1D_T)(*Lw#-FU;2QST)Thb!_-R)%6ai!ylj;O3@H1 z(4a}Myc<-VjoF!S{@g3~<91$T>+`ca*L*Gi{TbHv;=0Z)JQ?$PWY7KYg-@1b2V4#G zj<(zX;qYkI-Dzck=mI;;#bE|D8uYC%m2VBlW-#m@yelqj%RWo9n1z2Bv2DCczV!`C zGE25$>S2aUFXF?ruE!(i=Rgii3-psNw*Ps5B)n>4w;lZBDXWxMq2FHm^rCXV=qbM$ zpH6>`#3^I8V}_$O{|;AInv8=Vz3X^1=vRB+LJCIKeh%;=_R-6m_H+sV%?U&1gZ5kz z&$|k(VzUyT0ZjmEDkRBy-}Eq)NiN=L);O?TJ;-T_x>8`EhT#n*!0B6Z=^7cIK}Q?E z29C618_0X_(bq_ZJ?|ETupc=^1n%hT%P


[GIT binary patch literal omitted: base85-encoded binary data, not human-readable — evidently the audio.mp3 fixture consumed by the whisper test below.]
zh5gZPhwS2Xii<%&qK49XsN>JkLu>IHO}n9FUywto46zI9MhoswlzLxF?WPwCih5}I z&rAKM<8;t1R7SfM7o>KlhkL6-Y-jaWuH)Fn1!R`Mev3i#7^Z!PZRHEE5jJQeZS@W#%#VYWz0UQNa5AxG!Rx|&B+mk6w_+D#%Z6e)zh zIwT|npIR3>4-B8fAS`5n<`tI1 zJ48TjJ!mDN6dvAyDsIqwST>@9Vi1BDiUP>=!&qZ`+Y*PPv!Nv5FFQggTc;m}a!WqQ zYUk4%j2`qEuuT1zTs-{MgvRq~-|2GGH&up3A;(817na{&`Eg>_Sd~L=Y2w<|zrGly zo;#}8a*m1|BP|@wtK7JQ?(=b{YQeTXuhd1OM{Jn0%FZGItxvZTzxjjYM@5*;d2c?Y z)`NW4vo5WZ&!Zf z&T)^ny?2b&Tetq(m8HFPNd@t4;%S9?f8c8OkXIRe^}Y~gF$kdeGvdd|%*Czra*L|90d^>o{=ID(hj9-o% zdpH@9@$TXt%d%(ddQKkht4WI4wdB~KIww2nX~AkRVP&>WC5*4iDeuUf%T$%5D%1y_ z6BY5Si4tOfLgWyGvn|z*tKo{Z_WBix?83$8{U0orauBsv51~Yd^d;0C!Sy~MViT83 z0M~mtJ7~7SJ?R=;1VN;`eKs33s#5U=$wR149!6y%T&XyW#7Lf7a){FMhyle;Xnds( zTd{wDJ|)o1aQN!xL$#5pt9BXXJQUyPhZ31PYjkYvC+$9o=i`e5iY_5uOuP>u5CZ$)Gp*2$yr?mRHbM zSuUFFbz}1DtqvN1ooc2jnW*z*)_mv85rasPw)>*zRXd#%w)oMxmh}810yl9`t9M4L zHzg>veN{ndNH3e+VAa6BDa?Rc94(!j6g;AI6=8S?&J2~I(&?Nf96#r6n;S#ueq2lD z$;!+5#V42*_4)aVih4A@v35E0 zl}p2Ci>zQVnaY|(1PLr?RS_;W3?s585jK_}9DIX;64kGctB6bI49BL2&yF<+TN+OE znj6a?X$1ZptIjKDpI?Bjb#dltG~KWt(Yzkuxrk*=p6IKY#R;*+2g1C<=Sr|#X66Ks z$W~e4{e6b0l4L9X!pqXf`TLM6jV!YCTdkNW&RI+!(seapYpLI7e7AaF z%Cj!3aPuqI;GQax@=oqhCg$$9^V~we3_j|9T~M}q&h6js#&yuD>=P?$o9EOF^D3c^64O7 z5)1{}Wd)1wU!uHb1nvC(NYyJ+bO>u~$!dgzDR)g!(GjL$PS9KgjN6RD5&TK~LZ&cp zxxd5W?`HJ-sTS+8uXk+FJvH0#x5vWZ2gIs6vT3(0$2q35_YdTC*QL5jiBN>eW+Ub9 z@>)cJ%Q6XRQY*$~C8Ua<$wm9lGtN|ZOp&iD7x$=-w49zVzw*`a28Z#2!`mJlF76`* zT2}GOQ z#+=X6)MCQ@3CpiBiz*KNS@Ctq*s|YrrB5p`6P9?Xr$#Hqk5nNa!BrNnN4`^g4j$HM zf_{i9d}O9(&V9l>zu1QGgrHOs?w|kI&8e1aUZmE&E{L>Bp ze^hyREoFbOJgdWGImc0B2z!Y-9#|hQrd1QeM7=6{SW8w{l;&+NV%ZRPuXJI)ILnvW zSDEv&)3`}GjBJZg|6KX*(8XadLb}%XOHPiKoVPHhxtKgy8e()g*!n`{@*}?0R#oPF znoH%a@qw>D#l@enP4?ya+Sf1J$nQR%YB;{CD8?%U|C8pABL2OILnq1`0z;ihm5b5# zTf$m|AN$0z-B|PdZ7(9wEk59r_riXO6kIljWthgGj62p#W`1kJ|>_Pc?5Mk__^~8PU(<_W9F2oaF`q zsRz<_cC}qK^>19=P51jFiGDs}f?;C6<3PZ|!Dm-4KO0$FP;BozG3#+^E^` z90-BV;`5znPBsLP7hrF+FSn8~j2Rn>R|`Xh-=CC&zuTHmg4SVZyZzV3P zNbaT;bs{78uFIZd&z-x!;0;yZuyi+e&M&^PWr2&^H4n=ZzQwN1`xmbZaB)hnzoB>P zjKHTZYw5+F1xDZAICOu=ZOtOfZ*MMlamuxH7#-rN_dTD@Y008CS>){wtw-C`ShrCV4CHDpy_PeCBX?!QFkp9c3|rvCrt#s2~pxdCAS literal 0 HcmV?d00001 diff --git a/api/tests/integration_tests/models/speech2text/test_openai_whisper.py b/api/tests/integration_tests/models/speech2text/test_openai_whisper.py new file mode 100644 index 000000000..a649c794e --- /dev/null +++ b/api/tests/integration_tests/models/speech2text/test_openai_whisper.py @@ -0,0 +1,50 @@ +import json +import os +from unittest.mock import patch + +from core.model_providers.models.speech2text.openai_whisper import OpenAIWhisper +from core.model_providers.providers.openai_provider import OpenAIProvider +from models.provider import Provider, ProviderType + + +def get_mock_provider(valid_openai_api_key): + return Provider( + id='provider_id', + tenant_id='tenant_id', + provider_name='openai', + provider_type=ProviderType.CUSTOM.value, + encrypted_config=json.dumps({'openai_api_key': valid_openai_api_key}), + is_valid=True, + ) + + +def get_mock_openai_whisper_model(): + model_name = 'whisper-1' + valid_openai_api_key = os.environ['OPENAI_API_KEY'] + openai_provider = OpenAIProvider(provider=get_mock_provider(valid_openai_api_key)) + return OpenAIWhisper( + model_provider=openai_provider, + name=model_name + ) + + +def decrypt_side_effect(tenant_id, encrypted_openai_api_key): + return encrypted_openai_api_key + + +@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect) +def test_run(mock_decrypt): + # Get the directory of the current file + current_dir = 
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_run(mock_decrypt):
+    # Get the directory of the current file
+    current_dir = os.path.dirname(os.path.abspath(__file__))
+
+    # Construct the path to the audio file
+    audio_file_path = os.path.join(current_dir, 'audio.mp3')
+
+    model = get_mock_openai_whisper_model()
+
+    # Open the file and get the file object
+    with open(audio_file_path, 'rb') as audio_file:
+        rst = model.run(audio_file)
+
+    assert isinstance(rst, dict)
+    assert rst['text'] == '1, 2, 3, 4, 5, 6, 7, 8, 9, 10'
diff --git a/api/tests/test_controllers/test_account_api.py.bak b/api/tests/test_controllers/test_account_api.py.bak
deleted file mode 100644
index a73c796b7..000000000
--- a/api/tests/test_controllers/test_account_api.py.bak
+++ /dev/null
@@ -1,75 +0,0 @@
-import json
-import pytest
-from flask import url_for
-
-from models.model import Account
-
-# Sample user data for testing
-sample_user_data = {
-    'name': 'Test User',
-    'email': 'test@example.com',
-    'interface_language': 'en-US',
-    'interface_theme': 'light',
-    'timezone': 'America/New_York',
-    'password': 'testpassword',
-    'new_password': 'newtestpassword',
-    'repeat_new_password': 'newtestpassword'
-}
-
-# Create a test user and log them in
-@pytest.fixture(scope='function')
-def logged_in_user(client, session):
-    # Create test user and add them to the database
-    # Replace this with your actual User model and any required fields
-
-    # todo refer to api.controllers.setup.SetupApi.post() to create a user
-    db_user_data = sample_user_data.copy()
-    db_user_data['password_salt'] = 'testpasswordsalt'
-    del db_user_data['new_password']
-    del db_user_data['repeat_new_password']
-    test_user = Account(**db_user_data)
-    session.add(test_user)
-    session.commit()
-
-    # Log in the test user
-    client.post(url_for('console.loginapi'), data={'email': sample_user_data['email'], 'password': sample_user_data['password']})
-
-    return test_user
-
-def test_account_profile(logged_in_user, client):
-    response = client.get(url_for('console.accountprofileapi'))
-    assert response.status_code == 200
-    assert json.loads(response.data)['name'] == sample_user_data['name']
-
-def test_account_name(logged_in_user, client):
-    new_name = 'New Test User'
-    response = client.post(url_for('console.accountnameapi'), json={'name': new_name})
-    assert response.status_code == 200
-    assert json.loads(response.data)['name'] == new_name
-
-def test_account_interface_language(logged_in_user, client):
-    new_language = 'zh-CN'
-    response = client.post(url_for('console.accountinterfacelanguageapi'), json={'interface_language': new_language})
-    assert response.status_code == 200
-    assert json.loads(response.data)['interface_language'] == new_language
-
-def test_account_interface_theme(logged_in_user, client):
-    new_theme = 'dark'
-    response = client.post(url_for('console.accountinterfacethemeapi'), json={'interface_theme': new_theme})
-    assert response.status_code == 200
-    assert json.loads(response.data)['interface_theme'] == new_theme
-
-def test_account_timezone(logged_in_user, client):
-    new_timezone = 'Asia/Shanghai'
-    response = client.post(url_for('console.accounttimezoneapi'), json={'timezone': new_timezone})
-    assert response.status_code == 200
-    assert json.loads(response.data)['timezone'] == new_timezone
-
-def test_account_password(logged_in_user, client):
-    response = client.post(url_for('console.accountpasswordapi'), json={
-        'password': sample_user_data['password'],
-        'new_password': sample_user_data['new_password'],
-        'repeat_new_password': sample_user_data['repeat_new_password']
-    })
-    assert response.status_code == 200
-    assert json.loads(response.data)['result'] == 'success'
diff --git a/api/tests/test_controllers/test_login.py b/api/tests/test_controllers/test_login.py
deleted file mode 100644
index 559e2f809..000000000
--- a/api/tests/test_controllers/test_login.py
+++ /dev/null
@@ -1,108 +0,0 @@
-import pytest
-from app import create_app, db
-from flask_login import current_user
-from models.model import Account, TenantAccountJoin, Tenant
-
-
-@pytest.fixture
-def client(test_client, db_session):
-    app = create_app()
-    app.config["TESTING"] = True
-    with app.app_context():
-        db.create_all()
-        yield test_client
-        db.drop_all()
-
-
-def test_login_api_post(client, db_session):
-    # create a tenant, account, and tenant account join
-    tenant = Tenant(name="Test Tenant", status="normal")
-    account = Account(email="test@test.com", name="Test User")
-    account.password_salt = "uQ7K0/0wUJ7VPhf3qBzwNQ=="
-    account.password = "A9YpfzjK7c/tOwzamrvpJg=="
-    db.session.add_all([tenant, account])
-    db.session.flush()
-    tenant_account_join = TenantAccountJoin(tenant_id=tenant.id, account_id=account.id, is_tenant_owner=True)
-    db.session.add(tenant_account_join)
-    db.session.commit()
-
-    # login with correct credentials
-    response = client.post("/login", json={
-        "email": "test@test.com",
-        "password": "Abc123456",
-        "remember_me": True
-    })
-    assert response.status_code == 200
-    assert response.json == {"result": "success"}
-    assert current_user == account
-    assert 'tenant_id' in client.session
-    assert client.session['tenant_id'] == tenant.id
-
-    # login with incorrect password
-    response = client.post("/login", json={
-        "email": "test@test.com",
-        "password": "wrong_password",
-        "remember_me": True
-    })
-    assert response.status_code == 401
-
-    # login with non-existent account
-    response = client.post("/login", json={
-        "email": "non_existent_account@test.com",
-        "password": "Abc123456",
-        "remember_me": True
-    })
-    assert response.status_code == 401
-
-
-def test_logout_api_get(client, db_session):
-    # create a tenant, account, and tenant account join
-    tenant = Tenant(name="Test Tenant", status="normal")
-    account = Account(email="test@test.com", name="Test User")
-    db.session.add_all([tenant, account])
-    db.session.flush()
-    tenant_account_join = TenantAccountJoin(tenant_id=tenant.id, account_id=account.id, is_tenant_owner=True)
-    db.session.add(tenant_account_join)
-    db.session.commit()
-
-    # login and check if session variable and current_user are set
-    with client.session_transaction() as session:
-        session['tenant_id'] = tenant.id
-    client.post("/login", json={
-        "email": "test@test.com",
-        "password": "Abc123456",
-        "remember_me": True
-    })
-    assert current_user == account
-    assert 'tenant_id' in client.session
-    assert client.session['tenant_id'] == tenant.id
-
-    # logout and check if session variable and current_user are unset
-    response = client.get("/logout")
-    assert response.status_code == 200
-    assert current_user.is_authenticated is False
-    assert 'tenant_id' not in client.session
-
-
-def test_reset_password_api_get(client, db_session):
-    # create a tenant, account, and tenant account join
-    tenant = Tenant(name="Test Tenant", status="normal")
-    account = Account(email="test@test.com", name="Test User")
-    db.session.add_all([tenant, account])
-    db.session.flush()
-    tenant_account_join = TenantAccountJoin(tenant_id=tenant.id, account_id=account.id, is_tenant_owner=True)
-    db.session.add(tenant_account_join)
-    db.session.commit()
-
-    # reset password in cloud edition
-    app = client.application
-    app.config["CLOUD_EDITION"] = True
-    response = client.get("/reset_password")
-    assert response.status_code == 200
-    assert response.json == {"result": "success"}
-
-    # reset password in non-cloud edition
-    app.config["CLOUD_EDITION"] = False
-    response = client.get("/reset_password")
-    assert response.status_code == 200
-    assert response.json == {"result": "success"}
diff --git a/api/tests/test_controllers/test_setup.py b/api/tests/test_controllers/test_setup.py
deleted file mode 100644
index 96a9b0911..000000000
--- a/api/tests/test_controllers/test_setup.py
+++ /dev/null
@@ -1,80 +0,0 @@
-import os
-import pytest
-from models.model import Account, Tenant, TenantAccountJoin
-
-
-def test_setup_api_get(test_client,db_session):
-    response = test_client.get("/setup")
-    assert response.status_code == 200
-    assert response.json == {"step": "not_start"}
-
-    # create a tenant and check again
-    tenant = Tenant(name="Test Tenant", status="normal")
-    db_session.add(tenant)
-    db_session.commit()
-    response = test_client.get("/setup")
-    assert response.status_code == 200
-    assert response.json == {"step": "step2"}
-
-    # create setup file and check again
-    response = test_client.get("/setup")
-    assert response.status_code == 200
-    assert response.json == {"step": "finished"}
-
-
-def test_setup_api_post(test_client):
-    response = test_client.post("/setup", json={
-        "email": "test@test.com",
-        "name": "Test User",
-        "password": "Abc123456"
-    })
-    assert response.status_code == 200
-    assert response.json == {"result": "success", "next_step": "step2"}
-
-    # check if the tenant, account, and tenant account join records were created
-    tenant = Tenant.query.first()
-    assert tenant.name == "Test User's LLM Factory"
-    assert tenant.status == "normal"
-    assert tenant.encrypt_public_key
-
-    account = Account.query.first()
-    assert account.email == "test@test.com"
-    assert account.name == "Test User"
-    assert account.password_salt
-    assert account.password
-    assert TenantAccountJoin.query.filter_by(account_id=account.id, is_tenant_owner=True).count() == 1
-
-    # check if password is encrypted correctly
-    salt = account.password_salt.encode()
-    password_hashed = account.password.encode()
-    assert account.password == base64.b64encode(hash_password("Abc123456", salt)).decode()
-
-
-def test_setup_step2_api_post(test_client,db_session):
-    # create a tenant, account, and setup file
-    tenant = Tenant(name="Test Tenant", status="normal")
-    account = Account(email="test@test.com", name="Test User")
-    db_session.add_all([tenant, account])
-    db_session.commit()
-
-    # try to set up with incorrect language
-    response = test_client.post("/setup/step2", json={
-        "interface_language": "invalid_language",
-        "timezone": "Asia/Shanghai"
-    })
-    assert response.status_code == 400
-
-    # set up successfully
-    response = test_client.post("/setup/step2", json={
-        "interface_language": "en",
-        "timezone": "Asia/Shanghai"
-    })
-    assert response.status_code == 200
-    assert response.json == {"result": "success", "next_step": "finished"}
-
-    # check if account was updated correctly
-    account = Account.query.first()
-    assert account.interface_language == "en"
-    assert account.timezone == "Asia/Shanghai"
-    assert account.interface_theme == "light"
-    assert account.last_login_ip == "127.0.0.1"
diff --git a/api/tests/test_factory.py b/api/tests/test_factory.py
deleted file mode 100644
index 0d73168b4..000000000
--- a/api/tests/test_factory.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# -*- coding:utf-8 -*-
-
-import pytest
-
-from app import create_app
-
-def test_create_app():
-
-    # Test Default(CE) Config
-    app = create_app()
-
-    assert app.config['SECRET_KEY'] is not None
-    assert app.config['SQLALCHEMY_DATABASE_URI'] is not None
-    assert app.config['EDITION'] == "SELF_HOSTED"
-
-    # Test TestConfig
-    from config import TestConfig
-    test_app = create_app(TestConfig())
-
-    assert test_app.config['SECRET_KEY'] is not None
-    assert test_app.config['SQLALCHEMY_DATABASE_URI'] is not None
-    assert test_app.config['TESTING'] is True
\ No newline at end of file
diff --git a/api/tests/unit_tests/__init__.py b/api/tests/unit_tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/api/tests/unit_tests/model_providers/__init__.py b/api/tests/unit_tests/model_providers/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/api/tests/unit_tests/model_providers/fake_model_provider.py b/api/tests/unit_tests/model_providers/fake_model_provider.py
new file mode 100644
index 000000000..4e14d5924
--- /dev/null
+++ b/api/tests/unit_tests/model_providers/fake_model_provider.py
@@ -0,0 +1,44 @@
+from typing import Type
+
+from core.model_providers.models.base import BaseProviderModel
+from core.model_providers.models.entity.model_params import ModelType, ModelKwargsRules
+from core.model_providers.models.llm.openai_model import OpenAIModel
+from core.model_providers.providers.base import BaseModelProvider
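+
+
+# Minimal concrete provider for the unit tests below: it stubs every abstract
+# hook of BaseModelProvider with fixed values, so no network calls or real
+# encryption are involved.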
+class FakeModelProvider(BaseModelProvider):
+    @property
+    def provider_name(self):
+        return 'fake'
+
+    def _get_fixed_model_list(self, model_type: ModelType) -> list[dict]:
+        return [{'id': 'test_model', 'name': 'Test Model'}]
+
+    def get_model_class(self, model_type: ModelType) -> Type[BaseProviderModel]:
+        return OpenAIModel
+
+    @classmethod
+    def is_provider_credentials_valid_or_raise(cls, credentials: dict):
+        pass
+
+    @classmethod
+    def encrypt_provider_credentials(cls, tenant_id: str, credentials: dict) -> dict:
+        return credentials
+
+    def get_provider_credentials(self, obfuscated: bool = False) -> dict:
+        return {}
+
+    @classmethod
+    def is_model_credentials_valid_or_raise(cls, model_name: str, model_type: ModelType, credentials: dict):
+        pass
+
+    @classmethod
+    def encrypt_model_credentials(cls, tenant_id: str, model_name: str, model_type: ModelType,
+                                  credentials: dict) -> dict:
+        return credentials
+
+    def get_model_parameter_rules(self, model_name: str, model_type: ModelType) -> ModelKwargsRules:
+        return ModelKwargsRules()
+
+    def get_model_credentials(self, model_name: str, model_type: ModelType, obfuscated: bool = False) -> dict:
+        return {}
diff --git a/api/tests/unit_tests/model_providers/test_anthropic_provider.py b/api/tests/unit_tests/model_providers/test_anthropic_provider.py
new file mode 100644
index 000000000..ea4b62a20
--- /dev/null
+++ b/api/tests/unit_tests/model_providers/test_anthropic_provider.py
@@ -0,0 +1,123 @@
+from typing import List, Optional, Any
+
+import anthropic
+import httpx
+import pytest
+from unittest.mock import patch
+import json
+
+from langchain.callbacks.manager import CallbackManagerForLLMRun
+from langchain.schema import BaseMessage, ChatResult, ChatGeneration, AIMessage
+
+from core.model_providers.providers.anthropic_provider import AnthropicProvider
+from core.model_providers.providers.base import CredentialsValidateFailedError
+from models.provider import ProviderType, Provider
+
+
+PROVIDER_NAME = 'anthropic'
+MODEL_PROVIDER_CLASS = AnthropicProvider
+VALIDATE_CREDENTIAL_KEY = 'anthropic_api_key'
+
+
+def mock_chat_generate(messages: List[BaseMessage],
+                       stop: Optional[List[str]] = None,
+                       run_manager: Optional[CallbackManagerForLLMRun] = None,
+                       **kwargs: Any):
+    return ChatResult(generations=[ChatGeneration(message=AIMessage(content='answer'))])
+
+
+def mock_chat_generate_invalid(messages: List[BaseMessage],
+                               stop: Optional[List[str]] = None,
+                               run_manager: Optional[CallbackManagerForLLMRun] = None,
+                               **kwargs: Any):
+    raise anthropic.APIStatusError('Invalid credentials',
+                                   request=httpx._models.Request(
+                                       method='POST',
+                                       url='https://api.anthropic.com/v1/completions',
+                                   ),
+                                   response=httpx._models.Response(
+                                       status_code=401,
+                                   ),
+                                   body=None
+                                   )
+
+
+def encrypt_side_effect(tenant_id, encrypt_key):
+    return f'encrypted_{encrypt_key}'
+
+
+def decrypt_side_effect(tenant_id, encrypted_key):
+    return encrypted_key.replace('encrypted_', '')
+
+
+@patch('langchain.chat_models.ChatAnthropic._generate', side_effect=mock_chat_generate)
+def test_is_provider_credentials_valid_or_raise_valid(mock_create):
+    MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise({VALIDATE_CREDENTIAL_KEY: 'valid_key'})
+
+
+@patch('langchain.chat_models.ChatAnthropic._generate', side_effect=mock_chat_generate_invalid)
+def test_is_provider_credentials_valid_or_raise_invalid(mock_create):
+    # raise CredentialsValidateFailedError if anthropic_api_key is not in credentials
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise({})
+
+    # raise CredentialsValidateFailedError if anthropic_api_key is invalid
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise({VALIDATE_CREDENTIAL_KEY: 'invalid_key'})
+
+
+@patch('core.helper.encrypter.encrypt_token', side_effect=encrypt_side_effect)
+def test_encrypt_credentials(mock_encrypt):
+    api_key = 'valid_key'
+    result = MODEL_PROVIDER_CLASS.encrypt_provider_credentials('tenant_id', {VALIDATE_CREDENTIAL_KEY: api_key})
+    mock_encrypt.assert_called_with('tenant_id', api_key)
+    assert result[VALIDATE_CREDENTIAL_KEY] == f'encrypted_{api_key}'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_custom(mock_decrypt):
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({VALIDATE_CREDENTIAL_KEY: 'encrypted_valid_key'}),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials()
+    assert result[VALIDATE_CREDENTIAL_KEY] == 'valid_key'
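+
+
+# Obfuscation (per the asserts below) keeps the first 6 and last 2 characters of
+# the key and replaces everything in between with '*'.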
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_obfuscated(mock_decrypt):
+    api_key = 'valid_key'
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({VALIDATE_CREDENTIAL_KEY: f'encrypted_{api_key}'}),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials(obfuscated=True)
+    middle_token = result[VALIDATE_CREDENTIAL_KEY][6:-2]
+    assert len(middle_token) == max(len(api_key) - 8, 0)
+    assert all(char == '*' for char in middle_token)
+
+
+@patch('core.model_providers.providers.hosted.hosted_model_providers.anthropic')
+def test_get_credentials_hosted(mock_hosted):
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.SYSTEM.value,
+        encrypted_config='',
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    mock_hosted.api_key = 'hosted_key'
+    result = model_provider.get_provider_credentials()
+    assert result[VALIDATE_CREDENTIAL_KEY] == 'hosted_key'
diff --git a/api/tests/unit_tests/model_providers/test_azure_openai_provider.py b/api/tests/unit_tests/model_providers/test_azure_openai_provider.py
new file mode 100644
index 000000000..43788d4e0
--- /dev/null
+++ b/api/tests/unit_tests/model_providers/test_azure_openai_provider.py
@@ -0,0 +1,117 @@
+import pytest
+from unittest.mock import patch, MagicMock
+import json
+
+from core.model_providers.models.entity.model_params import ModelType
+from core.model_providers.providers.azure_openai_provider import AzureOpenAIProvider
+from core.model_providers.providers.base import CredentialsValidateFailedError
+from models.provider import ProviderType, Provider, ProviderModel
+
+PROVIDER_NAME = 'azure_openai'
+MODEL_PROVIDER_CLASS = AzureOpenAIProvider
+VALIDATE_CREDENTIAL = {
+    'openai_api_base': 'https://xxxx.openai.azure.com/',
+    'openai_api_key': 'valid_key',
+    'base_model_name': 'gpt-35-turbo'
+}
+
+
+def encrypt_side_effect(tenant_id, encrypt_key):
+    return f'encrypted_{encrypt_key}'
+
+
+def decrypt_side_effect(tenant_id, encrypted_key):
+    return encrypted_key.replace('encrypted_', '')
+
+
+def test_is_model_credentials_valid_or_raise(mocker):
+    mocker.patch('langchain.chat_models.base.BaseChatModel.generate', return_value=None)
+
+    # no exception means the credentials passed validation
+    MODEL_PROVIDER_CLASS.is_model_credentials_valid_or_raise(
+        model_name='test_model_name',
+        model_type=ModelType.TEXT_GENERATION,
+        credentials=VALIDATE_CREDENTIAL
+    )
+
+
+def test_is_model_credentials_valid_or_raise_invalid():
+    # raise CredentialsValidateFailedError if the required credential fields are missing
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_model_credentials_valid_or_raise(
+            model_name='test_model_name',
+            model_type=ModelType.TEXT_GENERATION,
+            credentials={}
+        )
+
+
+@patch('core.helper.encrypter.encrypt_token', side_effect=encrypt_side_effect)
+def test_encrypt_model_credentials(mock_encrypt):
+    openai_api_key = 'valid_key'
+    result = MODEL_PROVIDER_CLASS.encrypt_model_credentials(
+        tenant_id='tenant_id',
+        model_name='test_model_name',
+        model_type=ModelType.TEXT_GENERATION,
+        credentials={'openai_api_key': openai_api_key}
+    )
+    mock_encrypt.assert_called_with('tenant_id', openai_api_key)
+    assert result['openai_api_key'] == f'encrypted_{openai_api_key}'
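+
+
+# Azure credentials are stored per deployed model (a ProviderModel row) rather
+# than on the Provider itself, which is why the db.session.query below is mocked
+# to return that row.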
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_model_credentials_custom(mock_decrypt, mocker):
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=None,
+        is_valid=True,
+    )
+
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['openai_api_key'] = 'encrypted_' + encrypted_credential['openai_api_key']
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = ProviderModel(
+        encrypted_config=json.dumps(encrypted_credential)
+    )
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_model_credentials(
+        model_name='test_model_name',
+        model_type=ModelType.TEXT_GENERATION
+    )
+    assert result['openai_api_key'] == 'valid_key'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_model_credentials_obfuscated(mock_decrypt, mocker):
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=None,
+        is_valid=True,
+    )
+
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['openai_api_key'] = 'encrypted_' + encrypted_credential['openai_api_key']
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = ProviderModel(
+        encrypted_config=json.dumps(encrypted_credential)
+    )
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_model_credentials(
+        model_name='test_model_name',
+        model_type=ModelType.TEXT_GENERATION,
+        obfuscated=True
+    )
+    middle_token = result['openai_api_key'][6:-2]
+    assert len(middle_token) == max(len(VALIDATE_CREDENTIAL['openai_api_key']) - 8, 0)
+    assert all(char == '*' for char in middle_token)
diff --git a/api/tests/unit_tests/model_providers/test_base_model_provider.py b/api/tests/unit_tests/model_providers/test_base_model_provider.py
new file mode 100644
index 000000000..7d6e56eb0
--- /dev/null
+++ b/api/tests/unit_tests/model_providers/test_base_model_provider.py
@@ -0,0 +1,72 @@
+from unittest.mock import MagicMock
+
+import pytest
+
+from core.model_providers.error import QuotaExceededError
+from core.model_providers.models.entity.model_params import ModelType
+from models.provider import Provider, ProviderType
+from tests.unit_tests.model_providers.fake_model_provider import FakeModelProvider
+
+
+def test_get_supported_model_list(mocker):
+    mocker.patch.object(
+        FakeModelProvider,
+        'get_rules',
+        return_value={'support_provider_types': ['custom'], 'model_flexibility': 'configurable'}
+    )
+
+    mock_provider_model = MagicMock()
+    mock_provider_model.model_name = 'test_model'
+    mock_query = MagicMock()
+    mock_query.filter.return_value.order_by.return_value.all.return_value = [mock_provider_model]
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    provider = FakeModelProvider(provider=Provider())
+    result = provider.get_supported_model_list(ModelType.TEXT_GENERATION)
+
+    assert result == [{'id': 'test_model', 'name': 'test_model'}]
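+
+
+# Quota checks only bite for system (hosted) providers: with no remaining quota
+# row the call raises QuotaExceededError, while custom providers using their own
+# API keys always pass.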
+def test_check_quota_over_limit(mocker):
+    mocker.patch.object(
+        FakeModelProvider,
+        'get_rules',
+        return_value={'support_provider_types': ['system']}
+    )
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = None
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    provider = FakeModelProvider(provider=Provider(provider_type=ProviderType.SYSTEM.value))
+
+    with pytest.raises(QuotaExceededError):
+        provider.check_quota_over_limit()
+
+
+def test_check_quota_not_over_limit(mocker):
+    mocker.patch.object(
+        FakeModelProvider,
+        'get_rules',
+        return_value={'support_provider_types': ['system']}
+    )
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = Provider()
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    provider = FakeModelProvider(provider=Provider(provider_type=ProviderType.SYSTEM.value))
+
+    assert provider.check_quota_over_limit() is None
+
+
+def test_check_custom_quota_over_limit(mocker):
+    mocker.patch.object(
+        FakeModelProvider,
+        'get_rules',
+        return_value={'support_provider_types': ['custom']}
+    )
+
+    provider = FakeModelProvider(provider=Provider(provider_type=ProviderType.CUSTOM.value))
+
+    assert provider.check_quota_over_limit() is None
diff --git a/api/tests/unit_tests/model_providers/test_chatglm_provider.py b/api/tests/unit_tests/model_providers/test_chatglm_provider.py
new file mode 100644
index 000000000..9dfa1291f
--- /dev/null
+++ b/api/tests/unit_tests/model_providers/test_chatglm_provider.py
@@ -0,0 +1,89 @@
+import pytest
+from unittest.mock import patch
+import json
+
+from langchain.schema import LLMResult, Generation, AIMessage, ChatResult, ChatGeneration
+
+from core.model_providers.providers.base import CredentialsValidateFailedError
+from core.model_providers.providers.chatglm_provider import ChatGLMProvider
+from core.model_providers.providers.spark_provider import SparkProvider
+from models.provider import ProviderType, Provider
+
+
+PROVIDER_NAME = 'chatglm'
+MODEL_PROVIDER_CLASS = ChatGLMProvider
+VALIDATE_CREDENTIAL = {
+    'api_base': 'valid_api_base',
+}
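+
+
+# ChatGLM is typically a self-hosted endpoint, so the only credential exercised
+# here is the service URL (api_base); there is no API key to validate.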
+def encrypt_side_effect(tenant_id, encrypt_key):
+    return f'encrypted_{encrypt_key}'
+
+
+def decrypt_side_effect(tenant_id, encrypted_key):
+    return encrypted_key.replace('encrypted_', '')
+
+
+def test_is_provider_credentials_valid_or_raise_valid(mocker):
+    mocker.patch('langchain.llms.chatglm.ChatGLM._call',
+                 return_value="abc")
+
+    MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise(VALIDATE_CREDENTIAL)
+
+
+def test_is_provider_credentials_valid_or_raise_invalid():
+    # raise CredentialsValidateFailedError if api_base is not in credentials
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise({})
+
+    credential = VALIDATE_CREDENTIAL.copy()
+    credential['api_base'] = 'invalid_api_base'
+
+    # raise CredentialsValidateFailedError if api_base is invalid
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise(credential)
+
+
+@patch('core.helper.encrypter.encrypt_token', side_effect=encrypt_side_effect)
+def test_encrypt_credentials(mock_encrypt):
+    result = MODEL_PROVIDER_CLASS.encrypt_provider_credentials('tenant_id', VALIDATE_CREDENTIAL.copy())
+    assert result['api_base'] == f'encrypted_{VALIDATE_CREDENTIAL["api_base"]}'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_custom(mock_decrypt):
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['api_base'] = 'encrypted_' + encrypted_credential['api_base']
+
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps(encrypted_credential),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials()
+    assert result['api_base'] == 'valid_api_base'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_obfuscated(mock_decrypt):
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['api_base'] = 'encrypted_' + encrypted_credential['api_base']
+
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps(encrypted_credential),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials(obfuscated=True)
+    middle_token = result['api_base'][6:-2]
+    assert len(middle_token) == max(len(VALIDATE_CREDENTIAL['api_base']) - 8, 0)
+    assert all(char == '*' for char in middle_token)
diff --git a/api/tests/unit_tests/model_providers/test_huggingface_hub_provider.py b/api/tests/unit_tests/model_providers/test_huggingface_hub_provider.py
new file mode 100644
index 000000000..3f3384834
--- /dev/null
+++ b/api/tests/unit_tests/model_providers/test_huggingface_hub_provider.py
@@ -0,0 +1,161 @@
+import pytest
+from unittest.mock import patch, MagicMock
+import json
+
+from core.model_providers.models.entity.model_params import ModelType
+from core.model_providers.providers.base import CredentialsValidateFailedError
+from core.model_providers.providers.huggingface_hub_provider import HuggingfaceHubProvider
+from models.provider import ProviderType, Provider, ProviderModel
+
+PROVIDER_NAME = 'huggingface_hub'
+MODEL_PROVIDER_CLASS = HuggingfaceHubProvider
+HOSTED_INFERENCE_API_VALIDATE_CREDENTIAL = {
+    'huggingfacehub_api_type': 'hosted_inference_api',
+    'huggingfacehub_api_token': 'valid_key'
+}
+
+INFERENCE_ENDPOINTS_VALIDATE_CREDENTIAL = {
+    'huggingfacehub_api_type': 'inference_endpoints',
+    'huggingfacehub_api_token': 'valid_key',
+    'huggingfacehub_endpoint_url': 'valid_url'
+}
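+
+# Two auth modes are exercised below: 'hosted_inference_api' needs only an API
+# token, while 'inference_endpoints' additionally needs the dedicated endpoint URL.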
+def encrypt_side_effect(tenant_id, encrypt_key):
+    return f'encrypted_{encrypt_key}'
+
+
+def decrypt_side_effect(tenant_id, encrypted_key):
+    return encrypted_key.replace('encrypted_', '')
+
+
+@patch('huggingface_hub.hf_api.ModelInfo')
+def test_hosted_inference_api_is_credentials_valid_or_raise_valid(mock_model_info, mocker):
+    mock_model_info.return_value = MagicMock(pipeline_tag='text2text-generation')
+    mocker.patch('langchain.llms.huggingface_hub.HuggingFaceHub._call', return_value="abc")
+
+    MODEL_PROVIDER_CLASS.is_model_credentials_valid_or_raise(
+        model_name='test_model_name',
+        model_type=ModelType.TEXT_GENERATION,
+        credentials=HOSTED_INFERENCE_API_VALIDATE_CREDENTIAL
+    )
+
+
+@patch('huggingface_hub.hf_api.ModelInfo')
+def test_hosted_inference_api_is_credentials_valid_or_raise_invalid(mock_model_info):
+    mock_model_info.return_value = MagicMock(pipeline_tag='text2text-generation')
+
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_model_credentials_valid_or_raise(
+            model_name='test_model_name',
+            model_type=ModelType.TEXT_GENERATION,
+            credentials={}
+        )
+
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_model_credentials_valid_or_raise(
+            model_name='test_model_name',
+            model_type=ModelType.TEXT_GENERATION,
+            credentials={
+                'huggingfacehub_api_type': 'hosted_inference_api',
+            })
+
+
+def test_inference_endpoints_is_credentials_valid_or_raise_valid(mocker):
+    mocker.patch('huggingface_hub.hf_api.HfApi.whoami', return_value=None)
+    mocker.patch('langchain.llms.huggingface_endpoint.HuggingFaceEndpoint._call', return_value="abc")
+
+    MODEL_PROVIDER_CLASS.is_model_credentials_valid_or_raise(
+        model_name='test_model_name',
+        model_type=ModelType.TEXT_GENERATION,
+        credentials=INFERENCE_ENDPOINTS_VALIDATE_CREDENTIAL
+    )
+
+
+def test_inference_endpoints_is_credentials_valid_or_raise_invalid(mocker):
+    mocker.patch('huggingface_hub.hf_api.HfApi.whoami', return_value=None)
+
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_model_credentials_valid_or_raise(
+            model_name='test_model_name',
+            model_type=ModelType.TEXT_GENERATION,
+            credentials={}
+        )
+
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_model_credentials_valid_or_raise(
+            model_name='test_model_name',
+            model_type=ModelType.TEXT_GENERATION,
+            credentials={
+                'huggingfacehub_api_type': 'inference_endpoints',
+                'huggingfacehub_endpoint_url': 'valid_url'
+            })
+
+
+@patch('core.helper.encrypter.encrypt_token', side_effect=encrypt_side_effect)
+def test_encrypt_model_credentials(mock_encrypt):
+    api_key = 'valid_key'
+    result = MODEL_PROVIDER_CLASS.encrypt_model_credentials(
+        tenant_id='tenant_id',
+        model_name='test_model_name',
+        model_type=ModelType.TEXT_GENERATION,
+        credentials=INFERENCE_ENDPOINTS_VALIDATE_CREDENTIAL.copy()
+    )
+    mock_encrypt.assert_called_with('tenant_id', api_key)
+    assert result['huggingfacehub_api_token'] == f'encrypted_{api_key}'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_model_credentials_custom(mock_decrypt, mocker):
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=None,
+        is_valid=True,
+    )
+
+    encrypted_credential = INFERENCE_ENDPOINTS_VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['huggingfacehub_api_token'] = 'encrypted_' + encrypted_credential['huggingfacehub_api_token']
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = ProviderModel(
+        encrypted_config=json.dumps(encrypted_credential)
+    )
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_model_credentials(
+        model_name='test_model_name',
+        model_type=ModelType.TEXT_GENERATION
+    )
+    assert result['huggingfacehub_api_token'] == 'valid_key'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_model_credentials_obfuscated(mock_decrypt, mocker):
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=None,
+        is_valid=True,
+    )
+
+    encrypted_credential = INFERENCE_ENDPOINTS_VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['huggingfacehub_api_token'] = 'encrypted_' + encrypted_credential['huggingfacehub_api_token']
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = ProviderModel(
+        encrypted_config=json.dumps(encrypted_credential)
+    )
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_model_credentials(
+        model_name='test_model_name',
+        model_type=ModelType.TEXT_GENERATION,
+        obfuscated=True
+    )
+    middle_token = result['huggingfacehub_api_token'][6:-2]
+    assert len(middle_token) == max(len(INFERENCE_ENDPOINTS_VALIDATE_CREDENTIAL['huggingfacehub_api_token']) - 8, 0)
+    assert all(char == '*' for char in middle_token)
diff --git a/api/tests/unit_tests/model_providers/test_minimax_provider.py b/api/tests/unit_tests/model_providers/test_minimax_provider.py
new file mode 100644
index 000000000..ec3e47627
--- /dev/null
+++ b/api/tests/unit_tests/model_providers/test_minimax_provider.py
@@ -0,0 +1,88 @@
+import pytest
+from unittest.mock import patch
+import json
+
+from core.model_providers.providers.base import CredentialsValidateFailedError
+from core.model_providers.providers.minimax_provider import MinimaxProvider
+from models.provider import ProviderType, Provider
+
+
+PROVIDER_NAME = 'minimax'
+MODEL_PROVIDER_CLASS = MinimaxProvider
+VALIDATE_CREDENTIAL = {
+    'minimax_group_id': 'fake-group-id',
+    'minimax_api_key': 'valid_key'
+}
+
+
+def encrypt_side_effect(tenant_id, encrypt_key):
+    return f'encrypted_{encrypt_key}'
+
+
+def decrypt_side_effect(tenant_id, encrypted_key):
+    return encrypted_key.replace('encrypted_', '')
+
+
+def test_is_provider_credentials_valid_or_raise_valid(mocker):
+    mocker.patch('langchain.llms.minimax.Minimax._call', return_value='abc')
+
+    MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise(VALIDATE_CREDENTIAL)
+
+
+def test_is_provider_credentials_valid_or_raise_invalid():
+    # raise CredentialsValidateFailedError if api_key is not in credentials
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise({})
+
+    credential = VALIDATE_CREDENTIAL.copy()
+    credential['minimax_api_key'] = 'invalid_key'
+
+    # raise CredentialsValidateFailedError if api_key is invalid
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise(credential)
+
+
+@patch('core.helper.encrypter.encrypt_token', side_effect=encrypt_side_effect)
+def test_encrypt_credentials(mock_encrypt):
+    api_key = 'valid_key'
+    result = MODEL_PROVIDER_CLASS.encrypt_provider_credentials('tenant_id', VALIDATE_CREDENTIAL.copy())
+    mock_encrypt.assert_called_with('tenant_id', api_key)
+    assert result['minimax_api_key'] == f'encrypted_{api_key}'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_custom(mock_decrypt):
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['minimax_api_key'] = 'encrypted_' + encrypted_credential['minimax_api_key']
+
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps(encrypted_credential),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials()
+    assert result['minimax_api_key'] == 'valid_key'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_obfuscated(mock_decrypt):
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['minimax_api_key'] = 'encrypted_' + encrypted_credential['minimax_api_key']
+
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps(encrypted_credential),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials(obfuscated=True)
+    middle_token = result['minimax_api_key'][6:-2]
+    assert len(middle_token) == max(len(VALIDATE_CREDENTIAL['minimax_api_key']) - 8, 0)
+    assert all(char == '*' for char in middle_token)
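Each test module stubs the tenant encrypter with the same trivially reversible pair; the only property the tests rely on is that the two stubs invert each other (the real core.helper.encrypter does actual encryption, which these stubs deliberately skip):

def encrypt_side_effect(tenant_id, encrypt_key):
    return f'encrypted_{encrypt_key}'  # stub: a prefix, not cryptography

def decrypt_side_effect(tenant_id, encrypted_key):
    return encrypted_key.replace('encrypted_', '')  # inverse of the stub above

assert decrypt_side_effect('tenant_id', encrypt_side_effect('tenant_id', 'valid_key')) == 'valid_key'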
diff --git a/api/tests/unit_tests/model_providers/test_openai_provider.py b/api/tests/unit_tests/model_providers/test_openai_provider.py
new file mode 100644
index 000000000..3e2f717ee
--- /dev/null
+++ b/api/tests/unit_tests/model_providers/test_openai_provider.py
@@ -0,0 +1,126 @@
+import pytest
+from unittest.mock import patch, MagicMock
+import json
+
+from openai.error import AuthenticationError
+
+from core.model_providers.providers.base import CredentialsValidateFailedError
+from core.model_providers.providers.openai_provider import OpenAIProvider
+from models.provider import ProviderType, Provider
+
+PROVIDER_NAME = 'openai'
+MODEL_PROVIDER_CLASS = OpenAIProvider
+VALIDATE_CREDENTIAL_KEY = 'openai_api_key'
+
+
+def moderation_side_effect(*args, **kwargs):
+    if kwargs['api_key'] == 'valid_key':
+        mock_instance = MagicMock()
+        mock_instance.request = MagicMock()
+        return mock_instance, {}
+    else:
+        raise AuthenticationError('Invalid credentials')
+
+
+def encrypt_side_effect(tenant_id, encrypt_key):
+    return f'encrypted_{encrypt_key}'
+
+
+def decrypt_side_effect(tenant_id, encrypted_key):
+    return encrypted_key.replace('encrypted_', '')
+
+
+@patch('openai.ChatCompletion.create', side_effect=moderation_side_effect)
+def test_is_provider_credentials_valid_or_raise_valid(mock_create):
+    # passes (returns None) if api_key is valid
+    credentials = {VALIDATE_CREDENTIAL_KEY: 'valid_key'}
+    assert MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise(credentials) is None
+
+
+@patch('openai.ChatCompletion.create', side_effect=moderation_side_effect)
+def test_is_provider_credentials_valid_or_raise_invalid(mock_create):
+    # raise CredentialsValidateFailedError if api_key is not in credentials
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise({})
+
+    # raise CredentialsValidateFailedError if api_key is invalid
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise({VALIDATE_CREDENTIAL_KEY: 'invalid_key'})
+
+
+@patch('core.helper.encrypter.encrypt_token', side_effect=encrypt_side_effect)
+def test_encrypt_credentials(mock_encrypt):
+    api_key = 'valid_key'
+    result = MODEL_PROVIDER_CLASS.encrypt_provider_credentials('tenant_id', {VALIDATE_CREDENTIAL_KEY: api_key})
+    mock_encrypt.assert_called_with('tenant_id', api_key)
+    assert result[VALIDATE_CREDENTIAL_KEY] == f'encrypted_{api_key}'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_custom(mock_decrypt):
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({VALIDATE_CREDENTIAL_KEY: 'encrypted_valid_key'}),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials()
+    assert result[VALIDATE_CREDENTIAL_KEY] == 'valid_key'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_custom_str(mock_decrypt):
+    """
+    Only the OpenAI provider needs to stay compatible with the legacy case
+    where encrypted_config was stored as a plain string instead of JSON.
+
+    :param mock_decrypt:
+    :return:
+    """
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config='encrypted_valid_key',
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials()
+    assert result[VALIDATE_CREDENTIAL_KEY] == 'valid_key'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_obfuscated(mock_decrypt):
+    openai_api_key = 'valid_key'
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps({VALIDATE_CREDENTIAL_KEY: f'encrypted_{openai_api_key}'}),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials(obfuscated=True)
+    middle_token = result[VALIDATE_CREDENTIAL_KEY][6:-2]
+    assert len(middle_token) == max(len(openai_api_key) - 8, 0)
+    assert all(char == '*' for char in middle_token)
+
+
+@patch('core.model_providers.providers.hosted.hosted_model_providers.openai')
+def test_get_credentials_hosted(mock_hosted):
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.SYSTEM.value,
+        encrypted_config='',
+        is_valid=True
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    mock_hosted.api_key = 'hosted_key'
+    result = model_provider.get_provider_credentials()
+    assert result[VALIDATE_CREDENTIAL_KEY] == 'hosted_key'
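test_get_credentials_custom_str pins down the legacy plain-string format. A hedged sketch of the branch it implies, assuming the provider falls back to treating the whole column as the encrypted key when JSON parsing fails (the function name and fallback key are illustrative, not the PR's exact code):

import json

def parse_encrypted_config(encrypted_config: str) -> dict:
    try:
        return json.loads(encrypted_config)  # current format: a JSON object
    except json.JSONDecodeError:
        # legacy OpenAI rows stored the encrypted key as a bare string
        return {'openai_api_key': encrypted_config}

assert parse_encrypted_config('{"openai_api_key": "k"}') == {'openai_api_key': 'k'}
assert parse_encrypted_config('encrypted_valid_key') == {'openai_api_key': 'encrypted_valid_key'}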
diff --git a/api/tests/unit_tests/model_providers/test_replicate_provider.py b/api/tests/unit_tests/model_providers/test_replicate_provider.py
new file mode 100644
index 000000000..e555636f0
--- /dev/null
+++ b/api/tests/unit_tests/model_providers/test_replicate_provider.py
@@ -0,0 +1,125 @@
+import pytest
+from unittest.mock import patch, MagicMock
+import json
+
+from core.model_providers.models.entity.model_params import ModelType
+from core.model_providers.providers.base import CredentialsValidateFailedError
+from core.model_providers.providers.replicate_provider import ReplicateProvider
+from models.provider import ProviderType, Provider, ProviderModel
+
+PROVIDER_NAME = 'replicate'
+MODEL_PROVIDER_CLASS = ReplicateProvider
+VALIDATE_CREDENTIAL = {
+    'model_version': 'fake-version',
+    'replicate_api_token': 'valid_key'
+}
+
+
+def encrypt_side_effect(tenant_id, encrypt_key):
+    return f'encrypted_{encrypt_key}'
+
+
+def decrypt_side_effect(tenant_id, encrypted_key):
+    return encrypted_key.replace('encrypted_', '')
+
+
+def test_is_credentials_valid_or_raise_valid(mocker):
+    mock_query = MagicMock()
+    mock_query.return_value = None
+    mocker.patch('replicate.model.ModelCollection.get', return_value=mock_query)
+    mocker.patch('replicate.model.Model.versions', return_value=mock_query)
+
+    MODEL_PROVIDER_CLASS.is_model_credentials_valid_or_raise(
+        model_name='test_model_name',
+        model_type=ModelType.TEXT_GENERATION,
+        credentials=VALIDATE_CREDENTIAL.copy()
+    )
+
+
+def test_is_credentials_valid_or_raise_invalid():
+    # raise CredentialsValidateFailedError if replicate_api_token is not in credentials
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_model_credentials_valid_or_raise(
+            model_name='test_model_name',
+            model_type=ModelType.TEXT_GENERATION,
+            credentials={}
+        )
+
+    # raise CredentialsValidateFailedError if replicate_api_token is invalid
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_model_credentials_valid_or_raise(
+            model_name='test_model_name',
+            model_type=ModelType.TEXT_GENERATION,
+            credentials={'replicate_api_token': 'invalid_key'})
+
+
+@patch('core.helper.encrypter.encrypt_token', side_effect=encrypt_side_effect)
+def test_encrypt_model_credentials(mock_encrypt):
+    api_key = 'valid_key'
+    result = MODEL_PROVIDER_CLASS.encrypt_model_credentials(
+        tenant_id='tenant_id',
+        model_name='test_model_name',
+        model_type=ModelType.TEXT_GENERATION,
+        credentials=VALIDATE_CREDENTIAL.copy()
+    )
+    mock_encrypt.assert_called_with('tenant_id', api_key)
+    assert result['replicate_api_token'] == f'encrypted_{api_key}'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_model_credentials_custom(mock_decrypt, mocker):
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=None,
+        is_valid=True,
+    )
+
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['replicate_api_token'] = 'encrypted_' + encrypted_credential['replicate_api_token']
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = ProviderModel(
+        encrypted_config=json.dumps(encrypted_credential)
+    )
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_model_credentials(
+        model_name='test_model_name',
+        model_type=ModelType.TEXT_GENERATION
+    )
+    assert result['replicate_api_token'] == 'valid_key'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_model_credentials_obfuscated(mock_decrypt, mocker):
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=None,
+        is_valid=True,
+    )
+
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['replicate_api_token'] = 'encrypted_' + encrypted_credential['replicate_api_token']
+
+    mock_query = MagicMock()
+    mock_query.filter.return_value.first.return_value = ProviderModel(
+        encrypted_config=json.dumps(encrypted_credential)
+    )
+    mocker.patch('extensions.ext_database.db.session.query', return_value=mock_query)
+
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_model_credentials(
+        model_name='test_model_name',
+        model_type=ModelType.TEXT_GENERATION,
+        obfuscated=True
+    )
+    middle_token = result['replicate_api_token'][6:-2]
+    assert len(middle_token) == max(len(VALIDATE_CREDENTIAL['replicate_api_token']) - 8, 0)
+    assert all(char == '*' for char in middle_token)
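The per-model credential tests fake the ProviderModel row with a chained MagicMock rather than a database; the chain's shape mirrors a query like db.session.query(ProviderModel).filter(...).first(). A standalone illustration of the pattern:

from unittest.mock import MagicMock

mock_query = MagicMock()
mock_query.filter.return_value.first.return_value = 'fake_row'

# Once db.session.query is patched to return mock_query, any
# query(...).filter(...).first() chain yields the canned row.
assert mock_query.filter('any', 'criteria').first() == 'fake_row'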
diff --git a/api/tests/unit_tests/model_providers/test_spark_provider.py b/api/tests/unit_tests/model_providers/test_spark_provider.py
new file mode 100644
index 000000000..7193221f1
--- /dev/null
+++ b/api/tests/unit_tests/model_providers/test_spark_provider.py
@@ -0,0 +1,97 @@
+import pytest
+from unittest.mock import patch
+import json
+
+from langchain.schema import AIMessage, ChatResult, ChatGeneration
+
+from core.model_providers.providers.base import CredentialsValidateFailedError
+from core.model_providers.providers.spark_provider import SparkProvider
+from models.provider import ProviderType, Provider
+
+
+PROVIDER_NAME = 'spark'
+MODEL_PROVIDER_CLASS = SparkProvider
+VALIDATE_CREDENTIAL = {
+    'app_id': 'valid_app_id',
+    'api_key': 'valid_key',
+    'api_secret': 'valid_secret'
+}
+
+
+def encrypt_side_effect(tenant_id, encrypt_key):
+    return f'encrypted_{encrypt_key}'
+
+
+def decrypt_side_effect(tenant_id, encrypted_key):
+    return encrypted_key.replace('encrypted_', '')
+
+
+def test_is_provider_credentials_valid_or_raise_valid(mocker):
+    mocker.patch('core.third_party.langchain.llms.spark.ChatSpark._generate',
+                 return_value=ChatResult(generations=[ChatGeneration(message=AIMessage(content="abc"))]))
+
+    MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise(VALIDATE_CREDENTIAL)
+
+
+def test_is_provider_credentials_valid_or_raise_invalid():
+    # raise CredentialsValidateFailedError if api_key is not in credentials
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise({})
+
+    credential = VALIDATE_CREDENTIAL.copy()
+    credential['api_key'] = 'invalid_key'
+
+    # raise CredentialsValidateFailedError if api_key is invalid
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise(credential)
+
+
+@patch('core.helper.encrypter.encrypt_token', side_effect=encrypt_side_effect)
+def test_encrypt_credentials(mock_encrypt):
+    result = MODEL_PROVIDER_CLASS.encrypt_provider_credentials('tenant_id', VALIDATE_CREDENTIAL.copy())
+    assert result['api_key'] == f'encrypted_{VALIDATE_CREDENTIAL["api_key"]}'
+    assert result['api_secret'] == f'encrypted_{VALIDATE_CREDENTIAL["api_secret"]}'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_custom(mock_decrypt):
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['api_key'] = 'encrypted_' + encrypted_credential['api_key']
+    encrypted_credential['api_secret'] = 'encrypted_' + encrypted_credential['api_secret']
+
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps(encrypted_credential),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials()
+    assert result['api_key'] == 'valid_key'
+    assert result['api_secret'] == 'valid_secret'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_obfuscated(mock_decrypt):
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['api_key'] = 'encrypted_' + encrypted_credential['api_key']
+    encrypted_credential['api_secret'] = 'encrypted_' + encrypted_credential['api_secret']
+
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps(encrypted_credential),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials(obfuscated=True)
+    middle_token = result['api_key'][6:-2]
+    middle_secret = result['api_secret'][6:-2]
+    assert len(middle_token) == max(len(VALIDATE_CREDENTIAL['api_key']) - 8, 0)
+    assert len(middle_secret) == max(len(VALIDATE_CREDENTIAL['api_secret']) - 8, 0)
+    assert all(char == '*' for char in middle_token)
+    assert all(char == '*' for char in middle_secret)
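ChatSpark is a chat model, so its stubbed _generate returns a ChatResult of ChatGeneration objects rather than the LLMResult used for completion-style providers. A shape check, constructed exactly as in the test above:

from langchain.schema import AIMessage, ChatGeneration, ChatResult

fake = ChatResult(generations=[ChatGeneration(message=AIMessage(content="abc"))])
# The provider's validation path only needs some text back from the model.
assert fake.generations[0].message.content == "abc"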
diff --git a/api/tests/unit_tests/model_providers/test_tongyi_provider.py b/api/tests/unit_tests/model_providers/test_tongyi_provider.py
new file mode 100644
index 000000000..275a1908f
--- /dev/null
+++ b/api/tests/unit_tests/model_providers/test_tongyi_provider.py
@@ -0,0 +1,90 @@
+import pytest
+from unittest.mock import patch
+import json
+
+from langchain.schema import LLMResult, Generation
+
+from core.model_providers.providers.base import CredentialsValidateFailedError
+from core.model_providers.providers.tongyi_provider import TongyiProvider
+from models.provider import ProviderType, Provider
+
+
+PROVIDER_NAME = 'tongyi'
+MODEL_PROVIDER_CLASS = TongyiProvider
+VALIDATE_CREDENTIAL = {
+    'dashscope_api_key': 'valid_key'
+}
+
+
+def encrypt_side_effect(tenant_id, encrypt_key):
+    return f'encrypted_{encrypt_key}'
+
+
+def decrypt_side_effect(tenant_id, encrypted_key):
+    return encrypted_key.replace('encrypted_', '')
+
+
+def test_is_provider_credentials_valid_or_raise_valid(mocker):
+    mocker.patch('langchain.llms.tongyi.Tongyi._generate', return_value=LLMResult(generations=[[Generation(text="abc")]]))
+
+    MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise(VALIDATE_CREDENTIAL)
+
+
+def test_is_provider_credentials_valid_or_raise_invalid():
+    # raise CredentialsValidateFailedError if api_key is not in credentials
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise({})
+
+    credential = VALIDATE_CREDENTIAL.copy()
+    credential['dashscope_api_key'] = 'invalid_key'
+
+    # raise CredentialsValidateFailedError if api_key is invalid
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise(credential)
+
+
+@patch('core.helper.encrypter.encrypt_token', side_effect=encrypt_side_effect)
+def test_encrypt_credentials(mock_encrypt):
+    api_key = 'valid_key'
+    result = MODEL_PROVIDER_CLASS.encrypt_provider_credentials('tenant_id', VALIDATE_CREDENTIAL.copy())
+    mock_encrypt.assert_called_with('tenant_id', api_key)
+    assert result['dashscope_api_key'] == f'encrypted_{api_key}'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_custom(mock_decrypt):
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['dashscope_api_key'] = 'encrypted_' + encrypted_credential['dashscope_api_key']
+
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps(encrypted_credential),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials()
+    assert result['dashscope_api_key'] == 'valid_key'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_obfuscated(mock_decrypt):
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['dashscope_api_key'] = 'encrypted_' + encrypted_credential['dashscope_api_key']
+
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps(encrypted_credential),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials(obfuscated=True)
+    middle_token = result['dashscope_api_key'][6:-2]
+    assert len(middle_token) == max(len(VALIDATE_CREDENTIAL['dashscope_api_key']) - 8, 0)
+    assert all(char == '*' for char in middle_token)
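Tongyi is stubbed with the completion-style result type instead: LLMResult holds one list of Generation objects per input prompt, hence the nested list in the patch above. Shape check:

from langchain.schema import Generation, LLMResult

fake = LLMResult(generations=[[Generation(text="abc")]])
# One prompt in, one list of candidate generations out.
assert fake.generations[0][0].text == "abc"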
diff --git a/api/tests/unit_tests/model_providers/test_wenxin_provider.py b/api/tests/unit_tests/model_providers/test_wenxin_provider.py
new file mode 100644
index 000000000..9f714bb6d
--- /dev/null
+++ b/api/tests/unit_tests/model_providers/test_wenxin_provider.py
@@ -0,0 +1,93 @@
+import pytest
+from unittest.mock import patch
+import json
+
+from core.model_providers.providers.base import CredentialsValidateFailedError
+from core.model_providers.providers.wenxin_provider import WenxinProvider
+from models.provider import ProviderType, Provider
+
+
+PROVIDER_NAME = 'wenxin'
+MODEL_PROVIDER_CLASS = WenxinProvider
+VALIDATE_CREDENTIAL = {
+    'api_key': 'valid_key',
+    'secret_key': 'valid_secret'
+}
+
+
+def encrypt_side_effect(tenant_id, encrypt_key):
+    return f'encrypted_{encrypt_key}'
+
+
+def decrypt_side_effect(tenant_id, encrypted_key):
+    return encrypted_key.replace('encrypted_', '')
+
+
+def test_is_provider_credentials_valid_or_raise_valid(mocker):
+    mocker.patch('core.third_party.langchain.llms.wenxin.Wenxin._call', return_value="abc")
+
+    MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise(VALIDATE_CREDENTIAL)
+
+
+def test_is_provider_credentials_valid_or_raise_invalid():
+    # raise CredentialsValidateFailedError if api_key is not in credentials
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise({})
+
+    credential = VALIDATE_CREDENTIAL.copy()
+    credential['api_key'] = 'invalid_key'
+
+    # raise CredentialsValidateFailedError if api_key is invalid
+    with pytest.raises(CredentialsValidateFailedError):
+        MODEL_PROVIDER_CLASS.is_provider_credentials_valid_or_raise(credential)
+
+
+@patch('core.helper.encrypter.encrypt_token', side_effect=encrypt_side_effect)
+def test_encrypt_credentials(mock_encrypt):
+    result = MODEL_PROVIDER_CLASS.encrypt_provider_credentials('tenant_id', VALIDATE_CREDENTIAL.copy())
+    assert result['api_key'] == f'encrypted_{VALIDATE_CREDENTIAL["api_key"]}'
+    assert result['secret_key'] == f'encrypted_{VALIDATE_CREDENTIAL["secret_key"]}'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_custom(mock_decrypt):
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['api_key'] = 'encrypted_' + encrypted_credential['api_key']
+    encrypted_credential['secret_key'] = 'encrypted_' + encrypted_credential['secret_key']
+
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps(encrypted_credential),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials()
+    assert result['api_key'] == 'valid_key'
+    assert result['secret_key'] == 'valid_secret'
+
+
+@patch('core.helper.encrypter.decrypt_token', side_effect=decrypt_side_effect)
+def test_get_credentials_obfuscated(mock_decrypt):
+    encrypted_credential = VALIDATE_CREDENTIAL.copy()
+    encrypted_credential['api_key'] = 'encrypted_' + encrypted_credential['api_key']
+    encrypted_credential['secret_key'] = 'encrypted_' + encrypted_credential['secret_key']
+
+    provider = Provider(
+        id='provider_id',
+        tenant_id='tenant_id',
+        provider_name=PROVIDER_NAME,
+        provider_type=ProviderType.CUSTOM.value,
+        encrypted_config=json.dumps(encrypted_credential),
+        is_valid=True,
+    )
+    model_provider = MODEL_PROVIDER_CLASS(provider=provider)
+    result = model_provider.get_provider_credentials(obfuscated=True)
+    middle_token = result['api_key'][6:-2]
+    middle_secret = result['secret_key'][6:-2]
+    assert len(middle_token) == max(len(VALIDATE_CREDENTIAL['api_key']) - 8, 0)
+    assert len(middle_secret) == max(len(VALIDATE_CREDENTIAL['secret_key']) - 8, 0)
+    assert all(char == '*' for char in middle_token)
+    assert all(char == '*' for char in middle_secret)
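The same three obfuscation assertions recur in every provider module above; if the suite grows, they could be consolidated into a shared helper along these lines (the helper name is invented here, not part of the PR):

def assert_obfuscated(masked: str, original: str) -> None:
    # First 6 and last 2 characters survive; everything between is '*'.
    middle = masked[6:-2]
    assert len(middle) == max(len(original) - 8, 0)
    assert all(char == '*' for char in middle)

assert_obfuscated('valid_*ey', 'valid_key')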