chore(api/core): apply ruff reformatting (#7624)
@@ -46,7 +46,7 @@ class HostingConfiguration:
     def init_app(self, app: Flask) -> None:
         config = app.config
 
-        if config.get('EDITION') != 'CLOUD':
+        if config.get("EDITION") != "CLOUD":
             return
 
         self.provider_map["azure_openai"] = self.init_azure_openai(config)
@@ -65,7 +65,7 @@ class HostingConfiguration:
             credentials = {
                 "openai_api_key": app_config.get("HOSTED_AZURE_OPENAI_API_KEY"),
                 "openai_api_base": app_config.get("HOSTED_AZURE_OPENAI_API_BASE"),
-                "base_model_name": "gpt-35-turbo"
+                "base_model_name": "gpt-35-turbo",
             }
 
             quotas = []
@@ -77,26 +77,45 @@ class HostingConfiguration:
                     RestrictModel(model="gpt-4o", base_model_name="gpt-4o", model_type=ModelType.LLM),
                     RestrictModel(model="gpt-4o-mini", base_model_name="gpt-4o-mini", model_type=ModelType.LLM),
                     RestrictModel(model="gpt-4-32k", base_model_name="gpt-4-32k", model_type=ModelType.LLM),
-                    RestrictModel(model="gpt-4-1106-preview", base_model_name="gpt-4-1106-preview", model_type=ModelType.LLM),
-                    RestrictModel(model="gpt-4-vision-preview", base_model_name="gpt-4-vision-preview", model_type=ModelType.LLM),
+                    RestrictModel(
+                        model="gpt-4-1106-preview", base_model_name="gpt-4-1106-preview", model_type=ModelType.LLM
+                    ),
+                    RestrictModel(
+                        model="gpt-4-vision-preview", base_model_name="gpt-4-vision-preview", model_type=ModelType.LLM
+                    ),
                     RestrictModel(model="gpt-35-turbo", base_model_name="gpt-35-turbo", model_type=ModelType.LLM),
-                    RestrictModel(model="gpt-35-turbo-1106", base_model_name="gpt-35-turbo-1106", model_type=ModelType.LLM),
-                    RestrictModel(model="gpt-35-turbo-instruct", base_model_name="gpt-35-turbo-instruct", model_type=ModelType.LLM),
-                    RestrictModel(model="gpt-35-turbo-16k", base_model_name="gpt-35-turbo-16k", model_type=ModelType.LLM),
-                    RestrictModel(model="text-davinci-003", base_model_name="text-davinci-003", model_type=ModelType.LLM),
-                    RestrictModel(model="text-embedding-ada-002", base_model_name="text-embedding-ada-002", model_type=ModelType.TEXT_EMBEDDING),
-                    RestrictModel(model="text-embedding-3-small", base_model_name="text-embedding-3-small", model_type=ModelType.TEXT_EMBEDDING),
-                    RestrictModel(model="text-embedding-3-large", base_model_name="text-embedding-3-large", model_type=ModelType.TEXT_EMBEDDING),
-                ]
+                    RestrictModel(
+                        model="gpt-35-turbo-1106", base_model_name="gpt-35-turbo-1106", model_type=ModelType.LLM
+                    ),
+                    RestrictModel(
+                        model="gpt-35-turbo-instruct", base_model_name="gpt-35-turbo-instruct", model_type=ModelType.LLM
+                    ),
+                    RestrictModel(
+                        model="gpt-35-turbo-16k", base_model_name="gpt-35-turbo-16k", model_type=ModelType.LLM
+                    ),
+                    RestrictModel(
+                        model="text-davinci-003", base_model_name="text-davinci-003", model_type=ModelType.LLM
+                    ),
+                    RestrictModel(
+                        model="text-embedding-ada-002",
+                        base_model_name="text-embedding-ada-002",
+                        model_type=ModelType.TEXT_EMBEDDING,
+                    ),
+                    RestrictModel(
+                        model="text-embedding-3-small",
+                        base_model_name="text-embedding-3-small",
+                        model_type=ModelType.TEXT_EMBEDDING,
+                    ),
+                    RestrictModel(
+                        model="text-embedding-3-large",
+                        base_model_name="text-embedding-3-large",
+                        model_type=ModelType.TEXT_EMBEDDING,
+                    ),
+                ],
             )
             quotas.append(trial_quota)
 
-            return HostingProvider(
-                enabled=True,
-                credentials=credentials,
-                quota_unit=quota_unit,
-                quotas=quotas
-            )
+            return HostingProvider(enabled=True, credentials=credentials, quota_unit=quota_unit, quotas=quotas)
 
         return HostingProvider(
             enabled=False,
@@ -110,17 +129,12 @@ class HostingConfiguration:
         if app_config.get("HOSTED_OPENAI_TRIAL_ENABLED"):
             hosted_quota_limit = int(app_config.get("HOSTED_OPENAI_QUOTA_LIMIT", "200"))
             trial_models = self.parse_restrict_models_from_env(app_config, "HOSTED_OPENAI_TRIAL_MODELS")
-            trial_quota = TrialHostingQuota(
-                quota_limit=hosted_quota_limit,
-                restrict_models=trial_models
-            )
+            trial_quota = TrialHostingQuota(quota_limit=hosted_quota_limit, restrict_models=trial_models)
             quotas.append(trial_quota)
 
         if app_config.get("HOSTED_OPENAI_PAID_ENABLED"):
             paid_models = self.parse_restrict_models_from_env(app_config, "HOSTED_OPENAI_PAID_MODELS")
-            paid_quota = PaidHostingQuota(
-                restrict_models=paid_models
-            )
+            paid_quota = PaidHostingQuota(restrict_models=paid_models)
             quotas.append(paid_quota)
 
         if len(quotas) > 0:
@@ -134,12 +148,7 @@ class HostingConfiguration:
             if app_config.get("HOSTED_OPENAI_API_ORGANIZATION"):
                 credentials["openai_organization"] = app_config.get("HOSTED_OPENAI_API_ORGANIZATION")
 
-            return HostingProvider(
-                enabled=True,
-                credentials=credentials,
-                quota_unit=quota_unit,
-                quotas=quotas
-            )
+            return HostingProvider(enabled=True, credentials=credentials, quota_unit=quota_unit, quotas=quotas)
 
         return HostingProvider(
             enabled=False,
@@ -153,9 +162,7 @@ class HostingConfiguration:
 
         if app_config.get("HOSTED_ANTHROPIC_TRIAL_ENABLED"):
            hosted_quota_limit = int(app_config.get("HOSTED_ANTHROPIC_QUOTA_LIMIT", "0"))
-            trial_quota = TrialHostingQuota(
-                quota_limit=hosted_quota_limit
-            )
+            trial_quota = TrialHostingQuota(quota_limit=hosted_quota_limit)
             quotas.append(trial_quota)
 
         if app_config.get("HOSTED_ANTHROPIC_PAID_ENABLED"):
@@ -170,12 +177,7 @@ class HostingConfiguration:
             if app_config.get("HOSTED_ANTHROPIC_API_BASE"):
                 credentials["anthropic_api_url"] = app_config.get("HOSTED_ANTHROPIC_API_BASE")
 
-            return HostingProvider(
-                enabled=True,
-                credentials=credentials,
-                quota_unit=quota_unit,
-                quotas=quotas
-            )
+            return HostingProvider(enabled=True, credentials=credentials, quota_unit=quota_unit, quotas=quotas)
 
         return HostingProvider(
             enabled=False,
@@ -192,7 +194,7 @@ class HostingConfiguration:
                 enabled=True,
                 credentials=None,  # use credentials from the provider
                 quota_unit=quota_unit,
-                quotas=quotas
+                quotas=quotas,
             )
 
         return HostingProvider(
@@ -210,7 +212,7 @@ class HostingConfiguration:
                 enabled=True,
                 credentials=None,  # use credentials from the provider
                 quota_unit=quota_unit,
-                quotas=quotas
+                quotas=quotas,
             )
 
         return HostingProvider(
@@ -228,7 +230,7 @@ class HostingConfiguration:
                 enabled=True,
                 credentials=None,  # use credentials from the provider
                 quota_unit=quota_unit,
-                quotas=quotas
+                quotas=quotas,
             )
 
         return HostingProvider(
@@ -238,21 +240,19 @@ class HostingConfiguration:
 
     @staticmethod
     def init_moderation_config(app_config: Config) -> HostedModerationConfig:
-        if app_config.get("HOSTED_MODERATION_ENABLED") \
-                and app_config.get("HOSTED_MODERATION_PROVIDERS"):
+        if app_config.get("HOSTED_MODERATION_ENABLED") and app_config.get("HOSTED_MODERATION_PROVIDERS"):
             return HostedModerationConfig(
-                enabled=True,
-                providers=app_config.get("HOSTED_MODERATION_PROVIDERS").split(',')
+                enabled=True, providers=app_config.get("HOSTED_MODERATION_PROVIDERS").split(",")
             )
 
-        return HostedModerationConfig(
-            enabled=False
-        )
+        return HostedModerationConfig(enabled=False)
 
     @staticmethod
     def parse_restrict_models_from_env(app_config: Config, env_var: str) -> list[RestrictModel]:
         models_str = app_config.get(env_var)
         models_list = models_str.split(",") if models_str else []
-        return [RestrictModel(model=model_name.strip(), model_type=ModelType.LLM) for model_name in models_list if
-                model_name.strip()]
-
+        return [
+            RestrictModel(model=model_name.strip(), model_type=ModelType.LLM)
+            for model_name in models_list
+            if model_name.strip()
+        ]
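For reference, a minimal, runnable sketch of the parsing behavior that the reformatted parse_restrict_models_from_env comprehension implements: split a comma-separated env value, strip whitespace, and drop empty entries. The ModelType and RestrictModel definitions below are simplified stand-ins for illustration only; the real classes live elsewhere in the codebase and carry more fields (e.g. base_model_name).

from dataclasses import dataclass
from enum import Enum


class ModelType(Enum):  # simplified stand-in for the real enum
    LLM = "llm"


@dataclass
class RestrictModel:  # simplified stand-in for the real model class
    model: str
    model_type: ModelType


def parse_restrict_models(models_str: str | None) -> list[RestrictModel]:
    # Same shape as the reformatted helper above.
    models_list = models_str.split(",") if models_str else []
    return [
        RestrictModel(model=model_name.strip(), model_type=ModelType.LLM)
        for model_name in models_list
        if model_name.strip()
    ]


# Example: "gpt-4, gpt-4o ,," yields two entries, "gpt-4" and "gpt-4o".
print(parse_restrict_models("gpt-4, gpt-4o ,,"))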