fix: Before publishing the app, previewing the TTS voice raises an er… (#21821)
Co-authored-by: 刘江波 <jiangbo721@163.com>
@@ -90,23 +90,11 @@ class ChatMessageTextApi(Resource):

             message_id = args.get("message_id", None)
             text = args.get("text", None)
-            if (
-                app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}
-                and app_model.workflow
-                and app_model.workflow.features_dict
-            ):
-                text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
-                if text_to_speech is None:
-                    raise ValueError("TTS is not enabled")
-                voice = args.get("voice") or text_to_speech.get("voice")
-            else:
-                try:
-                    if app_model.app_model_config is None:
-                        raise ValueError("AppModelConfig not found")
-                    voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
-                except Exception:
-                    voice = None
-            response = AudioService.transcript_tts(app_model=app_model, text=text, message_id=message_id, voice=voice)
+            voice = args.get("voice", None)
+            response = AudioService.transcript_tts(
+                app_model=app_model, text=text, voice=voice, message_id=message_id, is_draft=True
+            )
             return response
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
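The console preview endpoint no longer resolves the voice from the published workflow or AppModelConfig itself; it forwards the request to AudioService with is_draft=True. A minimal sketch of the resulting call path (the preview_tts helper is illustrative, not part of the patch):

# Illustrative only: the console preview now delegates voice resolution to
# AudioService instead of reading app_model.workflow directly.
from services.audio_service import AudioService


def preview_tts(app_model, text, voice=None, message_id=None):
    # is_draft=True makes the service read TTS settings from the draft workflow,
    # so previewing the voice before the app is published no longer raises.
    return AudioService.transcript_tts(
        app_model=app_model, text=text, voice=voice, message_id=message_id, is_draft=True
    )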
@@ -18,7 +18,6 @@ from controllers.console.app.error import (
 from controllers.console.explore.wraps import InstalledAppResource
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
-from models.model import AppMode
 from services.audio_service import AudioService
 from services.errors.audio import (
     AudioTooLargeServiceError,
@@ -79,19 +78,9 @@ class ChatTextApi(InstalledAppResource):

             message_id = args.get("message_id", None)
             text = args.get("text", None)
-            if (
-                app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}
-                and app_model.workflow
-                and app_model.workflow.features_dict
-            ):
-                text_to_speech = app_model.workflow.features_dict.get("text_to_speech")
-                voice = args.get("voice") or text_to_speech.get("voice")
-            else:
-                try:
-                    voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
-                except Exception:
-                    voice = None
-            response = AudioService.transcript_tts(app_model=app_model, message_id=message_id, voice=voice, text=text)
+            voice = args.get("voice", None)
+            response = AudioService.transcript_tts(app_model=app_model, text=text, voice=voice, message_id=message_id)
             return response
         except services.errors.app_model_config.AppModelConfigBrokenError:
             logging.exception("App model config broken.")
@@ -20,7 +20,7 @@ from controllers.service_api.app.error import (
 from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
-from models.model import App, AppMode, EndUser
+from models.model import App, EndUser
 from services.audio_service import AudioService
 from services.errors.audio import (
     AudioTooLargeServiceError,
@@ -78,20 +78,9 @@ class TextApi(Resource):

         message_id = args.get("message_id", None)
         text = args.get("text", None)
-        if (
-            app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}
-            and app_model.workflow
-            and app_model.workflow.features_dict
-        ):
-            text_to_speech = app_model.workflow.features_dict.get("text_to_speech", {})
-            voice = args.get("voice") or text_to_speech.get("voice")
-        else:
-            try:
-                voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
-            except Exception:
-                voice = None
+        voice = args.get("voice", None)
         response = AudioService.transcript_tts(
-            app_model=app_model, message_id=message_id, end_user=end_user.external_user_id, voice=voice, text=text
+            app_model=app_model, text=text, voice=voice, end_user=end_user.external_user_id, message_id=message_id
         )

         return response
@@ -19,7 +19,7 @@ from controllers.web.error import (
 from controllers.web.wraps import WebApiResource
 from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
 from core.model_runtime.errors.invoke import InvokeError
-from models.model import App, AppMode
+from models.model import App
 from services.audio_service import AudioService
 from services.errors.audio import (
     AudioTooLargeServiceError,
@@ -77,21 +77,9 @@ class TextApi(WebApiResource):

         message_id = args.get("message_id", None)
         text = args.get("text", None)
-        if (
-            app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}
-            and app_model.workflow
-            and app_model.workflow.features_dict
-        ):
-            text_to_speech = app_model.workflow.features_dict.get("text_to_speech", {})
-            voice = args.get("voice") or text_to_speech.get("voice")
-        else:
-            try:
-                voice = args.get("voice") or app_model.app_model_config.text_to_speech_dict.get("voice")
-            except Exception:
-                voice = None
-
+        voice = args.get("voice", None)
         response = AudioService.transcript_tts(
-            app_model=app_model, message_id=message_id, end_user=end_user.external_user_id, voice=voice, text=text
+            app_model=app_model, text=text, voice=voice, end_user=end_user.external_user_id, message_id=message_id
         )

         return response
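The installed-app, service-API, and web text-to-audio endpoints all follow the same pattern after this change: parse the request args and delegate entirely to AudioService, leaving is_draft at its default of False so published apps behave as before. A hedged sketch of that shared pattern (the helper name is illustrative):

# Illustrative sketch, not part of the patch: the common shape of the
# published-app endpoints after voice resolution moved into AudioService.
from services.audio_service import AudioService


def published_app_tts(app_model, args: dict, end_user=None):
    return AudioService.transcript_tts(
        app_model=app_model,
        text=args.get("text", None),
        voice=args.get("voice", None),
        # Only the service-API and web endpoints pass the end user through.
        end_user=end_user.external_user_id if end_user else None,
        message_id=args.get("message_id", None),
    )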
@@ -1,13 +1,16 @@
 import io
 import logging
 import uuid
+from collections.abc import Generator
 from typing import Optional

+from flask import Response, stream_with_context
 from werkzeug.datastructures import FileStorage

 from constants import AUDIO_EXTENSIONS
 from core.model_manager import ModelManager
 from core.model_runtime.entities.model_entities import ModelType
+from extensions.ext_database import db
 from models.model import App, AppMode, AppModelConfig, Message, MessageStatus
 from services.errors.audio import (
     AudioTooLargeServiceError,
@@ -16,6 +19,7 @@ from services.errors.audio import (
     ProviderNotSupportTextToSpeechServiceError,
     UnsupportedAudioTypeServiceError,
 )
+from services.workflow_service import WorkflowService

 FILE_SIZE = 30
 FILE_SIZE_LIMIT = FILE_SIZE * 1024 * 1024
@@ -74,35 +78,36 @@ class AudioService:
         voice: Optional[str] = None,
         end_user: Optional[str] = None,
         message_id: Optional[str] = None,
+        is_draft: bool = False,
     ):
-        from collections.abc import Generator
-
-        from flask import Response, stream_with_context
-
         from app import app
-        from extensions.ext_database import db

-        def invoke_tts(text_content: str, app_model: App, voice: Optional[str] = None):
+        def invoke_tts(text_content: str, app_model: App, voice: Optional[str] = None, is_draft: bool = False):
             with app.app_context():
-                if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
-                    workflow = app_model.workflow
-                    if workflow is None:
-                        raise ValueError("TTS is not enabled")
-
-                    features_dict = workflow.features_dict
-                    if "text_to_speech" not in features_dict or not features_dict["text_to_speech"].get("enabled"):
-                        raise ValueError("TTS is not enabled")
-
-                    voice = features_dict["text_to_speech"].get("voice") if voice is None else voice
-                else:
-                    if app_model.app_model_config is None:
-                        raise ValueError("AppModelConfig not found")
-                    text_to_speech_dict = app_model.app_model_config.text_to_speech_dict
-
-                    if not text_to_speech_dict.get("enabled"):
-                        raise ValueError("TTS is not enabled")
-
-                    voice = text_to_speech_dict.get("voice") if voice is None else voice
+                if voice is None:
+                    if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
+                        if is_draft:
+                            workflow = WorkflowService().get_draft_workflow(app_model=app_model)
+                        else:
+                            workflow = app_model.workflow
+                        if (
+                            workflow is None
+                            or "text_to_speech" not in workflow.features_dict
+                            or not workflow.features_dict["text_to_speech"].get("enabled")
+                        ):
+                            raise ValueError("TTS is not enabled")
+
+                        voice = workflow.features_dict["text_to_speech"].get("voice")
+                    else:
+                        if not is_draft:
+                            if app_model.app_model_config is None:
+                                raise ValueError("AppModelConfig not found")
+                            text_to_speech_dict = app_model.app_model_config.text_to_speech_dict
+
+                            if not text_to_speech_dict.get("enabled"):
+                                raise ValueError("TTS is not enabled")
+
+                            voice = text_to_speech_dict.get("voice")

                 model_manager = ModelManager()
                 model_instance = model_manager.get_default_model_instance(
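The voice resolution now centralized inside invoke_tts can be read as the standalone helper below. The helper name and shape are illustrative, not part of the patch, but the branching mirrors the new code: an explicit voice wins, workflow apps read the draft or published workflow's text_to_speech feature, and other apps fall back to the published AppModelConfig (skipped when previewing a draft).

# Illustrative restatement of the new voice-resolution rule.
from typing import Optional

from models.model import App, AppMode
from services.workflow_service import WorkflowService


def resolve_voice(app_model: App, requested: Optional[str], is_draft: bool) -> Optional[str]:
    if requested is not None:
        return requested  # an explicitly requested voice always wins
    if app_model.mode in {AppMode.ADVANCED_CHAT.value, AppMode.WORKFLOW.value}:
        workflow = WorkflowService().get_draft_workflow(app_model=app_model) if is_draft else app_model.workflow
        if (
            workflow is None
            or "text_to_speech" not in workflow.features_dict
            or not workflow.features_dict["text_to_speech"].get("enabled")
        ):
            raise ValueError("TTS is not enabled")
        return workflow.features_dict["text_to_speech"].get("voice")
    if not is_draft:
        if app_model.app_model_config is None:
            raise ValueError("AppModelConfig not found")
        text_to_speech_dict = app_model.app_model_config.text_to_speech_dict
        if not text_to_speech_dict.get("enabled"):
            raise ValueError("TTS is not enabled")
        return text_to_speech_dict.get("voice")
    # Draft, non-workflow app: leave the voice unset and let the TTS model use its default.
    return None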
@@ -136,14 +141,14 @@ class AudioService:
                     return None

                 else:
-                    response = invoke_tts(message.answer, app_model=app_model, voice=voice)
+                    response = invoke_tts(text_content=message.answer, app_model=app_model, voice=voice, is_draft=is_draft)
                     if isinstance(response, Generator):
                         return Response(stream_with_context(response), content_type="audio/mpeg")
                     return response
             else:
                 if text is None:
                     raise ValueError("Text is required")
-                response = invoke_tts(text, app_model, voice)
+                response = invoke_tts(text_content=text, app_model=app_model, voice=voice, is_draft=is_draft)
                 if isinstance(response, Generator):
                     return Response(stream_with_context(response), content_type="audio/mpeg")
                 return response
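Both call sites now use keyword arguments and forward is_draft; whatever invoke_tts returns is wrapped the same way, streaming a generator of MP3 chunks back as audio/mpeg. A hedged sketch of that wrapping step (the helper itself is not in the patch):

# Illustrative helper showing how the TTS result is turned into a streaming
# Flask response, as done at both call sites above.
from collections.abc import Generator

from flask import Response, stream_with_context


def as_audio_response(result):
    if isinstance(result, Generator):
        # Stream MP3 chunks to the client as the TTS model produces them.
        return Response(stream_with_context(result), content_type="audio/mpeg")
    return result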