import logging

from flask_restful import Resource, reqparse
from werkzeug.exceptions import InternalServerError, NotFound

import services
from controllers.service_api import api
from controllers.service_api.app.error import (
    AppUnavailableError,
    CompletionRequestError,
    ConversationCompletedError,
    NotChatAppError,
    ProviderModelCurrentlyNotSupportError,
    ProviderNotInitializeError,
    ProviderQuotaExceededError,
)
from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token
from controllers.web.error import InvokeRateLimitError as InvokeRateLimitHttpError
from core.app.apps.base_app_queue_manager import AppQueueManager
from core.app.entities.app_invoke_entities import InvokeFrom
from core.errors.error import (
    ModelCurrentlyNotSupportError,
    ProviderTokenNotInitError,
    QuotaExceededError,
)
from core.model_runtime.errors.invoke import InvokeError
from libs import helper
from libs.helper import uuid_value
from models.model import App, AppMode, EndUser
from services.app_generate_service import AppGenerateService
from services.errors.llm import InvokeRateLimitError


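# Text-completion endpoint; only apps in "completion" mode may call it.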
class CompletionApi(Resource):
    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
    def post(self, app_model: App, end_user: EndUser):
        if app_model.mode != "completion":
            raise AppUnavailableError()

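        # Request body schema; "response_mode" is treated as blocking unless explicitly set to "streaming".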
        parser = reqparse.RequestParser()
        parser.add_argument("inputs", type=dict, required=True, location="json")
        parser.add_argument("query", type=str, location="json", default="")
        parser.add_argument("files", type=list, required=False, location="json")
        parser.add_argument("response_mode", type=str, choices=["blocking", "streaming"], location="json")
        parser.add_argument("retriever_from", type=str, required=False, default="dev", location="json")

        args = parser.parse_args()

        streaming = args["response_mode"] == "streaming"

        args["auto_generate_name"] = False

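        # Invoke generation and map service- and provider-layer errors to Service API HTTP errors.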
        try:
            response = AppGenerateService.generate(
                app_model=app_model,
                user=end_user,
                args=args,
                invoke_from=InvokeFrom.SERVICE_API,
                streaming=streaming,
            )

            return helper.compact_generate_response(response)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logging.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeError as e:
            raise CompletionRequestError(e.description)
        except ValueError as e:
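            # re-raise so the generic Exception handler below does not mask it as a 500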
            raise e
        except Exception:
            logging.exception("internal server error.")
            raise InternalServerError()


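# Stops an in-progress completion generation identified by task_id for the requesting end user.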
class CompletionStopApi(Resource):
    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
    def post(self, app_model: App, end_user: EndUser, task_id):
        if app_model.mode != "completion":
            raise AppUnavailableError()

        AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)

        return {"result": "success"}, 200


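# Chat-message endpoint; restricted to chat, agent-chat, and advanced-chat app modes.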
class ChatApi(Resource):
    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
    def post(self, app_model: App, end_user: EndUser):
        app_mode = AppMode.value_of(app_model.mode)
        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
            raise NotChatAppError()

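        # Chat request schema; "query" is required and "conversation_id" is an optional UUID.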
        parser = reqparse.RequestParser()
        parser.add_argument("inputs", type=dict, required=True, location="json")
        parser.add_argument("query", type=str, required=True, location="json")
        parser.add_argument("files", type=list, required=False, location="json")
        parser.add_argument("response_mode", type=str, choices=["blocking", "streaming"], location="json")
        parser.add_argument("conversation_id", type=uuid_value, location="json")
        parser.add_argument("retriever_from", type=str, required=False, default="dev", location="json")
        parser.add_argument("auto_generate_name", type=bool, required=False, default=True, location="json")

        args = parser.parse_args()

        streaming = args["response_mode"] == "streaming"

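        # Invoke generation with the same error mapping as CompletionApi, plus rate-limit translation.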
        try:
            response = AppGenerateService.generate(
                app_model=app_model, user=end_user, args=args, invoke_from=InvokeFrom.SERVICE_API, streaming=streaming
            )

            return helper.compact_generate_response(response)
        except services.errors.conversation.ConversationNotExistsError:
            raise NotFound("Conversation Not Exists.")
        except services.errors.conversation.ConversationCompletedError:
            raise ConversationCompletedError()
        except services.errors.app_model_config.AppModelConfigBrokenError:
            logging.exception("App model config broken.")
            raise AppUnavailableError()
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeRateLimitError as ex:
            raise InvokeRateLimitHttpError(ex.description)
        except InvokeError as e:
            raise CompletionRequestError(e.description)
        except ValueError as e:
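            # re-raise so the generic Exception handler below does not mask it as a 500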
            raise e
        except Exception:
            logging.exception("internal server error.")
            raise InternalServerError()


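# Stops an in-progress chat generation identified by task_id for the requesting end user.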
class ChatStopApi(Resource):
    @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True))
    def post(self, app_model: App, end_user: EndUser, task_id):
        app_mode = AppMode.value_of(app_model.mode)
        if app_mode not in {AppMode.CHAT, AppMode.AGENT_CHAT, AppMode.ADVANCED_CHAT}:
            raise NotChatAppError()

        AppQueueManager.set_stop_flag(task_id, InvokeFrom.SERVICE_API, end_user.id)

        return {"result": "success"}, 200


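# Route registrations on the Service API.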
api.add_resource(CompletionApi, "/completion-messages")
api.add_resource(CompletionStopApi, "/completion-messages/<string:task_id>/stop")
api.add_resource(ChatApi, "/chat-messages")
api.add_resource(ChatStopApi, "/chat-messages/<string:task_id>/stop")