|
123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308 |
- from collections.abc import Sequence
-
- from flask_login import current_user
- from flask_restx import Resource, fields, reqparse
-
- from controllers.console import api, console_ns
- from controllers.console.app.error import (
- CompletionRequestError,
- ProviderModelCurrentlyNotSupportError,
- ProviderNotInitializeError,
- ProviderQuotaExceededError,
- )
- from controllers.console.wraps import account_initialization_required, setup_required
- from core.errors.error import ModelCurrentlyNotSupportError, ProviderTokenNotInitError, QuotaExceededError
- from core.helper.code_executor.javascript.javascript_code_provider import JavascriptCodeProvider
- from core.helper.code_executor.python3.python3_code_provider import Python3CodeProvider
- from core.llm_generator.llm_generator import LLMGenerator
- from core.model_runtime.errors.invoke import InvokeError
- from libs.login import login_required
-
-
@console_ns.route("/rule-generate")
class RuleGenerateApi(Resource):
    @api.doc("generate_rule_config")
    @api.doc(description="Generate rule configuration using LLM")
    @api.expect(
        api.model(
            "RuleGenerateRequest",
            {
                "instruction": fields.String(required=True, description="Rule generation instruction"),
                "model_config": fields.Raw(required=True, description="Model configuration"),
                "no_variable": fields.Boolean(required=True, default=False, description="Whether to exclude variables"),
            },
        )
    )
    @api.response(200, "Rule configuration generated successfully")
    @api.response(400, "Invalid request parameters")
    @api.response(402, "Provider quota exceeded")
    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        """Generate a rule (prompt) configuration from a natural-language instruction.

        Delegates to LLMGenerator.generate_rule_config for the current user's
        tenant and maps provider/model errors onto console HTTP errors.
        """
        request_parser = reqparse.RequestParser()
        request_parser.add_argument("instruction", type=str, required=True, nullable=False, location="json")
        request_parser.add_argument("model_config", type=dict, required=True, nullable=False, location="json")
        request_parser.add_argument("no_variable", type=bool, required=True, default=False, location="json")
        payload = request_parser.parse_args()

        try:
            return LLMGenerator.generate_rule_config(
                tenant_id=current_user.current_tenant_id,
                instruction=payload["instruction"],
                model_config=payload["model_config"],
                no_variable=payload["no_variable"],
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeError as e:
            raise CompletionRequestError(e.description)
-
-
@console_ns.route("/rule-code-generate")
class RuleCodeGenerateApi(Resource):
    @api.doc("generate_rule_code")
    @api.doc(description="Generate code rules using LLM")
    @api.expect(
        api.model(
            "RuleCodeGenerateRequest",
            {
                "instruction": fields.String(required=True, description="Code generation instruction"),
                "model_config": fields.Raw(required=True, description="Model configuration"),
                "no_variable": fields.Boolean(required=True, default=False, description="Whether to exclude variables"),
                "code_language": fields.String(
                    default="javascript", description="Programming language for code generation"
                ),
            },
        )
    )
    @api.response(200, "Code rules generated successfully")
    @api.response(400, "Invalid request parameters")
    @api.response(402, "Provider quota exceeded")
    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        """Generate code in the requested language from a natural-language instruction.

        Delegates to LLMGenerator.generate_code for the current user's tenant
        and maps provider/model errors onto console HTTP errors.
        """
        request_parser = reqparse.RequestParser()
        request_parser.add_argument("instruction", type=str, required=True, nullable=False, location="json")
        request_parser.add_argument("model_config", type=dict, required=True, nullable=False, location="json")
        # NOTE(review): "no_variable" is validated here but never forwarded to
        # generate_code — presumably intentional request-shape validation; confirm.
        request_parser.add_argument("no_variable", type=bool, required=True, default=False, location="json")
        request_parser.add_argument("code_language", type=str, required=False, default="javascript", location="json")
        payload = request_parser.parse_args()

        try:
            return LLMGenerator.generate_code(
                tenant_id=current_user.current_tenant_id,
                instruction=payload["instruction"],
                model_config=payload["model_config"],
                code_language=payload["code_language"],
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeError as e:
            raise CompletionRequestError(e.description)
-
-
@console_ns.route("/rule-structured-output-generate")
class RuleStructuredOutputGenerateApi(Resource):
    @api.doc("generate_structured_output")
    @api.doc(description="Generate structured output rules using LLM")
    @api.expect(
        api.model(
            "StructuredOutputGenerateRequest",
            {
                "instruction": fields.String(required=True, description="Structured output generation instruction"),
                "model_config": fields.Raw(required=True, description="Model configuration"),
            },
        )
    )
    @api.response(200, "Structured output generated successfully")
    @api.response(400, "Invalid request parameters")
    @api.response(402, "Provider quota exceeded")
    @setup_required
    @login_required
    @account_initialization_required
    def post(self):
        """Generate a structured-output (schema) definition from an instruction.

        Delegates to LLMGenerator.generate_structured_output for the current
        user's tenant and maps provider/model errors onto console HTTP errors.
        """
        request_parser = reqparse.RequestParser()
        request_parser.add_argument("instruction", type=str, required=True, nullable=False, location="json")
        request_parser.add_argument("model_config", type=dict, required=True, nullable=False, location="json")
        payload = request_parser.parse_args()

        try:
            return LLMGenerator.generate_structured_output(
                tenant_id=current_user.current_tenant_id,
                instruction=payload["instruction"],
                model_config=payload["model_config"],
            )
        except ProviderTokenNotInitError as ex:
            raise ProviderNotInitializeError(ex.description)
        except QuotaExceededError:
            raise ProviderQuotaExceededError()
        except ModelCurrentlyNotSupportError:
            raise ProviderModelCurrentlyNotSupportError()
        except InvokeError as e:
            raise CompletionRequestError(e.description)
-
-
- @console_ns.route("/instruction-generate")
- class InstructionGenerateApi(Resource):
- @api.doc("generate_instruction")
- @api.doc(description="Generate instruction for workflow nodes or general use")
- @api.expect(
- api.model(
- "InstructionGenerateRequest",
- {
- "flow_id": fields.String(required=True, description="Workflow/Flow ID"),
- "node_id": fields.String(description="Node ID for workflow context"),
- "current": fields.String(description="Current instruction text"),
- "language": fields.String(default="javascript", description="Programming language (javascript/python)"),
- "instruction": fields.String(required=True, description="Instruction for generation"),
- "model_config": fields.Raw(required=True, description="Model configuration"),
- "ideal_output": fields.String(description="Expected ideal output"),
- },
- )
- )
- @api.response(200, "Instruction generated successfully")
- @api.response(400, "Invalid request parameters or flow/workflow not found")
- @api.response(402, "Provider quota exceeded")
- @setup_required
- @login_required
- @account_initialization_required
- def post(self):
- parser = reqparse.RequestParser()
- parser.add_argument("flow_id", type=str, required=True, default="", location="json")
- parser.add_argument("node_id", type=str, required=False, default="", location="json")
- parser.add_argument("current", type=str, required=False, default="", location="json")
- parser.add_argument("language", type=str, required=False, default="javascript", location="json")
- parser.add_argument("instruction", type=str, required=True, nullable=False, location="json")
- parser.add_argument("model_config", type=dict, required=True, nullable=False, location="json")
- parser.add_argument("ideal_output", type=str, required=False, default="", location="json")
- args = parser.parse_args()
- code_template = (
- Python3CodeProvider.get_default_code()
- if args["language"] == "python"
- else (JavascriptCodeProvider.get_default_code())
- if args["language"] == "javascript"
- else ""
- )
- try:
- # Generate from nothing for a workflow node
- if (args["current"] == code_template or args["current"] == "") and args["node_id"] != "":
- from models import App, db
- from services.workflow_service import WorkflowService
-
- app = db.session.query(App).where(App.id == args["flow_id"]).first()
- if not app:
- return {"error": f"app {args['flow_id']} not found"}, 400
- workflow = WorkflowService().get_draft_workflow(app_model=app)
- if not workflow:
- return {"error": f"workflow {args['flow_id']} not found"}, 400
- nodes: Sequence = workflow.graph_dict["nodes"]
- node = [node for node in nodes if node["id"] == args["node_id"]]
- if len(node) == 0:
- return {"error": f"node {args['node_id']} not found"}, 400
- node_type = node[0]["data"]["type"]
- match node_type:
- case "llm":
- return LLMGenerator.generate_rule_config(
- current_user.current_tenant_id,
- instruction=args["instruction"],
- model_config=args["model_config"],
- no_variable=True,
- )
- case "agent":
- return LLMGenerator.generate_rule_config(
- current_user.current_tenant_id,
- instruction=args["instruction"],
- model_config=args["model_config"],
- no_variable=True,
- )
- case "code":
- return LLMGenerator.generate_code(
- tenant_id=current_user.current_tenant_id,
- instruction=args["instruction"],
- model_config=args["model_config"],
- code_language=args["language"],
- )
- case _:
- return {"error": f"invalid node type: {node_type}"}
- if args["node_id"] == "" and args["current"] != "": # For legacy app without a workflow
- return LLMGenerator.instruction_modify_legacy(
- tenant_id=current_user.current_tenant_id,
- flow_id=args["flow_id"],
- current=args["current"],
- instruction=args["instruction"],
- model_config=args["model_config"],
- ideal_output=args["ideal_output"],
- )
- if args["node_id"] != "" and args["current"] != "": # For workflow node
- return LLMGenerator.instruction_modify_workflow(
- tenant_id=current_user.current_tenant_id,
- flow_id=args["flow_id"],
- node_id=args["node_id"],
- current=args["current"],
- instruction=args["instruction"],
- model_config=args["model_config"],
- ideal_output=args["ideal_output"],
- )
- return {"error": "incompatible parameters"}, 400
- except ProviderTokenNotInitError as ex:
- raise ProviderNotInitializeError(ex.description)
- except QuotaExceededError:
- raise ProviderQuotaExceededError()
- except ModelCurrentlyNotSupportError:
- raise ProviderModelCurrentlyNotSupportError()
- except InvokeError as e:
- raise CompletionRequestError(e.description)
-
-
- @console_ns.route("/instruction-generate/template")
- class InstructionGenerationTemplateApi(Resource):
- @api.doc("get_instruction_template")
- @api.doc(description="Get instruction generation template")
- @api.expect(
- api.model(
- "InstructionTemplateRequest",
- {
- "instruction": fields.String(required=True, description="Template instruction"),
- "ideal_output": fields.String(description="Expected ideal output"),
- },
- )
- )
- @api.response(200, "Template retrieved successfully")
- @api.response(400, "Invalid request parameters")
- @setup_required
- @login_required
- @account_initialization_required
- def post(self):
- parser = reqparse.RequestParser()
- parser.add_argument("type", type=str, required=True, default=False, location="json")
- args = parser.parse_args()
- match args["type"]:
- case "prompt":
- from core.llm_generator.prompts import INSTRUCTION_GENERATE_TEMPLATE_PROMPT
-
- return {"data": INSTRUCTION_GENERATE_TEMPLATE_PROMPT}
- case "code":
- from core.llm_generator.prompts import INSTRUCTION_GENERATE_TEMPLATE_CODE
-
- return {"data": INSTRUCTION_GENERATE_TEMPLATE_CODE}
- case _:
- raise ValueError(f"Invalid type: {args['type']}")
|