From 35e40b576920ab822aa718499e627f2abde4a6fb Mon Sep 17 00:00:00 2001
From: stream
Date: Fri, 18 Jul 2025 15:43:37 +0800
Subject: [PATCH] feat: extend InstructionGenerateApi with language and
 ideal_output parameters

---
 api/controllers/console/app/generator.py | 58 ++++++++++++++++++------
 api/core/llm_generator/llm_generator.py  | 21 ++++++---
 api/core/llm_generator/prompts.py        |  4 ++
 3 files changed, 61 insertions(+), 22 deletions(-)

diff --git a/api/controllers/console/app/generator.py b/api/controllers/console/app/generator.py
index 71f0fdbcfe..f0c683811a 100644
--- a/api/controllers/console/app/generator.py
+++ b/api/controllers/console/app/generator.py
@@ -1,7 +1,9 @@
 import os
+from typing import Sequence
 
 from flask_login import current_user
 from flask_restful import Resource, reqparse
+from werkzeug.exceptions import BadRequest
 
 from controllers.console import api
 from controllers.console.app.error import (
@@ -120,37 +122,65 @@ class InstructionGenerateApi(Resource):
     @account_initialization_required
     def post(self):
         parser = reqparse.RequestParser()
-        parser.add_argument("flow_id", type=str, required=False, default="", location="json")
+        parser.add_argument("flow_id", type=str, required=True, nullable=False, location="json")
         parser.add_argument("node_id", type=str, required=False, default="", location="json")
         parser.add_argument("current", type=str, required=False, default="", location="json")
+        parser.add_argument("language", type=str, required=False, default="javascript", location="json")
         parser.add_argument("instruction", type=str, required=True, nullable=False, location="json")
         parser.add_argument("model_config", type=dict, required=True, nullable=False, location="json")
+        parser.add_argument("ideal_output", type=str, required=False, default="", location="json")
         args = parser.parse_args()
 
         try:
-            if args["flow_id"] == "" or args["current"] == "":  # Fallback for legacy endpoint
-                return LLMGenerator.generate_rule_config(
-                    current_user.current_tenant_id,
-                    instruction=args["instruction"],
-                    model_config=args["model_config"],
-                    no_variable=True
-                )
-            if args["node_id"] == "":  # For legacy app without a workflow
-                return LLMGenerator.instruction_modify_legacy(
-                    tenant_id=current_user.current_tenant_id,
-                    flow_id=args["flow_id"],
-                    current=args["current"],
-                    instruction=args["instruction"],
-                    model_config=args["model_config"],
-                )
-            return LLMGenerator.instruction_modify_workflow(
-                tenant_id=current_user.current_tenant_id,
-                flow_id=args["flow_id"],
-                node_id=args["node_id"],
-                current=args["current"],
-                instruction=args["instruction"],
-                model_config=args["model_config"],
-            )
+            if args["current"] == "" and args["node_id"] != "":  # Generate from scratch for a workflow node
+                from models import App, db
+                from services.workflow_service import WorkflowService
+
+                app = db.session.query(App).filter(App.id == args["flow_id"]).first()
+                if app is None:
+                    raise BadRequest(f"app {args['flow_id']} not found")
+                workflow = WorkflowService().get_draft_workflow(app_model=app)
+                nodes: Sequence = workflow.graph_dict["nodes"]
+                node = next((n for n in nodes if n["id"] == args["node_id"]), None)
+                if node is None:
+                    raise BadRequest(f"node {args['node_id']} not found")
+                match node_type := node["data"]["type"]:
+                    case "llm" | "agent":
+                        return LLMGenerator.generate_rule_config(
+                            current_user.current_tenant_id,
+                            instruction=args["instruction"],
+                            model_config=args["model_config"],
+                            no_variable=True,
+                        )
+                    case "code":
+                        return LLMGenerator.generate_code(
+                            tenant_id=current_user.current_tenant_id,
+                            instruction=args["instruction"],
+                            model_config=args["model_config"],
+                            code_language=args["language"],
+                        )
+                    case _:
+                        raise BadRequest(f"invalid node type: {node_type}")
+            if args["node_id"] == "" and args["current"] != "":  # For a legacy app without a workflow
+                return LLMGenerator.instruction_modify_legacy(
+                    tenant_id=current_user.current_tenant_id,
+                    flow_id=args["flow_id"],
+                    current=args["current"],
+                    instruction=args["instruction"],
+                    model_config=args["model_config"],
+                    ideal_output=args["ideal_output"],
+                )
+            if args["node_id"] != "" and args["current"] != "":  # Modify the prompt of a workflow node
+                return LLMGenerator.instruction_modify_workflow(
+                    tenant_id=current_user.current_tenant_id,
+                    flow_id=args["flow_id"],
+                    node_id=args["node_id"],
+                    current=args["current"],
+                    instruction=args["instruction"],
+                    model_config=args["model_config"],
+                    ideal_output=args["ideal_output"],
+                )
+            raise BadRequest("incompatible parameters: provide node_id, current, or both")
         except ProviderTokenNotInitError as ex:
             raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
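The controller above dispatches on which of node_id and current are non-empty: node_id alone
generates a fresh prompt (or code) for a workflow node, current alone serves a legacy app
without a workflow, and both together modify an existing node's prompt. A minimal client
sketch of the three request shapes, assuming a placeholder URL (the api.add_resource route
registration is not part of this patch) and an invented model_config shape:

    import requests

    BASE = "http://localhost:5001/console/api"  # placeholder; the real route is not shown here
    common = {
        "flow_id": "app-uuid",
        "instruction": "Be more concise",
        "model_config": {"provider": "openai", "name": "gpt-4o"},  # invented shape
    }

    # node_id only: generate from scratch for a workflow node
    requests.post(f"{BASE}/instruction-generate", json={**common, "node_id": "node-1", "language": "python"})

    # current only: legacy app without a workflow
    requests.post(f"{BASE}/instruction-generate", json={**common, "current": "You are a helpful bot."})

    # node_id and current: modify an existing workflow node's prompt
    requests.post(
        f"{BASE}/instruction-generate",
        json={**common, "node_id": "node-1", "current": "You are a helpful bot.", "ideal_output": "..."},
    )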
args["current"] != "": # For workflow node + return LLMGenerator.instruction_modify_workflow( tenant_id=current_user.current_tenant_id, flow_id=args["flow_id"], + node_id=args["node_id"], current=args["current"], instruction=args["instruction"], model_config=args["model_config"], + ideal_output=args["ideal_output"], ) - return LLMGenerator.instruction_modify_workflow( - tenant_id=current_user.current_tenant_id, - flow_id=args["flow_id"], - node_id=args["node_id"], - current=args["current"], - instruction=args["instruction"], - model_config=args["model_config"], - ) + raise BadRequestError("incompatible parameters") except ProviderTokenNotInitError as ex: raise ProviderNotInitializeError(ex.description) except QuotaExceededError: diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index 48495aa639..66d2b42ad8 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -410,7 +410,8 @@ class LLMGenerator: flow_id: str, current: str, instruction: str, - model_config: dict + model_config: dict, + ideal_output: str | None ) -> dict: app: App = db.session.query(App).filter(App.id == flow_id).first() last_run: Message = (db.session.query(Message) @@ -421,7 +422,7 @@ class LLMGenerator: return LLMGenerator.__instruction_modify_common( tenant_id=tenant_id, model_config=model_config, - last_run={ "error": "This hasn't been run" }, + last_run=None, current=current, error_message="", instruction=instruction, @@ -440,6 +441,7 @@ class LLMGenerator: error_message=str(last_run.error), instruction=instruction, node_type="llm", + ideal_output=ideal_output ) @staticmethod @@ -449,7 +451,8 @@ class LLMGenerator: node_id: str, current: str, instruction: str, - model_config: dict + model_config: dict, + ideal_output: str | None ) -> dict: from services.workflow_service import WorkflowService app: App = db.session.query(App).filter(App.id == flow_id).first() @@ -465,11 +468,12 @@ class LLMGenerator: return LLMGenerator.__instruction_modify_common( tenant_id=tenant_id, model_config=model_config, - last_run={ "error": "This hasn't been run" }, + last_run=None, current=current, error_message="", instruction=instruction, node_type="llm", + ideal_output=ideal_output ) def agent_log_of(node_execution: WorkflowNodeExecutionModel) -> Sequence: @@ -483,7 +487,7 @@ class LLMGenerator: "error": event.error, "data": event.data, } - return json.dumps(map(dict_of_event, parsed)) + return [dict_of_event(event) for event in parsed] last_run_dict = { "inputs": last_run.inputs_dict, "status": last_run.status, @@ -499,6 +503,7 @@ class LLMGenerator: error_message=last_run.error, instruction=instruction, node_type=last_run.node_type, + ideal_output=ideal_output ) @@ -507,11 +512,12 @@ class LLMGenerator: def __instruction_modify_common( tenant_id: str, model_config: dict, - last_run: dict, + last_run: dict | None, current: str, - error_message: str, + error_message: str | None, instruction: str, node_type: str, + ideal_output: str | None ) -> dict: LAST_RUN = "{{#last_run#}}" CURRENT = "{{#current#}}" @@ -543,6 +549,7 @@ class LLMGenerator: "current": current, "last_run": last_run, "instruction": injected_instruction, + "ideal_output": ideal_output, } )) ] diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index f7fd4007a4..f522e805c2 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -329,6 +329,10 @@ Both your input and output should be in JSON format. 
"instruction": { "type": "string", "description": "User's instruction to edit the current prompt" + }, + "ideal_output": { + "type": "string", + "description": "The ideal output that the user expects from the large language model after modifying the prompt." } } }