feat: extend InstructionGenerateApi with node-aware generation and an ideal_output parameter

parent 807a6890ac
commit 35e40b5769
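
The endpoint now requires `flow_id`, accepts new `language` and `ideal_output` parameters, and dispatches on `node_id`/`current`: generating a fresh instruction for a workflow node, modifying a legacy app's prompt, or modifying a workflow node's prompt. A sketch of a request exercising the new fields (the route path, session cookie, and exact `model_config` shape are assumptions; only the JSON field names appear in this commit):

```python
# Hypothetical client call for InstructionGenerateApi; only the JSON
# field names are taken from this commit -- URL, auth, and the exact
# model_config shape are assumptions.
import requests

payload = {
    "flow_id": "app-uuid",                    # now required
    "node_id": "node-1",                      # "" selects the legacy path
    "current": "You are a helpful assistant.",
    "language": "python",                     # only used for code nodes
    "instruction": "Make the tone more formal.",
    "ideal_output": "Certainly. How may I assist you today?",  # new field
    "model_config": {"provider": "openai", "name": "gpt-4o"},  # assumed shape
}
resp = requests.post(
    "http://localhost:5001/console/api/instruction/generate",  # assumed route
    json=payload,
    cookies={"session": "..."},               # console login session; assumed
)
print(resp.json())
```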

@@ -1,7 +1,9 @@
 import os
+from typing import Sequence
 from flask_login import current_user
 from flask_restful import Resource, reqparse
+from opik.rest_api import BadRequestError
 from controllers.console import api
 from controllers.console.app.error import (
@@ -120,37 +122,63 @@ class InstructionGenerateApi(Resource):
     @account_initialization_required
     def post(self):
         parser = reqparse.RequestParser()
-        parser.add_argument("flow_id", type=str, required=False, default="", location="json")
+        parser.add_argument("flow_id", type=str, required=True, default="", location="json")
         parser.add_argument("node_id", type=str, required=False, default="", location="json")
         parser.add_argument("current", type=str, required=False, default="", location="json")
+        parser.add_argument("language", type=str, required=False, default="javascript", location="json")
         parser.add_argument("instruction", type=str, required=True, nullable=False, location="json")
         parser.add_argument("model_config", type=dict, required=True, nullable=False, location="json")
+        parser.add_argument("ideal_output", type=str, required=False, default="", location="json")
         args = parser.parse_args()
         try:
-            if args["flow_id"] == "" or args["current"] == "":  # Fallback for legacy endpoint
-                return LLMGenerator.generate_rule_config(
-                    current_user.current_tenant_id,
-                    instruction=args["instruction"],
-                    model_config=args["model_config"],
-                    no_variable=True
-                )
-            if args["node_id"] == "":  # For legacy app without a workflow
-                return LLMGenerator.instruction_modify_legacy(
-                    tenant_id=current_user.current_tenant_id,
-                    flow_id=args["flow_id"],
-                    current=args["current"],
-                    instruction=args["instruction"],
-                    model_config=args["model_config"],
-                )
-            return LLMGenerator.instruction_modify_workflow(
-                tenant_id=current_user.current_tenant_id,
-                flow_id=args["flow_id"],
-                node_id=args["node_id"],
-                current=args["current"],
-                instruction=args["instruction"],
-                model_config=args["model_config"],
-            )
+            if args["current"] == "" and args["node_id"] != "":  # Generate from nothing for a workflow node
+                from models import App, db
+                from services.workflow_service import WorkflowService
+                app = db.session.query(App).filter(App.id == args["flow_id"]).first()
+                workflow = WorkflowService().get_draft_workflow(app_model=app)
+                nodes: Sequence = workflow.graph_dict["nodes"]
+                node: dict | None = next((n for n in nodes if n["id"] == args["node_id"]), None)
+                if not node:
+                    raise BadRequestError(f"node {args['node_id']} not found")
+                match node_type := node["data"]["type"]:
+                    case "llm" | "agent":
+                        return LLMGenerator.generate_rule_config(
+                            current_user.current_tenant_id,
+                            instruction=args["instruction"],
+                            model_config=args["model_config"],
+                            no_variable=True
+                        )
+                    case "code":
+                        return LLMGenerator.generate_code(
+                            tenant_id=current_user.current_tenant_id,
+                            instruction=args["instruction"],
+                            model_config=args["model_config"],
+                            code_language=args["language"],
+                        )
+                    case _:
+                        raise BadRequestError(f"invalid node type: {node_type}")
+            if args["node_id"] == "" and args["current"] != "":  # For legacy app without a workflow
+                return LLMGenerator.instruction_modify_legacy(
+                    tenant_id=current_user.current_tenant_id,
+                    flow_id=args["flow_id"],
+                    current=args["current"],
+                    instruction=args["instruction"],
+                    model_config=args["model_config"],
+                    ideal_output=args["ideal_output"],
+                )
+            if args["node_id"] != "" and args["current"] != "":  # For workflow node
+                return LLMGenerator.instruction_modify_workflow(
+                    tenant_id=current_user.current_tenant_id,
+                    flow_id=args["flow_id"],
+                    node_id=args["node_id"],
+                    current=args["current"],
+                    instruction=args["instruction"],
+                    model_config=args["model_config"],
+                    ideal_output=args["ideal_output"],
+                )
+            raise BadRequestError("incompatible parameters")
         except ProviderTokenNotInitError as ex:
             raise ProviderNotInitializeError(ex.description)
         except QuotaExceededError:
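
Taken together, the handler's branches form a small dispatch table over `current` and `node_id`. A standalone sketch of the same routing (function name and return strings are illustrative, not part of the commit):

```python
# Mirror of the dispatch in InstructionGenerateApi.post: which generator
# handles which parameter combination. Illustrative only.
def dispatch(current: str, node_id: str) -> str:
    if current == "" and node_id != "":
        # Fresh generation for one workflow node (LLM/agent vs. code).
        return "generate_rule_config or generate_code (by node type)"
    if node_id == "" and current != "":
        # Legacy app without a workflow graph.
        return "instruction_modify_legacy"
    if node_id != "" and current != "":
        # Modify an existing workflow node's prompt.
        return "instruction_modify_workflow"
    raise ValueError("incompatible parameters")  # mirrors BadRequestError
```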

@@ -410,7 +410,8 @@ class LLMGenerator:
         flow_id: str,
         current: str,
         instruction: str,
-        model_config: dict
+        model_config: dict,
+        ideal_output: str | None
     ) -> dict:
         app: App = db.session.query(App).filter(App.id == flow_id).first()
         last_run: Message = (db.session.query(Message)
@@ -421,7 +422,7 @@ class LLMGenerator:
             return LLMGenerator.__instruction_modify_common(
                 tenant_id=tenant_id,
                 model_config=model_config,
-                last_run={ "error": "This hasn't been run" },
+                last_run=None,
                 current=current,
                 error_message="",
                 instruction=instruction,
@@ -440,6 +441,7 @@ class LLMGenerator:
             error_message=str(last_run.error),
             instruction=instruction,
             node_type="llm",
+            ideal_output=ideal_output
         )

     @staticmethod
@@ -449,7 +451,8 @@ class LLMGenerator:
         node_id: str,
         current: str,
         instruction: str,
-        model_config: dict
+        model_config: dict,
+        ideal_output: str | None
     ) -> dict:
         from services.workflow_service import WorkflowService
         app: App = db.session.query(App).filter(App.id == flow_id).first()
@@ -465,11 +468,12 @@ class LLMGenerator:
             return LLMGenerator.__instruction_modify_common(
                 tenant_id=tenant_id,
                 model_config=model_config,
-                last_run={ "error": "This hasn't been run" },
+                last_run=None,
                 current=current,
                 error_message="",
                 instruction=instruction,
                 node_type="llm",
+                ideal_output=ideal_output
             )

         def agent_log_of(node_execution: WorkflowNodeExecutionModel) -> Sequence:
@@ -483,7 +487,7 @@ class LLMGenerator:
"error": event.error,
"data": event.data,
}
return json.dumps(map(dict_of_event, parsed))
return [dict_of_event(event) for event in parsed]
last_run_dict = {
"inputs": last_run.inputs_dict,
"status": last_run.status,
@@ -499,6 +503,7 @@ class LLMGenerator:
             error_message=last_run.error,
             instruction=instruction,
             node_type=last_run.node_type,
+            ideal_output=ideal_output
         )
@@ -507,11 +512,12 @@ class LLMGenerator:
     def __instruction_modify_common(
         tenant_id: str,
         model_config: dict,
-        last_run: dict,
+        last_run: dict | None,
         current: str,
-        error_message: str,
+        error_message: str | None,
         instruction: str,
         node_type: str,
+        ideal_output: str | None
     ) -> dict:
         LAST_RUN = "{{#last_run#}}"
         CURRENT = "{{#current#}}"
@@ -543,6 +549,7 @@ class LLMGenerator:
"current": current,
"last_run": last_run,
"instruction": injected_instruction,
"ideal_output": ideal_output,
}
))
]
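
Two behavioural fixes ride along here: `last_run=None` replaces the placeholder error dict as the "never run" signal, and `agent_log_of` stops calling `json.dumps` on a `map` object, which is not JSON-serializable and raised `TypeError` at serialization time. A minimal reproduction of the latter:

```python
# Why the agent_log_of change matters: json.dumps cannot encode a map
# object, so the old expression raised TypeError; a list of plain dicts
# serializes cleanly.
import json

events = [{"error": None, "data": {"step": 1}}]

def to_dict(e: dict) -> dict:
    return {"error": e["error"], "data": e["data"]}

try:
    json.dumps(map(to_dict, events))   # old shape: raises TypeError
except TypeError as exc:
    print(exc)  # Object of type map is not JSON serializable

print(json.dumps([to_dict(e) for e in events]))  # new shape: works
```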

@@ -329,6 +329,10 @@ Both your input and output should be in JSON format.
"instruction": {
"type": "string",
"description": "User's instruction to edit the current prompt"
},
"ideal_output": {
"type": "string",
"description": "The ideal output that the user expects from the large language model after modifying the prompt."
}
}
}
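
With the schema extended, the rewriting model's input object gains an `ideal_output` field alongside the existing ones. An illustrative input (field names come from the template above and the `__instruction_modify_common` context; values are invented):

```python
# Illustrative payload assembled for the instruction-rewriting prompt.
# Field names come from this commit; the values are made up.
example_model_input = {
    "current": "You are a helpful assistant.",
    "last_run": None,  # None when the node has never been run
    "instruction": "Make the tone more formal.",
    "ideal_output": "Certainly. How may I assist you today?",
}
```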
