@@ -438,7 +438,11 @@ class LLMNode(BaseNode):
         stop = model_config.stop

         vision_enabled = node_data.vision.enabled
+        filtered_prompt_messages = []
         for prompt_message in prompt_messages:
+            if prompt_message.is_empty():
+                continue
+
             if not isinstance(prompt_message.content, str):
                 prompt_message_content = []
                 for content_item in prompt_message.content:
@@ -453,7 +457,13 @@ class LLMNode(BaseNode):
                       and prompt_message_content[0].type == PromptMessageContentType.TEXT):
                     prompt_message.content = prompt_message_content[0].data

-        return prompt_messages, stop
+            filtered_prompt_messages.append(prompt_message)
+
+        if not filtered_prompt_messages:
+            raise ValueError("No prompt found in the LLM configuration. "
+                             "Please ensure a prompt is properly configured before proceeding.")
+
+        return filtered_prompt_messages, stop

     @classmethod
     def deduct_llm_quota(cls, tenant_id: str, model_instance: ModelInstance, usage: LLMUsage) -> None:
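For context, here is a minimal, self-contained sketch of the filtering behavior this diff introduces: empty prompt messages are skipped, image content parts are dropped when vision is disabled, a lone remaining text part is collapsed back to a plain string, and an all-empty result raises. The `PromptMessage` classes below are simplified stand-ins for illustration, not the project's actual models.

```python
from dataclasses import dataclass
from enum import Enum
from typing import Union


class PromptMessageContentType(Enum):
    TEXT = "text"
    IMAGE = "image"


@dataclass
class PromptMessageContent:
    type: PromptMessageContentType
    data: str


@dataclass
class PromptMessage:
    # Content is either a plain string or a list of typed content parts.
    content: Union[str, list[PromptMessageContent]] = ""

    def is_empty(self) -> bool:
        return not self.content


def filter_prompt_messages(prompt_messages: list[PromptMessage],
                           vision_enabled: bool) -> list[PromptMessage]:
    filtered_prompt_messages: list[PromptMessage] = []
    for prompt_message in prompt_messages:
        # Skip messages with no content at all.
        if prompt_message.is_empty():
            continue

        if not isinstance(prompt_message.content, str):
            # Drop image parts when vision is disabled.
            prompt_message_content = [
                item for item in prompt_message.content
                if vision_enabled or item.type != PromptMessageContentType.IMAGE
            ]
            if len(prompt_message_content) > 1:
                prompt_message.content = prompt_message_content
            elif (len(prompt_message_content) == 1
                  and prompt_message_content[0].type == PromptMessageContentType.TEXT):
                # Collapse a single remaining text part back to a plain string.
                prompt_message.content = prompt_message_content[0].data

        filtered_prompt_messages.append(prompt_message)

    if not filtered_prompt_messages:
        raise ValueError("No prompt found in the LLM configuration. "
                         "Please ensure a prompt is properly configured before proceeding.")
    return filtered_prompt_messages


# Example: the empty message is dropped; the single text part collapses to "hi".
msgs = [PromptMessage(""),
        PromptMessage([PromptMessageContent(PromptMessageContentType.TEXT, "hi")])]
print(filter_prompt_messages(msgs, vision_enabled=False))
```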