@@ -130,7 +130,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
                 input=query
             )
-            # recale llm max tokens
+            # recalc llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(
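For context, the corrected comment describes `self.recalc_llm_max_tokens(self.model_config, prompt_messages)`, which re-derives the completion budget right before the model call. A minimal sketch of that idea, assuming the goal is to clamp `max_tokens` so prompt plus completion fits the context window; the helper name and parameters below are illustrative, not the runner's actual implementation:

```python
# Sketch only: clamp the completion budget to what the context window allows.
# All names here are assumptions for illustration, not Dify's real signatures.
def recalc_max_tokens(context_window: int, prompt_token_count: int,
                      requested_max_tokens: int) -> int:
    """Return a max_tokens value that keeps prompt + completion within the window."""
    available = context_window - prompt_token_count
    return max(0, min(requested_max_tokens, available))


# Example: a 4096-token window with a 3500-token prompt leaves at most 596 tokens.
assert recalc_max_tokens(4096, 3500, 1024) == 596
```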
@@ -105,7 +105,7 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
                 messages_ids=message_file_ids
             chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
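In this second hunk, `invoke_llm` is annotated as returning either a `Generator[LLMResultChunk, None, None]` or an `LLMResult`, i.e. a streamed or a blocking response. A minimal sketch of consuming both shapes; the stand-in classes and the `delta`/`text` attributes are assumptions for illustration, not the actual Dify entities:

```python
from dataclasses import dataclass
from typing import Generator, Union


# Stand-ins for the result types named in the diff; attributes are assumed.
@dataclass
class LLMResultChunk:
    delta: str


@dataclass
class LLMResult:
    text: str


def collect_answer(result: Union[Generator[LLMResultChunk, None, None], LLMResult]) -> str:
    """Handle both the blocking result and the streamed chunks uniformly."""
    if isinstance(result, LLMResult):
        return result.text
    # Streaming case: concatenate the incremental deltas as they arrive.
    return "".join(chunk.delta for chunk in result)
```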