fix typo recale to recalc (#2670)

Branch: pull/2673/head
Author: cola · committed by GitHub · 2 years ago
Parent: 83a6b0c626
Commit: 34387ec0f1

@@ -130,7 +130,7 @@ class AssistantCotApplicationRunner(BaseAssistantApplicationRunner):
                 input=query
             )
-            # recale llm max tokens
+            # recalc llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Generator[LLMResultChunk, None, None] = model_instance.invoke_llm(

@@ -105,7 +105,7 @@ class AssistantFunctionCallApplicationRunner(BaseAssistantApplicationRunner):
                 messages_ids=message_file_ids
             )
-            # recale llm max tokens
+            # recalc llm max tokens
             self.recalc_llm_max_tokens(self.model_config, prompt_messages)
             # invoke model
             chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm(
