diff --git a/api/app_factory.py b/api/app_factory.py index 3a258be28f..81155cbacd 100644 --- a/api/app_factory.py +++ b/api/app_factory.py @@ -32,7 +32,7 @@ def create_app() -> DifyApp: initialize_extensions(app) end_time = time.perf_counter() if dify_config.DEBUG: - logging.info(f"Finished create_app ({round((end_time - start_time) * 1000, 2)} ms)") + logging.info("Finished create_app (%s ms)", round((end_time - start_time) * 1000, 2)) return app @@ -91,14 +91,14 @@ def initialize_extensions(app: DifyApp): is_enabled = ext.is_enabled() if hasattr(ext, "is_enabled") else True if not is_enabled: if dify_config.DEBUG: - logging.info(f"Skipped {short_name}") + logging.info("Skipped %s", short_name) continue start_time = time.perf_counter() ext.init_app(app) end_time = time.perf_counter() if dify_config.DEBUG: - logging.info(f"Loaded {short_name} ({round((end_time - start_time) * 1000, 2)} ms)") + logging.info("Loaded %s (%s ms)", short_name, round((end_time - start_time) * 1000, 2)) def create_migrations_app(): diff --git a/api/commands.py b/api/commands.py index c2e62ec261..a39bc4817a 100644 --- a/api/commands.py +++ b/api/commands.py @@ -735,7 +735,7 @@ where sites.id is null limit 1000""" except Exception: failed_app_ids.append(app_id) click.echo(click.style("Failed to fix missing site for app {}".format(app_id), fg="red")) - logging.exception(f"Failed to fix app related site missing issue, app_id: {app_id}") + logging.exception("Failed to fix app related site missing issue, app_id: %s", app_id) continue if not processed_count: diff --git a/api/configs/app_config.py b/api/configs/app_config.py index 20f8c40427..d3b1cf9d5b 100644 --- a/api/configs/app_config.py +++ b/api/configs/app_config.py @@ -41,7 +41,7 @@ class RemoteSettingsSourceFactory(PydanticBaseSettingsSource): case RemoteSettingsSourceName.NACOS: remote_source = NacosSettingsSource(current_state) case _: - logger.warning(f"Unsupported remote source: {remote_source_name}") + logger.warning("Unsupported remote source: %s", remote_source_name) return {} d: dict[str, Any] = {} diff --git a/api/configs/remote_settings_sources/apollo/client.py b/api/configs/remote_settings_sources/apollo/client.py index 88b30d3987..b3022d7fc9 100644 --- a/api/configs/remote_settings_sources/apollo/client.py +++ b/api/configs/remote_settings_sources/apollo/client.py @@ -76,7 +76,7 @@ class ApolloClient: code, body = http_request(url, timeout=3, headers=self._sign_headers(url)) if code == 200: if not body: - logger.error(f"get_json_from_net load configs failed, body is {body}") + logger.error("get_json_from_net load configs failed, body is %s", body) return None data = json.loads(body) data = data["configurations"] @@ -222,7 +222,7 @@ class ApolloClient: return if http_code == 200: if not body: - logger.error(f"_long_poll load configs failed,body is {body}") + logger.error("_long_poll load configs failed,body is %s", body) return data = json.loads(body) for entry in data: @@ -278,7 +278,7 @@ class ApolloClient: code, body = http_request(url, timeout=3, headers=self._sign_headers(url)) if code == 200: if not body: - logger.error(f"_do_heart_beat load configs failed,body is {body}") + logger.error("_do_heart_beat load configs failed,body is %s", body) return None data = json.loads(body) if self.last_release_key == data["releaseKey"]: diff --git a/api/controllers/console/auth/data_source_oauth.py b/api/controllers/console/auth/data_source_oauth.py index 4c9697cc32..4940b48754 100644 --- a/api/controllers/console/auth/data_source_oauth.py +++ 
b/api/controllers/console/auth/data_source_oauth.py @@ -81,7 +81,7 @@ class OAuthDataSourceBinding(Resource): oauth_provider.get_access_token(code) except requests.exceptions.HTTPError as e: logging.exception( - f"An error occurred during the OAuthCallback process with {provider}: {e.response.text}" + "An error occurred during the OAuthCallback process with %s: %s", provider, e.response.text ) return {"error": "OAuth data source process failed"}, 400 @@ -103,7 +103,9 @@ class OAuthDataSourceSync(Resource): try: oauth_provider.sync_data_source(binding_id) except requests.exceptions.HTTPError as e: - logging.exception(f"An error occurred during the OAuthCallback process with {provider}: {e.response.text}") + logging.exception( + "An error occurred during the OAuthCallback process with %s: %s", provider, e.response.text + ) return {"error": "OAuth data source process failed"}, 400 return {"result": "success"}, 200 diff --git a/api/controllers/console/auth/oauth.py b/api/controllers/console/auth/oauth.py index d0a4f3ff6d..4a6cb99390 100644 --- a/api/controllers/console/auth/oauth.py +++ b/api/controllers/console/auth/oauth.py @@ -80,7 +80,7 @@ class OAuthCallback(Resource): user_info = oauth_provider.get_user_info(token) except requests.exceptions.RequestException as e: error_text = e.response.text if e.response else str(e) - logging.exception(f"An error occurred during the OAuth process with {provider}: {error_text}") + logging.exception("An error occurred during the OAuth process with %s: %s", provider, error_text) return {"error": "OAuth process failed"}, 400 if invite_token and RegisterService.is_valid_invite_token(invite_token): diff --git a/api/controllers/console/datasets/datasets_document.py b/api/controllers/console/datasets/datasets_document.py index d14b208a4b..b6e91dd98e 100644 --- a/api/controllers/console/datasets/datasets_document.py +++ b/api/controllers/console/datasets/datasets_document.py @@ -970,7 +970,7 @@ class DocumentRetryApi(DocumentResource): raise DocumentAlreadyFinishedError() retry_documents.append(document) except Exception: - logging.exception(f"Failed to retry document, document id: {document_id}") + logging.exception("Failed to retry document, document id: %s", document_id) continue # retry document DocumentService.retry_document(dataset_id, retry_documents) diff --git a/api/controllers/console/explore/installed_app.py b/api/controllers/console/explore/installed_app.py index ffdf73c368..6d9f794307 100644 --- a/api/controllers/console/explore/installed_app.py +++ b/api/controllers/console/explore/installed_app.py @@ -74,7 +74,7 @@ class InstalledAppsListApi(Resource): ): res.append(installed_app) installed_app_list = res - logger.debug(f"installed_app_list: {installed_app_list}, user_id: {user_id}") + logger.debug("installed_app_list: %s, user_id: %s", installed_app_list, user_id) installed_app_list.sort( key=lambda app: ( diff --git a/api/controllers/console/version.py b/api/controllers/console/version.py index 447cc358f8..75a6a4c969 100644 --- a/api/controllers/console/version.py +++ b/api/controllers/console/version.py @@ -55,7 +55,7 @@ def _has_new_version(*, latest_version: str, current_version: str) -> bool: # Compare versions return latest > current except version.InvalidVersion: - logging.warning(f"Invalid version format: latest={latest_version}, current={current_version}") + logging.warning("Invalid version format: latest=%s, current=%s", latest_version, current_version) return False diff --git a/api/controllers/console/workspace/models.py 
b/api/controllers/console/workspace/models.py index 37d0f6c764..514d1084c4 100644 --- a/api/controllers/console/workspace/models.py +++ b/api/controllers/console/workspace/models.py @@ -73,8 +73,9 @@ class DefaultModelApi(Resource): ) except Exception as ex: logging.exception( - f"Failed to update default model, model type: {model_setting['model_type']}," - f" model:{model_setting.get('model')}" + "Failed to update default model, model type: %s, model: %s", + model_setting["model_type"], + model_setting.get("model"), ) raise ex @@ -160,8 +161,10 @@ class ModelProviderModelApi(Resource): ) except CredentialsValidateFailedError as ex: logging.exception( - f"Failed to save model credentials, tenant_id: {tenant_id}," - f" model: {args.get('model')}, model_type: {args.get('model_type')}" + "Failed to save model credentials, tenant_id: %s, model: %s, model_type: %s", + tenant_id, + args.get("model"), + args.get("model_type"), ) raise ValueError(str(ex)) diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index 610a5bb278..52ae20ee16 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -600,5 +600,5 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator): if len(e.args) > 0 and e.args[0] == "I/O operation on closed file.": # ignore this error raise GenerateTaskStoppedError() else: - logger.exception(f"Failed to process generate task pipeline, conversation_id: {conversation.id}") + logger.exception("Failed to process generate task pipeline, conversation_id: %s", conversation.id) raise e diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index dc27076a4d..abb8db34de 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -271,7 +271,7 @@ class AdvancedChatAppGenerateTaskPipeline: start_listener_time = time.time() yield MessageAudioStreamResponse(audio=audio_trunk.audio, task_id=task_id) except Exception: - logger.exception(f"Failed to listen audio message, task_id: {task_id}") + logger.exception("Failed to listen audio message, task_id: %s", task_id) break if tts_publisher: yield MessageAudioEndStreamResponse(audio="", task_id=task_id) diff --git a/api/core/app/apps/message_based_app_generator.py b/api/core/app/apps/message_based_app_generator.py index 7dd9904eeb..11c979765b 100644 --- a/api/core/app/apps/message_based_app_generator.py +++ b/api/core/app/apps/message_based_app_generator.py @@ -78,7 +78,7 @@ class MessageBasedAppGenerator(BaseAppGenerator): if len(e.args) > 0 and e.args[0] == "I/O operation on closed file.": # ignore this error raise GenerateTaskStoppedError() else: - logger.exception(f"Failed to handle response, conversation_id: {conversation.id}") + logger.exception("Failed to handle response, conversation_id: %s", conversation.id) raise e def _get_app_model_config(self, app_model: App, conversation: Optional[Conversation] = None) -> AppModelConfig: diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index 4c36f63c71..22b0234604 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -483,7 +483,7 @@ class WorkflowAppGenerator(BaseAppGenerator): try: runner.run() except GenerateTaskStoppedError as e: - logger.warning(f"Task stopped: {str(e)}") + logger.warning("Task stopped: %s", str(e)) 
pass except InvokeAuthorizationError: queue_manager.publish_error( @@ -540,6 +540,6 @@ class WorkflowAppGenerator(BaseAppGenerator): raise GenerateTaskStoppedError() else: logger.exception( - f"Fails to process generate task pipeline, task_id: {application_generate_entity.task_id}" + "Fails to process generate task pipeline, task_id: %s", application_generate_entity.task_id ) raise e diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index e31a316c56..b1e9a340bd 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -246,7 +246,7 @@ class WorkflowAppGenerateTaskPipeline: else: yield MessageAudioStreamResponse(audio=audio_trunk.audio, task_id=task_id) except Exception: - logger.exception(f"Fails to get audio trunk, task_id: {task_id}") + logger.exception("Fails to get audio trunk, task_id: %s", task_id) break if tts_publisher: yield MessageAudioEndStreamResponse(audio="", task_id=task_id) diff --git a/api/core/app/features/annotation_reply/annotation_reply.py b/api/core/app/features/annotation_reply/annotation_reply.py index 54dc69302a..b829340401 100644 --- a/api/core/app/features/annotation_reply/annotation_reply.py +++ b/api/core/app/features/annotation_reply/annotation_reply.py @@ -83,7 +83,7 @@ class AnnotationReplyFeature: return annotation except Exception as e: - logger.warning(f"Query annotation failed, exception: {str(e)}.") + logger.warning("Query annotation failed, exception: %s.", str(e)) return None return None diff --git a/api/core/app/task_pipeline/message_cycle_manager.py b/api/core/app/task_pipeline/message_cycle_manager.py index 824da0b934..f0e9425e3f 100644 --- a/api/core/app/task_pipeline/message_cycle_manager.py +++ b/api/core/app/task_pipeline/message_cycle_manager.py @@ -97,7 +97,7 @@ class MessageCycleManager: conversation.name = name except Exception as e: if dify_config.DEBUG: - logging.exception(f"generate conversation name failed, conversation_id: {conversation_id}") + logging.exception("generate conversation name failed, conversation_id: %s", conversation_id) pass db.session.merge(conversation) diff --git a/api/core/entities/provider_configuration.py b/api/core/entities/provider_configuration.py index af5c18e267..9aaa1f0b10 100644 --- a/api/core/entities/provider_configuration.py +++ b/api/core/entities/provider_configuration.py @@ -900,7 +900,7 @@ class ProviderConfiguration(BaseModel): credentials=copy_credentials, ) except Exception as ex: - logger.warning(f"get custom model schema failed, {ex}") + logger.warning("get custom model schema failed, %s", ex) continue if not custom_model_schema: @@ -1009,7 +1009,7 @@ class ProviderConfiguration(BaseModel): credentials=model_configuration.credentials, ) except Exception as ex: - logger.warning(f"get custom model schema failed, {ex}") + logger.warning("get custom model schema failed, %s", ex) continue if not custom_model_schema: diff --git a/api/core/extension/extensible.py b/api/core/extension/extensible.py index 06fdb089d4..557f7eb1ed 100644 --- a/api/core/extension/extensible.py +++ b/api/core/extension/extensible.py @@ -66,7 +66,7 @@ class Extensible: # Check for extension module file if (extension_name + ".py") not in file_names: - logging.warning(f"Missing {extension_name}.py file in {subdir_path}, Skip.") + logging.warning("Missing %s.py file in %s, Skip.", extension_name, subdir_path) continue # Check for builtin flag and position @@ -95,7 +95,7 @@ class Extensible: 
break if not extension_class: - logging.warning(f"Missing subclass of {cls.__name__} in {module_name}, Skip.") + logging.warning("Missing subclass of %s in %s, Skip.", cls.__name__, module_name) continue # Load schema if not builtin @@ -103,7 +103,7 @@ class Extensible: if not builtin: json_path = os.path.join(subdir_path, "schema.json") if not os.path.exists(json_path): - logging.warning(f"Missing schema.json file in {subdir_path}, Skip.") + logging.warning("Missing schema.json file in %s, Skip.", subdir_path) continue with open(json_path, encoding="utf-8") as f: diff --git a/api/core/helper/moderation.py b/api/core/helper/moderation.py index a324ac2767..86bac4119a 100644 --- a/api/core/helper/moderation.py +++ b/api/core/helper/moderation.py @@ -55,7 +55,7 @@ def check_moderation(tenant_id: str, model_config: ModelConfigWithCredentialsEnt if moderation_result is True: return True except Exception: - logger.exception(f"Fails to check moderation, provider_name: {provider_name}") + logger.exception("Fails to check moderation, provider_name: %s", provider_name) raise InvokeBadRequestError("Rate limit exceeded, please try again later.") return False diff --git a/api/core/helper/module_import_helper.py b/api/core/helper/module_import_helper.py index 9a041667e4..251309fa2c 100644 --- a/api/core/helper/module_import_helper.py +++ b/api/core/helper/module_import_helper.py @@ -30,7 +30,7 @@ def import_module_from_source(*, module_name: str, py_file_path: AnyStr, use_laz spec.loader.exec_module(module) return module except Exception as e: - logging.exception(f"Failed to load module {module_name} from script file '{py_file_path!r}'") + logging.exception("Failed to load module %s from script file '%s'", module_name, repr(py_file_path)) raise e diff --git a/api/core/helper/ssrf_proxy.py b/api/core/helper/ssrf_proxy.py index 11f245812e..329527633c 100644 --- a/api/core/helper/ssrf_proxy.py +++ b/api/core/helper/ssrf_proxy.py @@ -73,10 +73,12 @@ def make_request(method, url, max_retries=SSRF_DEFAULT_MAX_RETRIES, **kwargs): if response.status_code not in STATUS_FORCELIST: return response else: - logging.warning(f"Received status code {response.status_code} for URL {url} which is in the force list") + logging.warning( + "Received status code %s for URL %s which is in the force list", response.status_code, url + ) except httpx.RequestError as e: - logging.warning(f"Request to URL {url} failed on attempt {retries + 1}: {e}") + logging.warning("Request to URL %s failed on attempt %s: %s", url, retries + 1, e) if max_retries == 0: raise diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index 331ac933c8..f6bba25402 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -170,7 +170,7 @@ class LLMGenerator: error = str(e) error_step = "generate rule config" except Exception as e: - logging.exception(f"Failed to generate rule config, model: {model_config.get('name')}") + logging.exception("Failed to generate rule config, model: %s", model_config.get("name")) rule_config["error"] = str(e) rule_config["error"] = f"Failed to {error_step}. 
Error: {error}" if error else "" @@ -267,7 +267,7 @@ class LLMGenerator: error_step = "generate conversation opener" except Exception as e: - logging.exception(f"Failed to generate rule config, model: {model_config.get('name')}") + logging.exception("Failed to generate rule config, model: %s", model_config.get("name")) rule_config["error"] = str(e) rule_config["error"] = f"Failed to {error_step}. Error: {error}" if error else "" @@ -323,7 +323,7 @@ class LLMGenerator: return {"code": "", "language": code_language, "error": f"Failed to generate code. Error: {error}"} except Exception as e: logging.exception( - f"Failed to invoke LLM model, model: {model_config.get('name')}, language: {code_language}" + "Failed to invoke LLM model, model: %s, language: %s", model_config.get("name"), code_language ) return {"code": "", "language": code_language, "error": f"An unexpected error occurred: {str(e)}"} @@ -395,5 +395,5 @@ class LLMGenerator: error = str(e) return {"output": "", "error": f"Failed to generate JSON Schema. Error: {error}"} except Exception as e: - logging.exception(f"Failed to invoke LLM model, model: {model_config.get('name')}") + logging.exception("Failed to invoke LLM model, model: %s", model_config.get("name")) return {"output": "", "error": f"An unexpected error occurred: {str(e)}"} diff --git a/api/core/mcp/client/sse_client.py b/api/core/mcp/client/sse_client.py index 91debcc8f9..4226e77f7e 100644 --- a/api/core/mcp/client/sse_client.py +++ b/api/core/mcp/client/sse_client.py @@ -88,7 +88,7 @@ class SSETransport: status_queue: Queue to put status updates. """ endpoint_url = urljoin(self.url, sse_data) - logger.info(f"Received endpoint URL: {endpoint_url}") + logger.info("Received endpoint URL: %s", endpoint_url) if not self._validate_endpoint_url(endpoint_url): error_msg = f"Endpoint origin does not match connection origin: {endpoint_url}" @@ -107,7 +107,7 @@ class SSETransport: """ try: message = types.JSONRPCMessage.model_validate_json(sse_data) - logger.debug(f"Received server message: {message}") + logger.debug("Received server message: %s", message) session_message = SessionMessage(message) read_queue.put(session_message) except Exception as exc: @@ -128,7 +128,7 @@ class SSETransport: case "message": self._handle_message_event(sse.data, read_queue) case _: - logger.warning(f"Unknown SSE event: {sse.event}") + logger.warning("Unknown SSE event: %s", sse.event) def sse_reader(self, event_source, read_queue: ReadQueue, status_queue: StatusQueue) -> None: """Read and process SSE events. @@ -142,7 +142,7 @@ class SSETransport: for sse in event_source.iter_sse(): self._handle_sse_event(sse, read_queue, status_queue) except httpx.ReadError as exc: - logger.debug(f"SSE reader shutting down normally: {exc}") + logger.debug("SSE reader shutting down normally: %s", exc) except Exception as exc: read_queue.put(exc) finally: @@ -165,7 +165,7 @@ class SSETransport: ), ) response.raise_for_status() - logger.debug(f"Client message sent successfully: {response.status_code}") + logger.debug("Client message sent successfully: %s", response.status_code) def post_writer(self, client: httpx.Client, endpoint_url: str, write_queue: WriteQueue) -> None: """Handle writing messages to the server. 
@@ -190,7 +190,7 @@ class SSETransport: except queue.Empty: continue except httpx.ReadError as exc: - logger.debug(f"Post writer shutting down normally: {exc}") + logger.debug("Post writer shutting down normally: %s", exc) except Exception as exc: logger.exception("Error writing messages") write_queue.put(exc) @@ -326,7 +326,7 @@ def send_message(http_client: httpx.Client, endpoint_url: str, session_message: ), ) response.raise_for_status() - logger.debug(f"Client message sent successfully: {response.status_code}") + logger.debug("Client message sent successfully: %s", response.status_code) except Exception as exc: logger.exception("Error sending message") raise @@ -349,13 +349,13 @@ def read_messages( if sse.event == "message": try: message = types.JSONRPCMessage.model_validate_json(sse.data) - logger.debug(f"Received server message: {message}") + logger.debug("Received server message: %s", message) yield SessionMessage(message) except Exception as exc: logger.exception("Error parsing server message") yield exc else: - logger.warning(f"Unknown SSE event: {sse.event}") + logger.warning("Unknown SSE event: %s", sse.event) except Exception as exc: logger.exception("Error reading SSE messages") yield exc diff --git a/api/core/mcp/client/streamable_client.py b/api/core/mcp/client/streamable_client.py index fbd8d05f9e..ca414ebb93 100644 --- a/api/core/mcp/client/streamable_client.py +++ b/api/core/mcp/client/streamable_client.py @@ -129,7 +129,7 @@ class StreamableHTTPTransport: new_session_id = response.headers.get(MCP_SESSION_ID) if new_session_id: self.session_id = new_session_id - logger.info(f"Received session ID: {self.session_id}") + logger.info("Received session ID: %s", self.session_id) def _handle_sse_event( self, @@ -142,7 +142,7 @@ class StreamableHTTPTransport: if sse.event == "message": try: message = JSONRPCMessage.model_validate_json(sse.data) - logger.debug(f"SSE message: {message}") + logger.debug("SSE message: %s", message) # If this is a response and we have original_request_id, replace it if original_request_id is not None and isinstance(message.root, JSONRPCResponse | JSONRPCError): @@ -168,7 +168,7 @@ class StreamableHTTPTransport: logger.debug("Received ping event") return False else: - logger.warning(f"Unknown SSE event: {sse.event}") + logger.warning("Unknown SSE event: %s", sse.event) return False def handle_get_stream( @@ -197,7 +197,7 @@ class StreamableHTTPTransport: self._handle_sse_event(sse, server_to_client_queue) except Exception as exc: - logger.debug(f"GET stream error (non-fatal): {exc}") + logger.debug("GET stream error (non-fatal): %s", exc) def _handle_resumption_request(self, ctx: RequestContext) -> None: """Handle a resumption request using GET with SSE.""" @@ -352,7 +352,7 @@ class StreamableHTTPTransport: # Check if this is a resumption request is_resumption = bool(metadata and metadata.resumption_token) - logger.debug(f"Sending client message: {message}") + logger.debug("Sending client message: %s", message) # Handle initialized notification if self._is_initialized_notification(message): @@ -389,9 +389,9 @@ class StreamableHTTPTransport: if response.status_code == 405: logger.debug("Server does not allow session termination") elif response.status_code != 200: - logger.warning(f"Session termination failed: {response.status_code}") + logger.warning("Session termination failed: %s", response.status_code) except Exception as exc: - logger.warning(f"Session termination failed: {exc}") + logger.warning("Session termination failed: %s", exc) def 
get_session_id(self) -> str | None: """Get the current session ID.""" diff --git a/api/core/mcp/mcp_client.py b/api/core/mcp/mcp_client.py index 5fe52c008a..875d13de05 100644 --- a/api/core/mcp/mcp_client.py +++ b/api/core/mcp/mcp_client.py @@ -75,7 +75,7 @@ class MCPClient: self.connect_server(client_factory, method_name) else: try: - logger.debug(f"Not supported method {method_name} found in URL path, trying default 'mcp' method.") + logger.debug("Not supported method %s found in URL path, trying default 'mcp' method.", method_name) self.connect_server(sse_client, "sse") except MCPConnectionError: logger.debug("MCP connection failed with 'sse', falling back to 'mcp' method.") diff --git a/api/core/mcp/session/base_session.py b/api/core/mcp/session/base_session.py index 7734b8fdd9..3b6c9a7424 100644 --- a/api/core/mcp/session/base_session.py +++ b/api/core/mcp/session/base_session.py @@ -368,7 +368,7 @@ class BaseSession( self._handle_incoming(notification) except Exception as e: # For other validation errors, log and continue - logging.warning(f"Failed to validate notification: {e}. Message was: {message.message.root}") + logging.warning("Failed to validate notification: %s. Message was: %s", e, message.message.root) else: # Response or error response_queue = self._response_streams.get(message.message.root.id) if response_queue is not None: diff --git a/api/core/model_manager.py b/api/core/model_manager.py index 4886ffe244..97e2880567 100644 --- a/api/core/model_manager.py +++ b/api/core/model_manager.py @@ -535,9 +535,15 @@ class LBModelManager: if dify_config.DEBUG: logger.info( - f"Model LB\nid: {config.id}\nname:{config.name}\n" - f"tenant_id: {self._tenant_id}\nprovider: {self._provider}\n" - f"model_type: {self._model_type.value}\nmodel: {self._model}" + "Model LB\nid: %s\nname:%s\n" + "tenant_id: %s\nprovider: %s\n" + "model_type: %s\nmodel: %s", + config.id, + config.name, + self._tenant_id, + self._provider, + self._model_type.value, + self._model, ) return config diff --git a/api/core/model_runtime/model_providers/__base/large_language_model.py b/api/core/model_runtime/model_providers/__base/large_language_model.py index e2cc576f83..ce378b443d 100644 --- a/api/core/model_runtime/model_providers/__base/large_language_model.py +++ b/api/core/model_runtime/model_providers/__base/large_language_model.py @@ -440,7 +440,9 @@ class LargeLanguageModel(AIModel): if callback.raise_error: raise e else: - logger.warning(f"Callback {callback.__class__.__name__} on_before_invoke failed with error {e}") + logger.warning( + "Callback %s on_before_invoke failed with error %s", callback.__class__.__name__, e + ) def _trigger_new_chunk_callbacks( self, @@ -487,7 +489,7 @@ class LargeLanguageModel(AIModel): if callback.raise_error: raise e else: - logger.warning(f"Callback {callback.__class__.__name__} on_new_chunk failed with error {e}") + logger.warning("Callback %s on_new_chunk failed with error %s", callback.__class__.__name__, e) def _trigger_after_invoke_callbacks( self, @@ -535,7 +537,9 @@ class LargeLanguageModel(AIModel): if callback.raise_error: raise e else: - logger.warning(f"Callback {callback.__class__.__name__} on_after_invoke failed with error {e}") + logger.warning( + "Callback %s on_after_invoke failed with error %s", callback.__class__.__name__, e + ) def _trigger_invoke_error_callbacks( self, @@ -583,4 +587,6 @@ class LargeLanguageModel(AIModel): if callback.raise_error: raise e else: - logger.warning(f"Callback {callback.__class__.__name__} on_invoke_error failed 
with error {e}") + logger.warning( + "Callback %s on_invoke_error failed with error %s", callback.__class__.__name__, e + ) diff --git a/api/core/moderation/output_moderation.py b/api/core/moderation/output_moderation.py index 2ec315417f..b39db4b7ff 100644 --- a/api/core/moderation/output_moderation.py +++ b/api/core/moderation/output_moderation.py @@ -136,6 +136,6 @@ class OutputModeration(BaseModel): result: ModerationOutputsResult = moderation_factory.moderation_for_outputs(moderation_buffer) return result except Exception as e: - logger.exception(f"Moderation Output error, app_id: {app_id}") + logger.exception("Moderation Output error, app_id: %s", app_id) return None diff --git a/api/core/ops/aliyun_trace/aliyun_trace.py b/api/core/ops/aliyun_trace/aliyun_trace.py index cf367efdf0..9dd830a023 100644 --- a/api/core/ops/aliyun_trace/aliyun_trace.py +++ b/api/core/ops/aliyun_trace/aliyun_trace.py @@ -97,7 +97,7 @@ class AliyunDataTrace(BaseTraceInstance): try: return self.trace_client.get_project_url() except Exception as e: - logger.info(f"Aliyun get run url failed: {str(e)}", exc_info=True) + logger.info("Aliyun get run url failed: %s", str(e), exc_info=True) raise ValueError(f"Aliyun get run url failed: {str(e)}") def workflow_trace(self, trace_info: WorkflowTraceInfo): @@ -286,7 +286,7 @@ class AliyunDataTrace(BaseTraceInstance): node_span = self.build_workflow_task_span(trace_id, workflow_span_id, trace_info, node_execution) return node_span except Exception as e: - logging.debug(f"Error occurred in build_workflow_node_span: {e}", exc_info=True) + logging.debug("Error occurred in build_workflow_node_span: %s", e, exc_info=True) return None def get_workflow_node_status(self, node_execution: WorkflowNodeExecution) -> Status: diff --git a/api/core/ops/aliyun_trace/data_exporter/traceclient.py b/api/core/ops/aliyun_trace/data_exporter/traceclient.py index ba5ac3f420..934ce95a64 100644 --- a/api/core/ops/aliyun_trace/data_exporter/traceclient.py +++ b/api/core/ops/aliyun_trace/data_exporter/traceclient.py @@ -69,10 +69,10 @@ class TraceClient: if response.status_code == 405: return True else: - logger.debug(f"AliyunTrace API check failed: Unexpected status code: {response.status_code}") + logger.debug("AliyunTrace API check failed: Unexpected status code: %s", response.status_code) return False except requests.exceptions.RequestException as e: - logger.debug(f"AliyunTrace API check failed: {str(e)}") + logger.debug("AliyunTrace API check failed: %s", str(e)) raise ValueError(f"AliyunTrace API check failed: {str(e)}") def get_project_url(self): @@ -109,7 +109,7 @@ class TraceClient: try: self.exporter.export(spans_to_export) except Exception as e: - logger.debug(f"Error exporting spans: {e}") + logger.debug("Error exporting spans: %s", e) def shutdown(self): with self.condition: diff --git a/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py b/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py index 1b72a4775a..f252a022d8 100644 --- a/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py +++ b/api/core/ops/arize_phoenix_trace/arize_phoenix_trace.py @@ -77,10 +77,10 @@ def setup_tracer(arize_phoenix_config: ArizeConfig | PhoenixConfig) -> tuple[tra # Create a named tracer instead of setting the global provider tracer_name = f"arize_phoenix_tracer_{arize_phoenix_config.project}" - logger.info(f"[Arize/Phoenix] Created tracer with name: {tracer_name}") + logger.info("[Arize/Phoenix] Created tracer with name: %s", tracer_name) return cast(trace_sdk.Tracer, 
provider.get_tracer(tracer_name)), processor except Exception as e: - logger.error(f"[Arize/Phoenix] Failed to setup the tracer: {str(e)}", exc_info=True) + logger.error("[Arize/Phoenix] Failed to setup the tracer: %s", str(e), exc_info=True) raise @@ -120,7 +120,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance): self.file_base_url = os.getenv("FILES_URL", "http://127.0.0.1:5001") def trace(self, trace_info: BaseTraceInfo): - logger.info(f"[Arize/Phoenix] Trace: {trace_info}") + logger.info("[Arize/Phoenix] Trace: %s", trace_info) try: if isinstance(trace_info, WorkflowTraceInfo): self.workflow_trace(trace_info) @@ -138,7 +138,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance): self.generate_name_trace(trace_info) except Exception as e: - logger.error(f"[Arize/Phoenix] Error in the trace: {str(e)}", exc_info=True) + logger.error("[Arize/Phoenix] Error in the trace: %s", str(e), exc_info=True) raise def workflow_trace(self, trace_info: WorkflowTraceInfo): @@ -570,7 +570,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance): trace_id = uuid_to_trace_id(trace_info.message_id) tool_span_id = RandomIdGenerator().generate_span_id() - logger.info(f"[Arize/Phoenix] Creating tool trace with trace_id: {trace_id}, span_id: {tool_span_id}") + logger.info("[Arize/Phoenix] Creating tool trace with trace_id: %s, span_id: %s", trace_id, tool_span_id) # Create span context with the same trace_id as the parent # todo: Create with the appropriate parent span context, so that the tool span is @@ -673,7 +673,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance): span.set_attribute("test", "true") return True except Exception as e: - logger.info(f"[Arize/Phoenix] API check failed: {str(e)}", exc_info=True) + logger.info("[Arize/Phoenix] API check failed: %s", str(e), exc_info=True) raise ValueError(f"[Arize/Phoenix] API check failed: {str(e)}") def get_project_url(self): @@ -683,7 +683,7 @@ class ArizePhoenixDataTrace(BaseTraceInstance): else: return f"{self.arize_phoenix_config.endpoint}/projects/" except Exception as e: - logger.info(f"[Arize/Phoenix] Get run url failed: {str(e)}", exc_info=True) + logger.info("[Arize/Phoenix] Get run url failed: %s", str(e), exc_info=True) raise ValueError(f"[Arize/Phoenix] Get run url failed: {str(e)}") def _get_workflow_nodes(self, workflow_run_id: str): diff --git a/api/core/ops/langfuse_trace/langfuse_trace.py b/api/core/ops/langfuse_trace/langfuse_trace.py index f4a59ef3a7..d356e735ee 100644 --- a/api/core/ops/langfuse_trace/langfuse_trace.py +++ b/api/core/ops/langfuse_trace/langfuse_trace.py @@ -440,7 +440,7 @@ class LangFuseDataTrace(BaseTraceInstance): try: return self.langfuse_client.auth_check() except Exception as e: - logger.debug(f"LangFuse API check failed: {str(e)}") + logger.debug("LangFuse API check failed: %s", str(e)) raise ValueError(f"LangFuse API check failed: {str(e)}") def get_project_key(self): @@ -448,5 +448,5 @@ class LangFuseDataTrace(BaseTraceInstance): projects = self.langfuse_client.client.projects.get() return projects.data[0].id except Exception as e: - logger.debug(f"LangFuse get project key failed: {str(e)}") + logger.debug("LangFuse get project key failed: %s", str(e)) raise ValueError(f"LangFuse get project key failed: {str(e)}") diff --git a/api/core/ops/langsmith_trace/langsmith_trace.py b/api/core/ops/langsmith_trace/langsmith_trace.py index c97846dc9b..fb3f6ecf0d 100644 --- a/api/core/ops/langsmith_trace/langsmith_trace.py +++ b/api/core/ops/langsmith_trace/langsmith_trace.py @@ -504,7 +504,7 @@ class 
LangSmithDataTrace(BaseTraceInstance): self.langsmith_client.delete_project(project_name=random_project_name) return True except Exception as e: - logger.debug(f"LangSmith API check failed: {str(e)}") + logger.debug("LangSmith API check failed: %s", str(e)) raise ValueError(f"LangSmith API check failed: {str(e)}") def get_project_url(self): @@ -523,5 +523,5 @@ class LangSmithDataTrace(BaseTraceInstance): ) return project_url.split("/r/")[0] except Exception as e: - logger.debug(f"LangSmith get run url failed: {str(e)}") + logger.debug("LangSmith get run url failed: %s", str(e)) raise ValueError(f"LangSmith get run url failed: {str(e)}") diff --git a/api/core/ops/opik_trace/opik_trace.py b/api/core/ops/opik_trace/opik_trace.py index 6079b2faef..1e52f28350 100644 --- a/api/core/ops/opik_trace/opik_trace.py +++ b/api/core/ops/opik_trace/opik_trace.py @@ -453,12 +453,12 @@ class OpikDataTrace(BaseTraceInstance): self.opik_client.auth_check() return True except Exception as e: - logger.info(f"Opik API check failed: {str(e)}", exc_info=True) + logger.info("Opik API check failed: %s", str(e), exc_info=True) raise ValueError(f"Opik API check failed: {str(e)}") def get_project_url(self): try: return self.opik_client.get_project_url(project_name=self.project) except Exception as e: - logger.info(f"Opik get run url failed: {str(e)}", exc_info=True) + logger.info("Opik get run url failed: %s", str(e), exc_info=True) raise ValueError(f"Opik get run url failed: {str(e)}") diff --git a/api/core/ops/ops_trace_manager.py b/api/core/ops/ops_trace_manager.py index 2b546b47cc..91cdc937a6 100644 --- a/api/core/ops/ops_trace_manager.py +++ b/api/core/ops/ops_trace_manager.py @@ -287,7 +287,7 @@ class OpsTraceManager: # create new tracing_instance and update the cache if it absent tracing_instance = trace_instance(config_class(**decrypt_trace_config)) cls.ops_trace_instances_cache[decrypt_trace_config_key] = tracing_instance - logging.info(f"new tracing_instance for app_id: {app_id}") + logging.info("new tracing_instance for app_id: %s", app_id) return tracing_instance @classmethod @@ -843,7 +843,7 @@ class TraceQueueManager: trace_task.app_id = self.app_id trace_manager_queue.put(trace_task) except Exception as e: - logging.exception(f"Error adding trace task, trace_type {trace_task.trace_type}") + logging.exception("Error adding trace task, trace_type %s", trace_task.trace_type) finally: self.start_timer() diff --git a/api/core/ops/weave_trace/weave_trace.py b/api/core/ops/weave_trace/weave_trace.py index a34b3b780c..470601b17a 100644 --- a/api/core/ops/weave_trace/weave_trace.py +++ b/api/core/ops/weave_trace/weave_trace.py @@ -66,11 +66,11 @@ class WeaveDataTrace(BaseTraceInstance): project_url = f"https://wandb.ai/{self.weave_client._project_id()}" return project_url except Exception as e: - logger.debug(f"Weave get run url failed: {str(e)}") + logger.debug("Weave get run url failed: %s", str(e)) raise ValueError(f"Weave get run url failed: {str(e)}") def trace(self, trace_info: BaseTraceInfo): - logger.debug(f"Trace info: {trace_info}") + logger.debug("Trace info: %s", trace_info) if isinstance(trace_info, WorkflowTraceInfo): self.workflow_trace(trace_info) if isinstance(trace_info, MessageTraceInfo): @@ -403,7 +403,7 @@ class WeaveDataTrace(BaseTraceInstance): print("Weave login successful") return True except Exception as e: - logger.debug(f"Weave API check failed: {str(e)}") + logger.debug("Weave API check failed: %s", str(e)) raise ValueError(f"Weave API check failed: {str(e)}") def start_call(self, 
run_data: WeaveTraceModel, parent_run_id: Optional[str] = None): diff --git a/api/core/rag/datasource/vdb/couchbase/couchbase_vector.py b/api/core/rag/datasource/vdb/couchbase/couchbase_vector.py index 68a9952789..213fe2fd5a 100644 --- a/api/core/rag/datasource/vdb/couchbase/couchbase_vector.py +++ b/api/core/rag/datasource/vdb/couchbase/couchbase_vector.py @@ -242,7 +242,7 @@ class CouchbaseVector(BaseVector): try: self._cluster.query(query, named_parameters={"doc_ids": ids}).execute() except Exception as e: - logger.exception(f"Failed to delete documents, ids: {ids}") + logger.exception("Failed to delete documents, ids: %s", ids) def delete_by_document_id(self, document_id: str): query = f""" diff --git a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_ja_vector.py b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_ja_vector.py index 27575197fa..7118029d40 100644 --- a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_ja_vector.py +++ b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_ja_vector.py @@ -29,7 +29,7 @@ class ElasticSearchJaVector(ElasticSearchVector): with redis_client.lock(lock_name, timeout=20): collection_exist_cache_key = f"vector_indexing_{self._collection_name}" if redis_client.get(collection_exist_cache_key): - logger.info(f"Collection {self._collection_name} already exists.") + logger.info("Collection %s already exists.", self._collection_name) return if not self._client.indices.exists(index=self._collection_name): diff --git a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py index ad39717183..832485b236 100644 --- a/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py +++ b/api/core/rag/datasource/vdb/elasticsearch/elasticsearch_vector.py @@ -186,7 +186,7 @@ class ElasticSearchVector(BaseVector): with redis_client.lock(lock_name, timeout=20): collection_exist_cache_key = f"vector_indexing_{self._collection_name}" if redis_client.get(collection_exist_cache_key): - logger.info(f"Collection {self._collection_name} already exists.") + logger.info("Collection %s already exists.", self._collection_name) return if not self._client.indices.exists(index=self._collection_name): diff --git a/api/core/rag/datasource/vdb/huawei/huawei_cloud_vector.py b/api/core/rag/datasource/vdb/huawei/huawei_cloud_vector.py index 89423eb160..0a4067e39c 100644 --- a/api/core/rag/datasource/vdb/huawei/huawei_cloud_vector.py +++ b/api/core/rag/datasource/vdb/huawei/huawei_cloud_vector.py @@ -164,7 +164,7 @@ class HuaweiCloudVector(BaseVector): with redis_client.lock(lock_name, timeout=20): collection_exist_cache_key = f"vector_indexing_{self._collection_name}" if redis_client.get(collection_exist_cache_key): - logger.info(f"Collection {self._collection_name} already exists.") + logger.info("Collection %s already exists.", self._collection_name) return if not self._client.indices.exists(index=self._collection_name): diff --git a/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py b/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py index e9ff1ce43d..3c65a41f08 100644 --- a/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py +++ b/api/core/rag/datasource/vdb/lindorm/lindorm_vector.py @@ -89,7 +89,7 @@ class LindormVectorStore(BaseVector): timeout: int = 60, **kwargs, ): - logger.info(f"Total documents to add: {len(documents)}") + logger.info("Total documents to add: %s", len(documents)) uuids = self._get_uuids(documents) total_docs = len(documents) @@ -147,7 +147,7 @@ 
class LindormVectorStore(BaseVector): time.sleep(0.5) except Exception: - logger.exception(f"Failed to process batch {batch_num + 1}") + logger.exception("Failed to process batch %s", batch_num + 1) raise def get_ids_by_metadata_field(self, key: str, value: str): @@ -180,7 +180,7 @@ class LindormVectorStore(BaseVector): # 1. First check if collection exists if not self._client.indices.exists(index=self._collection_name): - logger.warning(f"Collection {self._collection_name} does not exist") + logger.warning("Collection %s does not exist", self._collection_name) return # 2. Batch process deletions @@ -196,7 +196,7 @@ class LindormVectorStore(BaseVector): } ) else: - logger.warning(f"DELETE BY ID: ID {id} does not exist in the index.") + logger.warning("DELETE BY ID: ID %s does not exist in the index.", id) # 3. Perform bulk deletion if there are valid documents to delete if actions: @@ -209,9 +209,9 @@ class LindormVectorStore(BaseVector): doc_id = delete_error.get("_id") if status == 404: - logger.warning(f"Document not found for deletion: {doc_id}") + logger.warning("Document not found for deletion: %s", doc_id) else: - logger.exception(f"Error deleting document: {error}") + logger.exception("Error deleting document: %s", error) def delete(self) -> None: if self._using_ugc: @@ -225,7 +225,7 @@ class LindormVectorStore(BaseVector): self._client.indices.delete(index=self._collection_name, params={"timeout": 60}) logger.info("Delete index success") else: - logger.warning(f"Index '{self._collection_name}' does not exist. No deletion performed.") + logger.warning("Index '%s' does not exist. No deletion performed.", self._collection_name) def text_exists(self, id: str) -> bool: try: @@ -257,7 +257,7 @@ class LindormVectorStore(BaseVector): params["routing"] = self._routing # type: ignore response = self._client.search(index=self._collection_name, body=query, params=params) except Exception: - logger.exception(f"Error executing vector search, query: {query}") + logger.exception("Error executing vector search, query: %s", query) raise docs_and_scores = [] @@ -324,10 +324,10 @@ class LindormVectorStore(BaseVector): with redis_client.lock(lock_name, timeout=20): collection_exist_cache_key = f"vector_indexing_{self._collection_name}" if redis_client.get(collection_exist_cache_key): - logger.info(f"Collection {self._collection_name} already exists.") + logger.info("Collection %s already exists.", self._collection_name) return if self._client.indices.exists(index=self._collection_name): - logger.info(f"{self._collection_name.lower()} already exists.") + logger.info("%s already exists.", self._collection_name.lower()) redis_client.set(collection_exist_cache_key, 1, ex=3600) return if len(self.kwargs) == 0 and len(kwargs) != 0: diff --git a/api/core/rag/datasource/vdb/milvus/milvus_vector.py b/api/core/rag/datasource/vdb/milvus/milvus_vector.py index 63de6a0603..b86f0bea33 100644 --- a/api/core/rag/datasource/vdb/milvus/milvus_vector.py +++ b/api/core/rag/datasource/vdb/milvus/milvus_vector.py @@ -103,7 +103,7 @@ class MilvusVector(BaseVector): # For standard Milvus installations, check version number return version.parse(milvus_version).base_version >= version.parse("2.5.0").base_version except Exception as e: - logger.warning(f"Failed to check Milvus version: {str(e)}. Disabling hybrid search.") + logger.warning("Failed to check Milvus version: %s. 
Disabling hybrid search.", str(e)) return False def get_type(self) -> str: diff --git a/api/core/rag/datasource/vdb/myscale/myscale_vector.py b/api/core/rag/datasource/vdb/myscale/myscale_vector.py index dbb1a7fe19..d5ec4b4436 100644 --- a/api/core/rag/datasource/vdb/myscale/myscale_vector.py +++ b/api/core/rag/datasource/vdb/myscale/myscale_vector.py @@ -53,7 +53,7 @@ class MyScaleVector(BaseVector): return self.add_texts(documents=texts, embeddings=embeddings, **kwargs) def _create_collection(self, dimension: int): - logging.info(f"create MyScale collection {self._collection_name} with dimension {dimension}") + logging.info("create MyScale collection %s with dimension %s", self._collection_name, dimension) self._client.command(f"CREATE DATABASE IF NOT EXISTS {self._config.database}") fts_params = f"('{self._config.fts_params}')" if self._config.fts_params else "" sql = f""" @@ -151,7 +151,7 @@ class MyScaleVector(BaseVector): for r in self._client.query(sql).named_results() ] except Exception as e: - logging.exception(f"\033[91m\033[1m{type(e)}\033[0m \033[95m{str(e)}\033[0m") # noqa:TRY401 + logging.exception("\033[91m\033[1m%s\033[0m \033[95m%s\033[0m", type(e), str(e)) # noqa:TRY401 return [] def delete(self) -> None: diff --git a/api/core/rag/datasource/vdb/oceanbase/oceanbase_vector.py b/api/core/rag/datasource/vdb/oceanbase/oceanbase_vector.py index dd196e1f09..d6dfe967d7 100644 --- a/api/core/rag/datasource/vdb/oceanbase/oceanbase_vector.py +++ b/api/core/rag/datasource/vdb/oceanbase/oceanbase_vector.py @@ -147,7 +147,7 @@ class OceanBaseVector(BaseVector): logger.debug("Current OceanBase version is %s", ob_version) return version.parse(ob_version).base_version >= version.parse("4.3.5.1").base_version except Exception as e: - logger.warning(f"Failed to check OceanBase version: {str(e)}. Disabling hybrid search.") + logger.warning("Failed to check OceanBase version: %s. 
Disabling hybrid search.", str(e)) return False def add_texts(self, documents: list[Document], embeddings: list[list[float]], **kwargs): @@ -229,7 +229,7 @@ class OceanBaseVector(BaseVector): return docs except Exception as e: - logger.warning(f"Failed to fulltext search: {str(e)}.") + logger.warning("Failed to fulltext search: %s.", str(e)) return [] def search_by_vector(self, query_vector: list[float], **kwargs: Any) -> list[Document]: diff --git a/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py b/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py index 0abb3c0077..ed2dcb40ad 100644 --- a/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py +++ b/api/core/rag/datasource/vdb/opensearch/opensearch_vector.py @@ -131,7 +131,7 @@ class OpenSearchVector(BaseVector): def delete_by_ids(self, ids: list[str]) -> None: index_name = self._collection_name.lower() if not self._client.indices.exists(index=index_name): - logger.warning(f"Index {index_name} does not exist") + logger.warning("Index %s does not exist", index_name) return # Obtaining All Actual Documents_ID @@ -142,7 +142,7 @@ class OpenSearchVector(BaseVector): if es_ids: actual_ids.extend(es_ids) else: - logger.warning(f"Document with metadata doc_id {doc_id} not found for deletion") + logger.warning("Document with metadata doc_id %s not found for deletion", doc_id) if actual_ids: actions = [{"_op_type": "delete", "_index": index_name, "_id": es_id} for es_id in actual_ids] @@ -155,9 +155,9 @@ class OpenSearchVector(BaseVector): doc_id = delete_error.get("_id") if status == 404: - logger.warning(f"Document not found for deletion: {doc_id}") + logger.warning("Document not found for deletion: %s", doc_id) else: - logger.exception(f"Error deleting document: {error}") + logger.exception("Error deleting document: %s", error) def delete(self) -> None: self._client.indices.delete(index=self._collection_name.lower()) @@ -198,7 +198,7 @@ class OpenSearchVector(BaseVector): try: response = self._client.search(index=self._collection_name.lower(), body=query) except Exception as e: - logger.exception(f"Error executing vector search, query: {query}") + logger.exception("Error executing vector search, query: %s", query) raise docs = [] @@ -242,7 +242,7 @@ class OpenSearchVector(BaseVector): with redis_client.lock(lock_name, timeout=20): collection_exist_cache_key = f"vector_indexing_{self._collection_name.lower()}" if redis_client.get(collection_exist_cache_key): - logger.info(f"Collection {self._collection_name.lower()} already exists.") + logger.info("Collection %s already exists.", self._collection_name.lower()) return if not self._client.indices.exists(index=self._collection_name.lower()): @@ -272,7 +272,7 @@ class OpenSearchVector(BaseVector): }, } - logger.info(f"Creating OpenSearch index {self._collection_name.lower()}") + logger.info("Creating OpenSearch index %s", self._collection_name.lower()) self._client.indices.create(index=self._collection_name.lower(), body=index_body) redis_client.set(collection_exist_cache_key, 1, ex=3600) diff --git a/api/core/rag/datasource/vdb/pgvector/pgvector.py b/api/core/rag/datasource/vdb/pgvector/pgvector.py index 04e9cf801e..746773da63 100644 --- a/api/core/rag/datasource/vdb/pgvector/pgvector.py +++ b/api/core/rag/datasource/vdb/pgvector/pgvector.py @@ -155,7 +155,7 @@ class PGVector(BaseVector): cur.execute(f"DELETE FROM {self.table_name} WHERE id IN %s", (tuple(ids),)) except psycopg2.errors.UndefinedTable: # table not exists - logging.warning(f"Table {self.table_name} not 
found, skipping delete operation.") + logging.warning("Table %s not found, skipping delete operation.", self.table_name) return except Exception as e: raise e diff --git a/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py b/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py index 9ed6e7369b..784e27fc7f 100644 --- a/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py +++ b/api/core/rag/datasource/vdb/tablestore/tablestore_vector.py @@ -142,7 +142,7 @@ class TableStoreVector(BaseVector): with redis_client.lock(lock_name, timeout=20): collection_exist_cache_key = f"vector_indexing_{self._collection_name}" if redis_client.get(collection_exist_cache_key): - logging.info(f"Collection {self._collection_name} already exists.") + logging.info("Collection %s already exists.", self._collection_name) return self._create_table_if_not_exist() diff --git a/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py b/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py index 61c68b939e..b2c408904f 100644 --- a/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py +++ b/api/core/rag/datasource/vdb/tidb_vector/tidb_vector.py @@ -192,7 +192,7 @@ class TiDBVector(BaseVector): query_vector_str = ", ".join(format(x) for x in query_vector) query_vector_str = "[" + query_vector_str + "]" logger.debug( - f"_collection_name: {self._collection_name}, score_threshold: {score_threshold}, distance: {distance}" + "_collection_name: %s, score_threshold: %s, distance: %s", self._collection_name, score_threshold, distance ) docs = [] diff --git a/api/core/rag/datasource/vdb/vector_factory.py b/api/core/rag/datasource/vdb/vector_factory.py index e018f7d3d4..95410265af 100644 --- a/api/core/rag/datasource/vdb/vector_factory.py +++ b/api/core/rag/datasource/vdb/vector_factory.py @@ -178,19 +178,19 @@ class Vector: def create(self, texts: Optional[list] = None, **kwargs): if texts: start = time.time() - logger.info(f"start embedding {len(texts)} texts {start}") + logger.info("start embedding %s texts %s", len(texts), start) batch_size = 1000 total_batches = len(texts) + batch_size - 1 for i in range(0, len(texts), batch_size): batch = texts[i : i + batch_size] batch_start = time.time() - logger.info(f"Processing batch {i // batch_size + 1}/{total_batches} ({len(batch)} texts)") + logger.info("Processing batch %s/%s (%s texts)", i // batch_size + 1, total_batches, len(batch)) batch_embeddings = self._embeddings.embed_documents([document.page_content for document in batch]) logger.info( - f"Embedding batch {i // batch_size + 1}/{total_batches} took {time.time() - batch_start:.3f}s" + "Embedding batch %s/%s took %s s", i // batch_size + 1, total_batches, time.time() - batch_start ) self._vector_processor.create(texts=batch, embeddings=batch_embeddings, **kwargs) - logger.info(f"Embedding {len(texts)} texts took {time.time() - start:.3f}s") + logger.info("Embedding %s texts took %s s", len(texts), time.time() - start) def add_texts(self, documents: list[Document], **kwargs): if kwargs.get("duplicate_check", False): diff --git a/api/core/rag/embedding/cached_embedding.py b/api/core/rag/embedding/cached_embedding.py index f50f9f6b60..9848a28384 100644 --- a/api/core/rag/embedding/cached_embedding.py +++ b/api/core/rag/embedding/cached_embedding.py @@ -69,7 +69,7 @@ class CacheEmbedding(Embeddings): # stackoverflow best way: https://stackoverflow.com/questions/20319813/how-to-check-list-containing-nan if np.isnan(normalized_embedding).any(): # for issue #11827 float values are not json compliant - 
logger.warning(f"Normalized embedding is nan: {normalized_embedding}") + logger.warning("Normalized embedding is nan: %s", normalized_embedding) continue embedding_queue_embeddings.append(normalized_embedding) except IntegrityError: @@ -122,7 +122,7 @@ class CacheEmbedding(Embeddings): raise ValueError("Normalized embedding is nan please try again") except Exception as ex: if dify_config.DEBUG: - logging.exception(f"Failed to embed query text '{text[:10]}...({len(text)} chars)'") + logging.exception("Failed to embed query text '%s...(%s chars)'", text[:10], len(text)) raise ex try: @@ -136,7 +136,9 @@ class CacheEmbedding(Embeddings): redis_client.setex(embedding_cache_key, 600, encoded_str) except Exception as ex: if dify_config.DEBUG: - logging.exception(f"Failed to add embedding to redis for the text '{text[:10]}...({len(text)} chars)'") + logging.exception( + "Failed to add embedding to redis for the text '%s...(%s chars)'", text[:10], len(text) + ) raise ex return embedding_results # type: ignore diff --git a/api/core/rag/splitter/text_splitter.py b/api/core/rag/splitter/text_splitter.py index 529d8ccd27..489aa05430 100644 --- a/api/core/rag/splitter/text_splitter.py +++ b/api/core/rag/splitter/text_splitter.py @@ -116,7 +116,7 @@ class TextSplitter(BaseDocumentTransformer, ABC): if total + _len + (separator_len if len(current_doc) > 0 else 0) > self._chunk_size: if total > self._chunk_size: logger.warning( - f"Created a chunk of size {total}, which is longer than the specified {self._chunk_size}" + "Created a chunk of size %s, which is longer than the specified %s", total, self._chunk_size ) if len(current_doc) > 0: doc = self._join_docs(current_doc, separator) diff --git a/api/core/repositories/factory.py b/api/core/repositories/factory.py index 4118aa61c7..6e636883ae 100644 --- a/api/core/repositories/factory.py +++ b/api/core/repositories/factory.py @@ -153,7 +153,7 @@ class DifyCoreRepositoryFactory: RepositoryImportError: If the configured repository cannot be created """ class_path = dify_config.CORE_WORKFLOW_EXECUTION_REPOSITORY - logger.debug(f"Creating WorkflowExecutionRepository from: {class_path}") + logger.debug("Creating WorkflowExecutionRepository from: %s", class_path) try: repository_class = cls._import_class(class_path) @@ -199,7 +199,7 @@ class DifyCoreRepositoryFactory: RepositoryImportError: If the configured repository cannot be created """ class_path = dify_config.CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY - logger.debug(f"Creating WorkflowNodeExecutionRepository from: {class_path}") + logger.debug("Creating WorkflowNodeExecutionRepository from: %s", class_path) try: repository_class = cls._import_class(class_path) diff --git a/api/core/repositories/sqlalchemy_workflow_execution_repository.py b/api/core/repositories/sqlalchemy_workflow_execution_repository.py index c579ff4028..74a49842f3 100644 --- a/api/core/repositories/sqlalchemy_workflow_execution_repository.py +++ b/api/core/repositories/sqlalchemy_workflow_execution_repository.py @@ -203,5 +203,5 @@ class SQLAlchemyWorkflowExecutionRepository(WorkflowExecutionRepository): session.commit() # Update the in-memory cache for faster subsequent lookups - logger.debug(f"Updating cache for execution_id: {db_model.id}") + logger.debug("Updating cache for execution_id: %s", db_model.id) self._execution_cache[db_model.id] = db_model diff --git a/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py b/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py index d4a31390f8..f4532d7f29 
100644 --- a/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py +++ b/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py @@ -215,7 +215,7 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) # Update the in-memory cache for faster subsequent lookups # Only cache if we have a node_execution_id to use as the cache key if db_model.node_execution_id: - logger.debug(f"Updating cache for node_execution_id: {db_model.node_execution_id}") + logger.debug("Updating cache for node_execution_id: %s", db_model.node_execution_id) self._node_execution_cache[db_model.node_execution_id] = db_model def get_db_models_by_workflow_run( diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py index f286466de0..ba152ceec0 100644 --- a/api/core/tools/tool_manager.py +++ b/api/core/tools/tool_manager.py @@ -206,7 +206,7 @@ class ToolManager: ) except Exception as e: builtin_provider = None - logger.info(f"Error getting builtin provider {credential_id}:{e}", exc_info=True) + logger.info("Error getting builtin provider %s:%s", credential_id, e, exc_info=True) # if the provider has been deleted, raise an error if builtin_provider is None: raise ToolProviderNotFoundError(f"provider has been deleted: {credential_id}") @@ -569,7 +569,7 @@ class ToolManager: yield provider except Exception: - logger.exception(f"load builtin provider {provider_path}") + logger.exception("load builtin provider %s", provider_path) continue # set builtin providers loaded cls._builtin_providers_loaded = True diff --git a/api/core/tools/workflow_as_tool/tool.py b/api/core/tools/workflow_as_tool/tool.py index 8b89c2a7a9..962b9f7a81 100644 --- a/api/core/tools/workflow_as_tool/tool.py +++ b/api/core/tools/workflow_as_tool/tool.py @@ -194,7 +194,7 @@ class WorkflowTool(Tool): files.append(file_dict) except Exception: - logger.exception(f"Failed to transform file {file}") + logger.exception("Failed to transform file %s", file) else: parameters_result[parameter.name] = tool_parameters.get(parameter.name) diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index b315129763..ebe6bc42cc 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -377,7 +377,7 @@ class GraphEngine: edge = cast(GraphEdge, sub_edge_mappings[0]) if edge.run_condition is None: - logger.warning(f"Edge {edge.target_node_id} run condition is None") + logger.warning("Edge %s run condition is None", edge.target_node_id) continue result = ConditionManager.get_condition_handler( @@ -848,7 +848,7 @@ class GraphEngine: ) return except Exception as e: - logger.exception(f"Node {node.title} run failed") + logger.exception("Node %s run failed", node.title) raise e def _append_variables_recursively(self, node_id: str, variable_key_list: list[str], variable_value: VariableValue): diff --git a/api/core/workflow/nodes/answer/base_stream_processor.py b/api/core/workflow/nodes/answer/base_stream_processor.py index 09d5464d7a..7e84557a2d 100644 --- a/api/core/workflow/nodes/answer/base_stream_processor.py +++ b/api/core/workflow/nodes/answer/base_stream_processor.py @@ -36,7 +36,7 @@ class StreamProcessor(ABC): reachable_node_ids: list[str] = [] unreachable_first_node_ids: list[str] = [] if finished_node_id not in self.graph.edge_mapping: - logger.warning(f"node {finished_node_id} has no edge mapping") + logger.warning("node %s has no edge mapping", finished_node_id) return for edge in 
self.graph.edge_mapping[finished_node_id]: if ( diff --git a/api/core/workflow/nodes/base/node.py b/api/core/workflow/nodes/base/node.py index fb5ec55453..be4f79af19 100644 --- a/api/core/workflow/nodes/base/node.py +++ b/api/core/workflow/nodes/base/node.py @@ -65,7 +65,7 @@ class BaseNode: try: result = self._run() except Exception as e: - logger.exception(f"Node {self.node_id} failed to run") + logger.exception("Node %s failed to run", self.node_id) result = NodeRunResult( status=WorkflowNodeExecutionStatus.FAILED, error=str(e), diff --git a/api/core/workflow/nodes/document_extractor/node.py b/api/core/workflow/nodes/document_extractor/node.py index ab5964ebd4..58741fac8b 100644 --- a/api/core/workflow/nodes/document_extractor/node.py +++ b/api/core/workflow/nodes/document_extractor/node.py @@ -363,7 +363,7 @@ def _extract_text_from_docx(file_content: bytes) -> str: text.append(markdown_table) except Exception as e: - logger.warning(f"Failed to extract table from DOC: {e}") + logger.warning("Failed to extract table from DOC: %s", e) continue return "\n".join(text) diff --git a/api/core/workflow/nodes/http_request/node.py b/api/core/workflow/nodes/http_request/node.py index 6799d5c63c..bc1d5c9b87 100644 --- a/api/core/workflow/nodes/http_request/node.py +++ b/api/core/workflow/nodes/http_request/node.py @@ -129,7 +129,7 @@ class HttpRequestNode(BaseNode): }, ) except HttpRequestNodeError as e: - logger.warning(f"http request node {self.node_id} failed to run: {e}") + logger.warning("http request node %s failed to run: %s", self.node_id, e) return NodeRunResult( status=WorkflowNodeExecutionStatus.FAILED, error=str(e), diff --git a/api/core/workflow/nodes/iteration/iteration_node.py b/api/core/workflow/nodes/iteration/iteration_node.py index 5842c8d64b..def1e1cfa3 100644 --- a/api/core/workflow/nodes/iteration/iteration_node.py +++ b/api/core/workflow/nodes/iteration/iteration_node.py @@ -616,7 +616,7 @@ class IterationNode(BaseNode): ) except IterationNodeError as e: - logger.warning(f"Iteration run failed:{str(e)}") + logger.warning("Iteration run failed:%s", str(e)) yield IterationRunFailedEvent( iteration_id=self.id, iteration_node_id=self.node_id, diff --git a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py index a23d284626..45c5e0a62c 100644 --- a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py +++ b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py @@ -670,7 +670,7 @@ class ParameterExtractorNode(BaseNode): return cast(dict, json.loads(json_str)) except Exception: pass - logger.info(f"extra error: {result}") + logger.info("extra error: %s", result) return None def _extract_json_from_tool_call(self, tool_call: AssistantPromptMessage.ToolCall) -> Optional[dict]: @@ -690,7 +690,7 @@ class ParameterExtractorNode(BaseNode): return cast(dict, json.loads(json_str)) except Exception: pass - logger.info(f"extra error: {result}") + logger.info("extra error: %s", result) return None def _generate_default_result(self, data: ParameterExtractorNodeData) -> dict: diff --git a/api/core/workflow/workflow_entry.py b/api/core/workflow/workflow_entry.py index c8082ebf50..a962ed9f4d 100644 --- a/api/core/workflow/workflow_entry.py +++ b/api/core/workflow/workflow_entry.py @@ -193,7 +193,13 @@ class WorkflowEntry: # run node generator = node.run() except Exception as e: - logger.exception(f"error while running node, {workflow.id=}, {node.id=}, {node.type_=}, 
{node.version()=}") + logger.exception( + "error while running node, workflow_id=%s, node_id=%s, node_type=%s, node_version=%s", + workflow.id, + node.id, + node.type_, + node.version(), + ) raise WorkflowNodeRunFailedError(node=node, err_msg=str(e)) return node, generator @@ -297,7 +303,12 @@ class WorkflowEntry: return node, generator except Exception as e: - logger.exception(f"error while running node, {node.id=}, {node.type_=}, {node.version()=}") + logger.exception( + "error while running node, node_id=%s, node_type=%s, node_version=%s", + node.id, + node.type_, + node.version(), + ) raise WorkflowNodeRunFailedError(node=node, err_msg=str(e)) @staticmethod diff --git a/api/events/event_handlers/update_provider_when_message_created.py b/api/events/event_handlers/update_provider_when_message_created.py index d3943f2eda..2ed42c71ea 100644 --- a/api/events/event_handlers/update_provider_when_message_created.py +++ b/api/events/event_handlers/update_provider_when_message_created.py @@ -131,9 +131,11 @@ def handle(sender: Message, **kwargs): duration = time_module.perf_counter() - start_time logger.info( - f"Provider updates completed successfully. " - f"Updates: {len(updates_to_perform)}, Duration: {duration:.3f}s, " - f"Tenant: {tenant_id}, Provider: {provider_name}" + "Provider updates completed successfully. Updates: %s, Duration: %s s, Tenant: %s, Provider: %s", + len(updates_to_perform), + duration, + tenant_id, + provider_name, ) except Exception as e: @@ -141,9 +143,11 @@ def handle(sender: Message, **kwargs): duration = time_module.perf_counter() - start_time logger.exception( - f"Provider updates failed after {duration:.3f}s. " - f"Updates: {len(updates_to_perform)}, Tenant: {tenant_id}, " - f"Provider: {provider_name}" + "Provider updates failed after %s s. Updates: %s, Tenant: %s, Provider: %s", + duration, + len(updates_to_perform), + tenant_id, + provider_name, ) raise @@ -219,16 +223,20 @@ def _execute_provider_updates(updates_to_perform: list[_ProviderUpdateOperation] rows_affected = result.rowcount logger.debug( - f"Provider update ({description}): {rows_affected} rows affected. " - f"Filters: {filters.model_dump()}, Values: {update_values}" + "Provider update (%s): %s rows affected. Filters: %s, Values: %s", + description, + rows_affected, + filters.model_dump(), + update_values, ) # If no rows were affected for quota updates, log a warning if rows_affected == 0 and description == "quota_deduction_update": logger.warning( - f"No Provider rows updated for quota deduction. " - f"This may indicate quota limit exceeded or provider not found. " - f"Filters: {filters.model_dump()}" + "No Provider rows updated for quota deduction. " + "This may indicate quota limit exceeded or provider not found. 
" + "Filters: %s", + filters.model_dump(), ) - logger.debug(f"Successfully processed {len(updates_to_perform)} Provider updates") + logger.debug("Successfully processed %s Provider updates", len(updates_to_perform)) diff --git a/api/extensions/ext_redis.py b/api/extensions/ext_redis.py index be2f6115f7..14b9273e9d 100644 --- a/api/extensions/ext_redis.py +++ b/api/extensions/ext_redis.py @@ -137,7 +137,7 @@ def redis_fallback(default_return: Any = None): try: return func(*args, **kwargs) except RedisError as e: - logger.warning(f"Redis operation failed in {func.__name__}: {str(e)}", exc_info=True) + logger.warning("Redis operation failed in %s: %s", func.__name__, str(e), exc_info=True) return default_return return wrapper diff --git a/api/extensions/storage/opendal_storage.py b/api/extensions/storage/opendal_storage.py index 12e2738e9d..0ba35506d3 100644 --- a/api/extensions/storage/opendal_storage.py +++ b/api/extensions/storage/opendal_storage.py @@ -35,21 +35,21 @@ class OpenDALStorage(BaseStorage): Path(root).mkdir(parents=True, exist_ok=True) self.op = opendal.Operator(scheme=scheme, **kwargs) # type: ignore - logger.debug(f"opendal operator created with scheme {scheme}") + logger.debug("opendal operator created with scheme %s", scheme) retry_layer = opendal.layers.RetryLayer(max_times=3, factor=2.0, jitter=True) self.op = self.op.layer(retry_layer) logger.debug("added retry layer to opendal operator") def save(self, filename: str, data: bytes) -> None: self.op.write(path=filename, bs=data) - logger.debug(f"file {filename} saved") + logger.debug("file %s saved", filename) def load_once(self, filename: str) -> bytes: if not self.exists(filename): raise FileNotFoundError("File not found") content: bytes = self.op.read(path=filename) - logger.debug(f"file {filename} loaded") + logger.debug("file %s loaded", filename) return content def load_stream(self, filename: str) -> Generator: @@ -60,7 +60,7 @@ class OpenDALStorage(BaseStorage): file = self.op.open(path=filename, mode="rb") while chunk := file.read(batch_size): yield chunk - logger.debug(f"file {filename} loaded as stream") + logger.debug("file %s loaded as stream", filename) def download(self, filename: str, target_filepath: str): if not self.exists(filename): @@ -68,7 +68,7 @@ class OpenDALStorage(BaseStorage): with Path(target_filepath).open("wb") as f: f.write(self.op.read(path=filename)) - logger.debug(f"file {filename} downloaded to {target_filepath}") + logger.debug("file %s downloaded to %s", filename, target_filepath) def exists(self, filename: str) -> bool: res: bool = self.op.exists(path=filename) @@ -77,9 +77,9 @@ class OpenDALStorage(BaseStorage): def delete(self, filename: str): if self.exists(filename): self.op.delete(path=filename) - logger.debug(f"file {filename} deleted") + logger.debug("file %s deleted", filename) return - logger.debug(f"file {filename} not found, skip delete") + logger.debug("file %s not found, skip delete", filename) def scan(self, path: str, files: bool = True, directories: bool = False) -> list[str]: if not self.exists(path): @@ -87,13 +87,13 @@ class OpenDALStorage(BaseStorage): all_files = self.op.scan(path=path) if files and directories: - logger.debug(f"files and directories on {path} scanned") + logger.debug("files and directories on %s scanned", path) return [f.path for f in all_files] if files: - logger.debug(f"files on {path} scanned") + logger.debug("files on %s scanned", path) return [f.path for f in all_files if not f.path.endswith("/")] elif directories: - 
logger.debug(f"directories on {path} scanned") + logger.debug("directories on %s scanned", path) return [f.path for f in all_files if f.path.endswith("/")] else: raise ValueError("At least one of files or directories must be True") diff --git a/api/libs/helper.py b/api/libs/helper.py index 00772d530a..e920d4f5e6 100644 --- a/api/libs/helper.py +++ b/api/libs/helper.py @@ -321,7 +321,7 @@ class TokenManager: key = cls._get_token_key(token, token_type) token_data_json = redis_client.get(key) if token_data_json is None: - logging.warning(f"{token_type} token {token} not found with key {key}") + logging.warning("%s token %s not found with key %s", token_type, token, key) return None token_data: Optional[dict[str, Any]] = json.loads(token_data_json) return token_data diff --git a/api/libs/sendgrid.py b/api/libs/sendgrid.py index 5409e3eeeb..cfc6c7d794 100644 --- a/api/libs/sendgrid.py +++ b/api/libs/sendgrid.py @@ -41,5 +41,5 @@ class SendGridClient: ) raise except Exception as e: - logging.exception(f"SendGridClient Unexpected error occurred while sending email to {_to}") + logging.exception("SendGridClient Unexpected error occurred while sending email to %s", _to) raise diff --git a/api/libs/smtp.py b/api/libs/smtp.py index b94386660e..a01ad6fab8 100644 --- a/api/libs/smtp.py +++ b/api/libs/smtp.py @@ -50,7 +50,7 @@ class SMTPClient: logging.exception("Timeout occurred while sending email") raise except Exception as e: - logging.exception(f"Unexpected error occurred while sending email to {mail['to']}") + logging.exception("Unexpected error occurred while sending email to %s", mail["to"]) raise finally: if smtp: diff --git a/api/models/dataset.py b/api/models/dataset.py index d877540213..01372f8bf6 100644 --- a/api/models/dataset.py +++ b/api/models/dataset.py @@ -911,7 +911,7 @@ class DatasetKeywordTable(Base): return json.loads(keyword_table_text.decode("utf-8"), cls=SetDecoder) return None except Exception as e: - logging.exception(f"Failed to load keyword table from file: {file_key}") + logging.exception("Failed to load keyword table from file: %s", file_key) return None diff --git a/api/repositories/factory.py b/api/repositories/factory.py index 0a0adbf2c2..070cdd46dd 100644 --- a/api/repositories/factory.py +++ b/api/repositories/factory.py @@ -48,7 +48,7 @@ class DifyAPIRepositoryFactory(DifyCoreRepositoryFactory): RepositoryImportError: If the configured repository cannot be imported or instantiated """ class_path = dify_config.API_WORKFLOW_NODE_EXECUTION_REPOSITORY - logger.debug(f"Creating DifyAPIWorkflowNodeExecutionRepository from: {class_path}") + logger.debug("Creating DifyAPIWorkflowNodeExecutionRepository from: %s", class_path) try: repository_class = cls._import_class(class_path) @@ -86,7 +86,7 @@ class DifyAPIRepositoryFactory(DifyCoreRepositoryFactory): RepositoryImportError: If the configured repository cannot be imported or instantiated """ class_path = dify_config.API_WORKFLOW_RUN_REPOSITORY - logger.debug(f"Creating APIWorkflowRunRepository from: {class_path}") + logger.debug("Creating APIWorkflowRunRepository from: %s", class_path) try: repository_class = cls._import_class(class_path) diff --git a/api/repositories/sqlalchemy_api_workflow_run_repository.py b/api/repositories/sqlalchemy_api_workflow_run_repository.py index ebd1d74b20..7c3b1f4ce0 100644 --- a/api/repositories/sqlalchemy_api_workflow_run_repository.py +++ b/api/repositories/sqlalchemy_api_workflow_run_repository.py @@ -155,7 +155,7 @@ class DifyAPISQLAlchemyWorkflowRunRepository(APIWorkflowRunRepository): 
session.commit() deleted_count = cast(int, result.rowcount) - logger.info(f"Deleted {deleted_count} workflow runs by IDs") + logger.info("Deleted %s workflow runs by IDs", deleted_count) return deleted_count def delete_runs_by_app( @@ -193,11 +193,11 @@ class DifyAPISQLAlchemyWorkflowRunRepository(APIWorkflowRunRepository): batch_deleted = result.rowcount total_deleted += batch_deleted - logger.info(f"Deleted batch of {batch_deleted} workflow runs for app {app_id}") + logger.info("Deleted batch of %s workflow runs for app %s", batch_deleted, app_id) # If we deleted fewer records than the batch size, we're done if batch_deleted < batch_size: break - logger.info(f"Total deleted {total_deleted} workflow runs for app {app_id}") + logger.info("Total deleted %s workflow runs for app %s", total_deleted, app_id) return total_deleted diff --git a/api/services/account_service.py b/api/services/account_service.py index 59bffa873c..22ef5de5c0 100644 --- a/api/services/account_service.py +++ b/api/services/account_service.py @@ -329,9 +329,9 @@ class AccountService: db.session.add(account_integrate) db.session.commit() - logging.info(f"Account {account.id} linked {provider} account {open_id}.") + logging.info("Account %s linked %s account %s.", account.id, provider, open_id) except Exception as e: - logging.exception(f"Failed to link {provider} account {open_id} to Account {account.id}") + logging.exception("Failed to link %s account %s to Account %s", provider, open_id, account.id) raise LinkAccountIntegrateError("Failed to link account.") from e @staticmethod @@ -881,7 +881,7 @@ class TenantService: """Create tenant member""" if role == TenantAccountRole.OWNER.value: if TenantService.has_roles(tenant, [TenantAccountRole.OWNER]): - logging.error(f"Tenant {tenant.id} has already an owner.") + logging.error("Tenant %s has already an owner.", tenant.id) raise Exception("Tenant already has an owner.") ta = db.session.query(TenantAccountJoin).filter_by(tenant_id=tenant.id, account_id=account.id).first() @@ -1133,7 +1133,7 @@ class RegisterService: db.session.query(Tenant).delete() db.session.commit() - logging.exception(f"Setup account failed, email: {email}, name: {name}") + logging.exception("Setup account failed, email: %s, name: %s", email, name) raise ValueError(f"Setup failed: {e}") @classmethod diff --git a/api/services/app_service.py b/api/services/app_service.py index 0b6b85bcb2..3557f13337 100644 --- a/api/services/app_service.py +++ b/api/services/app_service.py @@ -94,7 +94,7 @@ class AppService: except (ProviderTokenNotInitError, LLMBadRequestError): model_instance = None except Exception as e: - logging.exception(f"Get default model instance failed, tenant_id: {tenant_id}") + logging.exception("Get default model instance failed, tenant_id: %s", tenant_id) model_instance = None if model_instance: diff --git a/api/services/clear_free_plan_tenant_expired_logs.py b/api/services/clear_free_plan_tenant_expired_logs.py index ad9b750d40..d057a14afb 100644 --- a/api/services/clear_free_plan_tenant_expired_logs.py +++ b/api/services/clear_free_plan_tenant_expired_logs.py @@ -228,7 +228,7 @@ class ClearFreePlanTenantExpiredLogs: # only process sandbox tenant cls.process_tenant(flask_app, tenant_id, days, batch) except Exception: - logger.exception(f"Failed to process tenant {tenant_id}") + logger.exception("Failed to process tenant %s", tenant_id) finally: nonlocal handled_tenant_count handled_tenant_count += 1 @@ -311,7 +311,7 @@ class ClearFreePlanTenantExpiredLogs: try: tenants.append(tenant_id) 
except Exception: - logger.exception(f"Failed to process tenant {tenant_id}") + logger.exception("Failed to process tenant %s", tenant_id) continue futures.append( diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 4872702a76..8a50a9c6e2 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -605,8 +605,9 @@ class DatasetService: except ProviderTokenNotInitError: # If we can't get the embedding model, preserve existing settings logging.warning( - f"Failed to initialize embedding model {data['embedding_model_provider']}/{data['embedding_model']}, " - f"preserving existing settings" + "Failed to initialize embedding model %s/%s, preserving existing settings", + data["embedding_model_provider"], + data["embedding_model"], ) if dataset.embedding_model_provider and dataset.embedding_model: filtered_data["embedding_model_provider"] = dataset.embedding_model_provider @@ -649,11 +650,11 @@ class DatasetService: @staticmethod def check_dataset_permission(dataset, user): if dataset.tenant_id != user.current_tenant_id: - logging.debug(f"User {user.id} does not have permission to access dataset {dataset.id}") + logging.debug("User %s does not have permission to access dataset %s", user.id, dataset.id) raise NoPermissionError("You do not have permission to access this dataset.") if user.current_role != TenantAccountRole.OWNER: if dataset.permission == DatasetPermissionEnum.ONLY_ME and dataset.created_by != user.id: - logging.debug(f"User {user.id} does not have permission to access dataset {dataset.id}") + logging.debug("User %s does not have permission to access dataset %s", user.id, dataset.id) raise NoPermissionError("You do not have permission to access this dataset.") if dataset.permission == DatasetPermissionEnum.PARTIAL_TEAM: # For partial team permission, user needs explicit permission or be the creator @@ -662,7 +663,7 @@ class DatasetService: db.session.query(DatasetPermission).filter_by(dataset_id=dataset.id, account_id=user.id).first() ) if not user_permission: - logging.debug(f"User {user.id} does not have permission to access dataset {dataset.id}") + logging.debug("User %s does not have permission to access dataset %s", user.id, dataset.id) raise NoPermissionError("You do not have permission to access this dataset.") @staticmethod @@ -1174,7 +1175,7 @@ class DocumentService: ) else: logging.warning( - f"Invalid process rule mode: {process_rule.mode}, can not find dataset process rule" + "Invalid process rule mode: %s, can not find dataset process rule", process_rule.mode ) return db.session.add(dataset_process_rule) @@ -1862,7 +1863,7 @@ class DocumentService: task_func.delay(*task_args) except Exception as e: # Log the error but do not rollback the transaction - logging.exception(f"Error executing async task for document {update_info['document'].id}") + logging.exception("Error executing async task for document %s", update_info["document"].id) # don't raise the error immediately, but capture it for later propagation_error = e try: @@ -1873,7 +1874,7 @@ class DocumentService: redis_client.setex(indexing_cache_key, 600, 1) except Exception as e: # Log the error but do not rollback the transaction - logging.exception(f"Error setting cache for document {update_info['document'].id}") + logging.exception("Error setting cache for document %s", update_info["document"].id) # Raise any propagation error after all updates if propagation_error: raise propagation_error diff --git a/api/services/hit_testing_service.py 
b/api/services/hit_testing_service.py index 519d5abca5..5a3f504035 100644 --- a/api/services/hit_testing_service.py +++ b/api/services/hit_testing_service.py @@ -77,7 +77,7 @@ class HitTestingService: ) end = time.perf_counter() - logging.debug(f"Hit testing retrieve in {end - start:0.4f} seconds") + logging.debug("Hit testing retrieve in %.4f seconds", end - start) dataset_query = DatasetQuery( dataset_id=dataset.id, content=query, source="hit_testing", created_by_role="account", created_by=account.id @@ -113,7 +113,7 @@ class HitTestingService: ) end = time.perf_counter() - logging.debug(f"External knowledge hit testing retrieve in {end - start:0.4f} seconds") + logging.debug("External knowledge hit testing retrieve in %.4f seconds", end - start) dataset_query = DatasetQuery( dataset_id=dataset.id, content=query, source="hit_testing", created_by_role="account", created_by=account.id diff --git a/api/services/model_provider_service.py b/api/services/model_provider_service.py index 0a0a5619e1..54197bf949 100644 --- a/api/services/model_provider_service.py +++ b/api/services/model_provider_service.py @@ -380,7 +380,7 @@ class ModelProviderService: else None ) except Exception as e: - logger.debug(f"get_default_model_of_model_type error: {e}") + logger.debug("get_default_model_of_model_type error: %s", e) return None def update_default_model_of_model_type(self, tenant_id: str, model_type: str, provider: str, model: str) -> None: diff --git a/api/services/plugin/data_migration.py b/api/services/plugin/data_migration.py index 5324036414..7a4f886bf5 100644 --- a/api/services/plugin/data_migration.py +++ b/api/services/plugin/data_migration.py @@ -110,7 +110,7 @@ limit 1000""" ) ) logger.exception( - f"[{processed_count}] Failed to migrate [{table_name}] {record_id} ({provider_name})" + "[%s] Failed to migrate [%s] %s (%s)", processed_count, table_name, record_id, provider_name ) continue @@ -183,7 +183,7 @@ limit 1000""" ) ) logger.exception( - f"[{processed_count}] Failed to migrate [{table_name}] {record_id} ({provider_name})" + "[%s] Failed to migrate [%s] %s (%s)", processed_count, table_name, record_id, provider_name ) continue diff --git a/api/services/plugin/plugin_migration.py b/api/services/plugin/plugin_migration.py index 1806fbcfd6..222d70a317 100644 --- a/api/services/plugin/plugin_migration.py +++ b/api/services/plugin/plugin_migration.py @@ -78,7 +78,7 @@ class PluginMigration: ) ) except Exception: - logger.exception(f"Failed to process tenant {tenant_id}") + logger.exception("Failed to process tenant %s", tenant_id) futures = [] @@ -136,7 +136,7 @@ class PluginMigration: try: tenants.append(tenant_id) except Exception: - logger.exception(f"Failed to process tenant {tenant_id}") + logger.exception("Failed to process tenant %s", tenant_id) continue futures.append( @@ -273,7 +273,7 @@ class PluginMigration: result.append(ToolProviderID(tool_entity.provider_id).plugin_id) except Exception: - logger.exception(f"Failed to process tool {tool}") + logger.exception("Failed to process tool %s", tool) continue return result @@ -301,7 +301,7 @@ class PluginMigration: plugins: dict[str, str] = {} plugin_ids = [] plugin_not_exist = [] - logger.info(f"Extracting unique plugins from {extracted_plugins}") + logger.info("Extracting unique plugins from %s", extracted_plugins) with open(extracted_plugins) as f: for line in f: data = json.loads(line) @@ -318,7 +318,7 @@ class PluginMigration: else: plugin_not_exist.append(plugin_id) except Exception: - logger.exception(f"Failed to fetch plugin unique 
identifier for {plugin_id}") + logger.exception("Failed to fetch plugin unique identifier for %s", plugin_id) plugin_not_exist.append(plugin_id) with ThreadPoolExecutor(max_workers=10) as executor: @@ -339,7 +339,7 @@ class PluginMigration: # use a fake tenant id to install all the plugins fake_tenant_id = uuid4().hex - logger.info(f"Installing {len(plugins['plugins'])} plugin instances for fake tenant {fake_tenant_id}") + logger.info("Installing %s plugin instances for fake tenant %s", len(plugins["plugins"]), fake_tenant_id) thread_pool = ThreadPoolExecutor(max_workers=workers) @@ -348,7 +348,7 @@ class PluginMigration: plugin_install_failed.extend(response.get("failed", [])) def install(tenant_id: str, plugin_ids: list[str]) -> None: - logger.info(f"Installing {len(plugin_ids)} plugins for tenant {tenant_id}") + logger.info("Installing %s plugins for tenant %s", len(plugin_ids), tenant_id) # fetch plugin already installed installed_plugins = manager.list_plugins(tenant_id) installed_plugins_ids = [plugin.plugin_id for plugin in installed_plugins] @@ -408,7 +408,7 @@ class PluginMigration: installation = manager.list_plugins(fake_tenant_id) except Exception: - logger.exception(f"Failed to get installation for tenant {fake_tenant_id}") + logger.exception("Failed to get installation for tenant %s", fake_tenant_id) Path(output_file).write_text( json.dumps( @@ -491,7 +491,9 @@ class PluginMigration: else: failed.append(reverse_map[plugin.plugin_unique_identifier]) logger.error( - f"Failed to install plugin {plugin.plugin_unique_identifier}, error: {plugin.message}" + "Failed to install plugin %s, error: %s", + plugin.plugin_unique_identifier, + plugin.message, ) done = True diff --git a/api/services/recommend_app/remote/remote_retrieval.py b/api/services/recommend_app/remote/remote_retrieval.py index 80e1aefc01..85f3a02825 100644 --- a/api/services/recommend_app/remote/remote_retrieval.py +++ b/api/services/recommend_app/remote/remote_retrieval.py @@ -20,7 +20,7 @@ class RemoteRecommendAppRetrieval(RecommendAppRetrievalBase): try: result = self.fetch_recommended_app_detail_from_dify_official(app_id) except Exception as e: - logger.warning(f"fetch recommended app detail from dify official failed: {e}, switch to built-in.") + logger.warning("fetch recommended app detail from dify official failed: %s, switch to built-in.", e) result = BuildInRecommendAppRetrieval.fetch_recommended_app_detail_from_builtin(app_id) return result @@ -28,7 +28,7 @@ class RemoteRecommendAppRetrieval(RecommendAppRetrievalBase): try: result = self.fetch_recommended_apps_from_dify_official(language) except Exception as e: - logger.warning(f"fetch recommended apps from dify official failed: {e}, switch to built-in.") + logger.warning("fetch recommended apps from dify official failed: %s, switch to built-in.", e) result = BuildInRecommendAppRetrieval.fetch_recommended_apps_from_builtin(language) return result diff --git a/api/services/tools/builtin_tools_manage_service.py b/api/services/tools/builtin_tools_manage_service.py index 65f05d2986..841eeb4333 100644 --- a/api/services/tools/builtin_tools_manage_service.py +++ b/api/services/tools/builtin_tools_manage_service.py @@ -337,7 +337,7 @@ class BuiltinToolManageService: max_number = max(numbers) return f"{default_pattern} {max_number + 1}" except Exception as e: - logger.warning(f"Error generating next provider name for {provider}: {str(e)}") + logger.warning("Error generating next provider name for %s: %s", provider, str(e)) # fallback return 
f"{credential_type.get_name()} 1" diff --git a/api/services/tools/tools_transform_service.py b/api/services/tools/tools_transform_service.py index 2d192e6f7f..52fbc0979c 100644 --- a/api/services/tools/tools_transform_service.py +++ b/api/services/tools/tools_transform_service.py @@ -275,7 +275,7 @@ class ToolTransformService: username = user.name except Exception: - logger.exception(f"failed to get user name for api provider {db_provider.id}") + logger.exception("failed to get user name for api provider %s", db_provider.id) # add provider into providers credentials = db_provider.credentials result = ToolProviderApiEntity( diff --git a/api/tasks/delete_account_task.py b/api/tasks/delete_account_task.py index d3b33e3052..ef50adf8d5 100644 --- a/api/tasks/delete_account_task.py +++ b/api/tasks/delete_account_task.py @@ -16,11 +16,11 @@ def delete_account_task(account_id): try: BillingService.delete_account(account_id) except Exception as e: - logger.exception(f"Failed to delete account {account_id} from billing service.") + logger.exception("Failed to delete account %s from billing service.", account_id) raise if not account: - logger.error(f"Account {account_id} not found.") + logger.error("Account %s not found.", account_id) return # send success email send_deletion_success_task.delay(account.email) diff --git a/api/tasks/ops_trace_task.py b/api/tasks/ops_trace_task.py index 2e77332ffe..c7e0047664 100644 --- a/api/tasks/ops_trace_task.py +++ b/api/tasks/ops_trace_task.py @@ -43,13 +43,11 @@ def process_trace_tasks(file_info): if trace_type: trace_info = trace_type(**trace_info) trace_instance.trace(trace_info) - logging.info(f"Processing trace tasks success, app_id: {app_id}") + logging.info("Processing trace tasks success, app_id: %s", app_id) except Exception as e: - logging.info( - f"error:\n\n\n{e}\n\n\n\n", - ) + logging.info("error:\n\n\n%s\n\n\n\n", e) failed_key = f"{OPS_TRACE_FAILED_KEY}_{app_id}" redis_client.incr(failed_key) - logging.info(f"Processing trace tasks failed, app_id: {app_id}") + logging.info("Processing trace tasks failed, app_id: %s", app_id) finally: storage.delete(file_path) diff --git a/api/tasks/remove_app_and_related_data_task.py b/api/tasks/remove_app_and_related_data_task.py index 1619f8c546..b6f772dd60 100644 --- a/api/tasks/remove_app_and_related_data_task.py +++ b/api/tasks/remove_app_and_related_data_task.py @@ -201,7 +201,7 @@ def _delete_app_workflow_runs(tenant_id: str, app_id: str): batch_size=1000, ) - logging.info(f"Deleted {deleted_count} workflow runs for app {app_id}") + logging.info("Deleted %s workflow runs for app %s", deleted_count, app_id) def _delete_app_workflow_node_executions(tenant_id: str, app_id: str): @@ -215,7 +215,7 @@ def _delete_app_workflow_node_executions(tenant_id: str, app_id: str): batch_size=1000, ) - logging.info(f"Deleted {deleted_count} workflow node executions for app {app_id}") + logging.info("Deleted %s workflow node executions for app %s", deleted_count, app_id) def _delete_app_workflow_app_logs(tenant_id: str, app_id: str): @@ -342,6 +342,6 @@ def _delete_records(query_sql: str, params: dict, delete_func: Callable, name: s db.session.commit() logging.info(click.style(f"Deleted {name} {record_id}", fg="green")) except Exception: - logging.exception(f"Error occurred while deleting {name} {record_id}") + logging.exception("Error occurred while deleting %s %s", name, record_id) continue rs.close() diff --git a/api/tasks/remove_document_from_index_task.py b/api/tasks/remove_document_from_index_task.py index 
3f73cc7b40..dc3128d369 100644 --- a/api/tasks/remove_document_from_index_task.py +++ b/api/tasks/remove_document_from_index_task.py @@ -49,7 +49,7 @@ def remove_document_from_index_task(document_id: str): try: index_processor.clean(dataset, index_node_ids, with_keywords=True, delete_child_chunks=False) except Exception: - logging.exception(f"clean dataset {dataset.id} from index failed") + logging.exception("clean dataset %s from index failed", dataset.id) # update segment to disable db.session.query(DocumentSegment).where(DocumentSegment.document_id == document.id).update( {