From 848d7f083ff5c169ac1810a62ec01e18d8e772f5 Mon Sep 17 00:00:00 2001 From: JC H Date: Sun, 11 Jan 2026 11:21:13 +0800 Subject: [PATCH] Fix the bug in detecting maximal token length in longcat image pipeline There is an additional "len" call, which leads to a bug when the token length exceeds the preset maximal length. --- .../pipelines/longcat_image/pipeline_longcat_image_edit.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/diffusers/pipelines/longcat_image/pipeline_longcat_image_edit.py b/src/diffusers/pipelines/longcat_image/pipeline_longcat_image_edit.py index e55a2a47f343..cdf95f635259 100644 --- a/src/diffusers/pipelines/longcat_image/pipeline_longcat_image_edit.py +++ b/src/diffusers/pipelines/longcat_image/pipeline_longcat_image_edit.py @@ -284,7 +284,7 @@ def _encode_prompt(self, prompt, image): if len(all_tokens) > self.tokenizer_max_length: logger.warning( "Your input was truncated because `max_sequence_length` is set to " - f" {self.tokenizer_max_length} input token nums : {len(len(all_tokens))}" + f" {self.tokenizer_max_length} input token nums : {len(all_tokens)}" ) all_tokens = all_tokens[: self.tokenizer_max_length]