Skip to content

Commit 9deece1

Browse files
authored
[model] fix use_cache patching for gemma3 multimodal (#7500)
1 parent f06a74a commit 9deece1

File tree

1 file changed

+4
-0
lines changed

1 file changed

+4
-0
lines changed

src/llamafactory/model/patcher.py

+4
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,10 @@ def patch_config(
         setattr(config, "use_cache", True)
         logger.info_rank0("Using KV cache for faster generation.")

+    if config.architectures[0] == "Gemma3ForConditionalGeneration" and not model_args.use_cache:
+        text_config = config.text_config
+        setattr(text_config, "use_cache", False)
+
     if getattr(config, "model_type", None) == "qwen":
         setattr(config, "use_flash_attn", model_args.flash_attn == "fa2")
     for dtype_name, dtype in [("fp16", torch.float16), ("bf16", torch.bfloat16), ("fp32", torch.float32)]:

Comments (0)