From c916a805aa4c9137c8bba7cf9432247f99116f8d Mon Sep 17 00:00:00 2001
From: comfyanonymous <121283862+comfyanonymous@users.noreply.github.com>
Date: Wed, 15 Oct 2025 00:17:20 -0400
Subject: [PATCH] gfx942 doesn't support fp8 operations.

---
 comfy/model_management.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/comfy/model_management.py b/comfy/model_management.py
index 709ebc40b256..d82d5b8b00ae 100644
--- a/comfy/model_management.py
+++ b/comfy/model_management.py
@@ -349,7 +349,7 @@ def amd_min_version(device=None, min_rdna_version=0):
         if any((a in arch) for a in ["gfx1201"]):
             ENABLE_PYTORCH_ATTENTION = True
         if torch_version_numeric >= (2, 7) and rocm_version >= (6, 4):
-            if any((a in arch) for a in ["gfx1200", "gfx1201", "gfx942", "gfx950"]): # TODO: more arches
+            if any((a in arch) for a in ["gfx1200", "gfx1201", "gfx950"]): # TODO: more arches, "gfx942" gives error on pytorch nightly 2.10 1013 rocm7.0
                 SUPPORT_FP8_OPS = True
 except:
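
Note: for reference, a minimal standalone sketch of the gating this hunk changes. The
function supports_fp8_ops and its tuple arguments are illustrative stand-ins for the
module-level values comfy/model_management.py derives from
torch.cuda.get_device_properties(...).gcnArchName and the installed PyTorch/ROCm
builds; this is not an actual ComfyUI API.

    # Sketch of the FP8 gating logic, assuming arch is the gcnArchName string
    # and the version tuples mirror torch_version_numeric / rocm_version above.
    def supports_fp8_ops(arch: str,
                         torch_version_numeric: tuple,
                         rocm_version: tuple) -> bool:
        """Return True if FP8 ops should be enabled for this AMD GPU."""
        if torch_version_numeric < (2, 7) or rocm_version < (6, 4):
            return False
        # gfx942 is removed from this list because it errors on
        # PyTorch nightly 2.10 (1013) with ROCm 7.0, per the commit message.
        fp8_arches = ["gfx1200", "gfx1201", "gfx950"]
        return any(a in arch for a in fp8_arches)

    # After this patch, a gfx942 (MI300-class) device no longer enables FP8 ops:
    print(supports_fp8_ops("gfx942", (2, 10), (7, 0)))   # False
    print(supports_fp8_ops("gfx1201", (2, 8), (6, 4)))   # True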