@@ -471,7 +471,8 @@ def test_safetensors_fp32_export(recipe_name):
     if recipe_name == "MXFP8BlockScaling":
         pytest.xfail(
             "MXFP8BlockScaling: FusedAdam CUDA kernel does not support "
-            "MXFP8 quantized tensors, causing illegal memory access"
+            "MXFP8 quantized tensors, causing illegal memory access. "
+            "Fixed by https://github.com/NVIDIA/TransformerEngine/pull/2789."
         )

     from safetensors.torch import load_file, save_file
@@ -560,7 +561,8 @@ def test_dcp_output_parity(recipe_name, async_save):
             "MXFP8BlockScaling: FusedAdam CUDA kernel does not support "
             "MXFP8 quantized tensors, causing illegal memory access: "
             "/transformer_engine/common/multi_tensor/multi_tensor_apply.cuh:92 in function "
-            "multi_tensor_apply: CUDA Error: an illegal memory access was encountered"
+            "multi_tensor_apply: CUDA Error: an illegal memory access was encountered. "
+            "Fixed by https://github.com/NVIDIA/TransformerEngine/pull/2789."
         )

     if recipe_name == "NVFP4BlockScaling":
@@ -765,7 +767,8 @@ def test_dcp_resharding_save(recipe_name):
     if recipe_name == "MXFP8BlockScaling":
         pytest.xfail(
             "MXFP8BlockScaling: FusedAdam CUDA kernel does not support "
-            "MXFP8 quantized tensors, causing illegal memory access"
+            "MXFP8 quantized tensors, causing illegal memory access. "
+            "Fixed by https://github.com/NVIDIA/TransformerEngine/pull/2789."
         )
     if recipe_name == "NVFP4BlockScaling":
         pytest.xfail(
@@ -852,7 +855,8 @@ def test_dcp_resharding_load(recipe_name):
     if recipe_name == "MXFP8BlockScaling":
         pytest.xfail(
             "MXFP8BlockScaling: FusedAdam CUDA kernel does not support "
-            "MXFP8 quantized tensors, causing illegal memory access"
+            "MXFP8 quantized tensors, causing illegal memory access. "
+            "Fixed by https://github.com/NVIDIA/TransformerEngine/pull/2789."
         )
     if recipe_name == "NVFP4BlockScaling":
         pytest.xfail(
0 commit comments