Commit 2fc9079

Revert "[Inductor] FlexAttention backward kernel optimization (pytorch#127208)"
This reverts commit f717131. Reverted pytorch#127208 on behalf of https://github.com/yanboliang because test_flex_attention is failing internally ([comment](pytorch#127208 (comment)))
1 parent 3f45fa6 commit 2fc9079

3 files changed: +125 −184 lines changed

test/inductor/test_flex_attention.py (1 addition, 3 deletions)

@@ -144,8 +144,6 @@ def _check_equal(
     ):
         compiled_error = (golden_out - compiled_out).abs().mean()
         ref_error = (golden_out - ref_out).abs().mean()
-        if torch.isnan(compiled_error).any() and not torch.isnan(ref_error).any():
-            self.assertTrue(False, "Output/Grad with NaN")
         if compiled_error > ref_error * fudge_factor:
             name = tensor_name if tensor_name is not None else ""
             msg = f"{name} Compiled error {compiled_error} is greater than ref error {ref_error} by more than {fudge_factor}X."
@@ -197,7 +195,7 @@ def run_test(
         self._check_equal(
             k_gold.grad, k_ref.grad, k.grad, k_fudge_factor, "Grad_Key"
         )
-        v_fudge_factor = 4 * fudge_factor
+        v_fudge_factor = 8 * fudge_factor
         self._check_equal(
             v_gold.grad, v_ref.grad, v.grad, v_fudge_factor, "Grad_Value"
         )
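For context on what this revert changes: `_check_equal` follows a standard compiled-vs-eager accuracy pattern. Both the eager reference and the compiled output are compared against a high-precision "golden" result, and the compiled error may exceed the reference error only by a bounded fudge factor (restored here from 4x back to 8x for the value gradient). A minimal standalone sketch of that pattern, written as a plain function with illustrative names and hypothetical inputs rather than the actual test harness:

```python
import torch

def check_equal(golden_out, ref_out, compiled_out, fudge_factor, tensor_name=""):
    # Mean absolute error of each implementation against the
    # high-precision "golden" result (typically float64).
    compiled_error = (golden_out - compiled_out).abs().mean()
    ref_error = (golden_out - ref_out).abs().mean()
    # The compiled kernel may be less accurate than eager,
    # but only by the allowed fudge factor.
    if compiled_error > ref_error * fudge_factor:
        raise AssertionError(
            f"{tensor_name} Compiled error {compiled_error} is greater than "
            f"ref error {ref_error} by more than {fudge_factor}X."
        )

# Hypothetical usage: the golden result stays in float64, while the
# reference and "compiled" results round through float16, with a little
# extra noise standing in for kernel-level numerical differences.
golden = torch.randn(64, 64, dtype=torch.float64)
ref = golden.to(torch.float16).to(torch.float64)
compiled = (golden + 1e-4 * torch.randn_like(golden)).to(torch.float16).to(torch.float64)
check_equal(golden, ref, compiled, fudge_factor=8.0, tensor_name="Grad_Value")
```

Measuring against a golden float64 baseline makes the bound relative: it tolerates the precision loss inherent to the test dtype while still catching kernels that are disproportionately less accurate than eager mode.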
