Skip to content

Daisyden/artifacts4 #1672

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 7 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
60 changes: 40 additions & 20 deletions .github/scripts/check-ut.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,9 @@
"IndexError",
"ImportError",
"AssertionError",
"Exception",
#"Exception",
"OSError",
"Failed",
#"Failed",
"TimeoutError",
"asyncio.TimeoutError",
"FileNotFoundError",
Expand Down Expand Up @@ -52,7 +52,7 @@ def get_result(case):
result = "failed"
return result

def get_message(case):
def get_message(case, failure_list=None):
if isinstance(case, dict):
return case.get('error', '')

Expand All @@ -66,28 +66,41 @@ def get_message(case):
capture_next_lines = False
indent_level = 0

collect_trace_done = False
collect_trace = False

for line in full_text.splitlines():
stripped_line = line.strip()
if not stripped_line:
continue

for error_type in error_types:
if stripped_line.startswith(error_type + ": "):
error_msg = stripped_line[len(error_type)+2:]
error_messages.append(f"{error_type}: {error_msg}")
capture_next_lines = True
indent_level = 0
break
elif f"{error_type}:" in stripped_line and "Traceback" not in stripped_line:
error_msg = stripped_line.split(f'{error_type}:')[-1].strip()
error_messages.append(f"{error_type}: {error_msg}")
capture_next_lines = True
indent_level = 0
break
# Only collect the first traceback
if not collect_trace_done and "Traceback (most recent call last):" in stripped_line:
collect_trace = True

if collect_trace:
if "Error: " in stripped_line:
collect_trace = False
collect_trace_done = True
error_messages.append(f"{stripped_line}")
else:
for error_type in error_types:
if stripped_line.startswith(error_type + ": "):
error_msg = stripped_line[len(error_type)+2:]
error_messages.append(f"{error_type}: {error_msg}")
capture_next_lines = True
indent_level = 0
break
elif f"{error_type}:" in stripped_line and "Traceback" not in stripped_line:
error_msg = stripped_line.split(f'{error_type}:')[-1].strip()
error_messages.append(f"{error_type}: {error_msg}")
capture_next_lines = True
indent_level = 0
break

return " ; ".join(error_messages) if error_messages else f"{case.result[0].message.splitlines()[0]}"

def print_md_row(row, print_header=False):
def print_md_row(row, print_header=False, failure_list=None):
if print_header:
header = " | ".join([f"{key}" for key in row.keys()])
print(f"| {header} |")
Expand All @@ -96,7 +109,12 @@ def print_md_row(row, print_header=False):
row_values = " | ".join([f"{value}" for value in row.values()])
print(f"| {row_values} |")

def print_failures():
if failure_list is not None:
failure_list.write(f"| {row_values} |\n")



def print_failures(failure_list=None):
if not failures:
return

Expand All @@ -109,7 +127,7 @@ def print_failures():
'Status': get_result(case),
'Message': get_message(case),
'Source': case['source'] if isinstance(case, dict) else 'XML'
}, print_header)
}, print_header, failure_list=failure_list)
print_header = False

def parse_log_file(log_file):
Expand Down Expand Up @@ -251,7 +269,9 @@ def main():
else:
print(f"Skipping unknown file type: {input_file}", file=sys.stderr)

print_failures()
with open("ut_failure_list.csv", "w") as failure_list:
print_failures(failure_list=failure_list)

print_summary()


Expand Down
9 changes: 9 additions & 0 deletions .github/workflows/_linux_ut.yml
Original file line number Diff line number Diff line change
Expand Up @@ -295,6 +295,9 @@ jobs:
source activate xpu_op_${ZE_AFFINITY_MASK}
pip install junitparser
python .github/scripts/check-ut.py ${{ github.workspace }}/ut_log/*.xml >> $GITHUB_STEP_SUMMARY || true
if [ -e "ut_failure_list.csv" ];then
cp ut_failure_list.csv ${{ github.workspace }}/ut_log/ut_failure_list.csv >> $GITHUB_STEP_SUMMARY || true
fi
- name: UT Test Results Check
shell: bash
run: |
Expand Down Expand Up @@ -325,6 +328,12 @@ jobs:
with:
name: Inductor-XPU-UT-Data-${{ github.event.pull_request.number || github.sha }}-${{ env.UT_NAME }}
path: ${{ github.workspace }}/ut_log
- name: Upload XPU UT Failure list
if: always()
uses: actions/upload-artifact@v4
with:
name: XPU-UT-Failure-List-${{ github.event.pull_request.number || github.sha }}-${{ env.UT_NAME }}
path: ${{ github.workspace }}/ut_log/ut_failure_list.csv

distributed_ut_test:
runs-on: pvc_e2e
Expand Down
8 changes: 4 additions & 4 deletions test/xpu/skip_list_common.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
"float8",
# workarounds for the following tests
# https://github.com/intel/torch-xpu-ops/issues/1214
"test_python_ref__refs_exp_xpu_complex128",
# "test_python_ref__refs_exp_xpu_complex128",
"test_python_ref__refs_sigmoid_xpu_complex128",
"test_python_ref_executor__refs_log2_executor_aten_xpu_complex128",
"test_python_ref_executor__refs_exp_executor_aten_xpu_complex128",
Expand Down Expand Up @@ -35,7 +35,7 @@
# AssertionError: The supported dtypes for sparse.sampled_addmm on device type xpu are incorrect! - OPs not supported
"test_dtypes_sparse_sampled_addmm_xpu",
# OPs not supported
"test_errors_dot_xpu",
# "test_errors_dot_xpu",
"test_errors_vdot_xpu",
# Linalg OPs not supported
"test_noncontiguous_samples_linalg_det_xpu_float32",
Expand All @@ -54,7 +54,7 @@
# RuntimeError: Long is not supported in oneDNN!
# RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive
# RuntimeError: Double and complex datatype matmul is not supported in oneDNN
"test_noncontiguous_samples_nn_functional_conv3d_xpu_int64",
# "test_noncontiguous_samples_nn_functional_conv3d_xpu_int64",
"test_noncontiguous_samples_nn_functional_conv_transpose1d_xpu_int64",
"test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_complex64",
"test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_float32",
Expand Down Expand Up @@ -775,7 +775,7 @@
"test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_2_xpu",
"test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_0_xpu",
# https://github.com/intel/torch-xpu-ops/issues/1432
"test_multiheadattention_fastpath_attn_mask_attn_mask_dim_2_key_padding_mask_dim_2_bool_xpu",
# "test_multiheadattention_fastpath_attn_mask_attn_mask_dim_2_key_padding_mask_dim_2_bool_xpu",
"test_multiheadattention_fastpath_attn_mask_attn_mask_dim_3_key_padding_mask_dim_2_bool_xpu",
"test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_False_use_autocast_False_d_model_12_xpu",
"test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_False_use_autocast_True_d_model_12_xpu",
Expand Down
Loading