diff --git a/.github/scripts/check-ut.py b/.github/scripts/check-ut.py index 5758c4e6d..c126c92f9 100644 --- a/.github/scripts/check-ut.py +++ b/.github/scripts/check-ut.py @@ -20,9 +20,9 @@ "IndexError", "ImportError", "AssertionError", - "Exception", + #"Exception", "OSError", - "Failed", + #"Failed", "TimeoutError", "asyncio.TimeoutError", "FileNotFoundError", @@ -52,7 +52,7 @@ def get_result(case): result = "failed" return result -def get_message(case): +def get_message(case, failure_list=None): if isinstance(case, dict): return case.get('error', '') @@ -66,28 +66,41 @@ def get_message(case): capture_next_lines = False indent_level = 0 + collect_trace_done = False + collect_trace = False + for line in full_text.splitlines(): stripped_line = line.strip() if not stripped_line: continue - for error_type in error_types: - if stripped_line.startswith(error_type + ": "): - error_msg = stripped_line[len(error_type)+2:] - error_messages.append(f"{error_type}: {error_msg}") - capture_next_lines = True - indent_level = 0 - break - elif f"{error_type}:" in stripped_line and "Traceback" not in stripped_line: - error_msg = stripped_line.split(f'{error_type}:')[-1].strip() - error_messages.append(f"{error_type}: {error_msg}") - capture_next_lines = True - indent_level = 0 - break + # Only collect the first trace + if not collect_trace_done and "Traceback (most recent call last):" in stripped_line: + collect_trace = True + + if collect_trace: + if "Error: " in stripped_line: + collect_trace = False + collect_trace_done = True + error_messages.append(f"{stripped_line}") + else: + for error_type in error_types: + if stripped_line.startswith(error_type + ": "): + error_msg = stripped_line[len(error_type)+2:] + error_messages.append(f"{error_type}: {error_msg}") + capture_next_lines = True + indent_level = 0 + break + elif f"{error_type}:" in stripped_line and "Traceback" not in stripped_line: + error_msg = stripped_line.split(f'{error_type}:')[-1].strip() + 
error_messages.append(f"{error_type}: {error_msg}") + capture_next_lines = True + indent_level = 0 + break return " ; ".join(error_messages) if error_messages else f"{case.result[0].message.splitlines()[0]}" -def print_md_row(row, print_header=False): +def print_md_row(row, print_header=False, failure_list=None): if print_header: header = " | ".join([f"{key}" for key in row.keys()]) print(f"| {header} |") @@ -96,7 +109,12 @@ def print_md_row(row, print_header=False): row_values = " | ".join([f"{value}" for value in row.values()]) print(f"| {row_values} |") -def print_failures(): + if failure_list is not None: + failure_list.write(f"| {row_values} |\n") + + + +def print_failures(failure_list=None): if not failures: return @@ -109,7 +127,7 @@ def print_failures(): 'Status': get_result(case), 'Message': get_message(case), 'Source': case['source'] if isinstance(case, dict) else 'XML' - }, print_header) + }, print_header, failure_list=failure_list) print_header = False def parse_log_file(log_file): @@ -251,7 +269,9 @@ def main(): else: print(f"Skipping unknown file type: {input_file}", file=sys.stderr) - print_failures() + with open("ut_failure_list.csv", "w") as failure_list: + print_failures(failure_list=failure_list) + print_summary() diff --git a/.github/workflows/_linux_ut.yml b/.github/workflows/_linux_ut.yml index 7f29d89d3..ea193a53b 100644 --- a/.github/workflows/_linux_ut.yml +++ b/.github/workflows/_linux_ut.yml @@ -295,6 +295,9 @@ jobs: source activate xpu_op_${ZE_AFFINITY_MASK} pip install junitparser python .github/scripts/check-ut.py ${{ github.workspace }}/ut_log/*.xml >> $GITHUB_STEP_SUMMARY || true + if [ -e "ut_failure_list.csv" ];then + cp ut_failure_list.csv ${{ github.workspace }}/ut_log/ut_failure_list.csv >> $GITHUB_STEP_SUMMARY || true + fi - name: UT Test Results Check shell: bash run: | @@ -325,6 +328,12 @@ jobs: with: name: Inductor-XPU-UT-Data-${{ github.event.pull_request.number || github.sha }}-${{ env.UT_NAME }} path: ${{ github.workspace 
}}/ut_log + - name: Upload XPU UT Failure list + if: always() + uses: actions/upload-artifact@v4 + with: + name: XPU-UT-Failure-List-${{ github.event.pull_request.number || github.sha }}-${{ env.UT_NAME }} + path: ${{ github.workspace }}/ut_log/ut_failure_list.csv distributed_ut_test: runs-on: pvc_e2e diff --git a/test/xpu/skip_list_common.py b/test/xpu/skip_list_common.py index 1d29f9972..eb0b9aff7 100644 --- a/test/xpu/skip_list_common.py +++ b/test/xpu/skip_list_common.py @@ -6,7 +6,7 @@ "float8", # workarounds for the following tests # https://github.com/intel/torch-xpu-ops/issues/1214 - "test_python_ref__refs_exp_xpu_complex128", + # "test_python_ref__refs_exp_xpu_complex128", "test_python_ref__refs_sigmoid_xpu_complex128", "test_python_ref_executor__refs_log2_executor_aten_xpu_complex128", "test_python_ref_executor__refs_exp_executor_aten_xpu_complex128", @@ -35,7 +35,7 @@ # AssertionError: The supported dtypes for sparse.sampled_addmm on device type xpu are incorrect! - OPs not supported "test_dtypes_sparse_sampled_addmm_xpu", # OPs not supported - "test_errors_dot_xpu", + # "test_errors_dot_xpu", "test_errors_vdot_xpu", # Linalg OPs not supported "test_noncontiguous_samples_linalg_det_xpu_float32", @@ -54,7 +54,7 @@ # RuntimeError: Long is not supported in oneDNN! 
# RuntimeError: could not create a primitive descriptor for a deconvolution forward propagation primitive # RuntimeError: Double and complex datatype matmul is not supported in oneDNN - "test_noncontiguous_samples_nn_functional_conv3d_xpu_int64", + # "test_noncontiguous_samples_nn_functional_conv3d_xpu_int64", "test_noncontiguous_samples_nn_functional_conv_transpose1d_xpu_int64", "test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_complex64", "test_noncontiguous_samples_nn_functional_conv_transpose2d_xpu_float32", @@ -775,7 +775,7 @@ "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_2_xpu", "test_scaled_dot_product_attention_3D_input_dim_2D_attn_mask_dropout_p_0_0_xpu", # https://github.com/intel/torch-xpu-ops/issues/1432 - "test_multiheadattention_fastpath_attn_mask_attn_mask_dim_2_key_padding_mask_dim_2_bool_xpu", + # "test_multiheadattention_fastpath_attn_mask_attn_mask_dim_2_key_padding_mask_dim_2_bool_xpu", "test_multiheadattention_fastpath_attn_mask_attn_mask_dim_3_key_padding_mask_dim_2_bool_xpu", "test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_False_use_autocast_False_d_model_12_xpu", "test_transformerencoder_fastpath_use_torchscript_False_enable_nested_tensor_False_use_autocast_True_d_model_12_xpu",