From 07109f5c9c2aa992f48dc75812b1fb884a367fdc Mon Sep 17 00:00:00 2001
From: irenab
Date: Tue, 7 Jan 2025 21:19:57 +0200
Subject: [PATCH] Temporarily disable slow tests

---
 .../test_features_runner.py                   | 48 +++++++-------
 .../model_tests/test_feature_models_runner.py | 64 +++++++++----------
 2 files changed, 56 insertions(+), 56 deletions(-)

diff --git a/tests/keras_tests/feature_networks_tests/test_features_runner.py b/tests/keras_tests/feature_networks_tests/test_features_runner.py
index e34525ac1..86e36e778 100644
--- a/tests/keras_tests/feature_networks_tests/test_features_runner.py
+++ b/tests/keras_tests/feature_networks_tests/test_features_runner.py
@@ -791,30 +791,30 @@ def test_uniform_range_selection_softmax_activation(self):
         UniformRangeSelectionBoundedActivationTest(self, QuantizationErrorMethod.LP).run_test()
         UniformRangeSelectionBoundedActivationTest(self, QuantizationErrorMethod.KL).run_test()
 
-    def test_multi_head_attention(self):
-        q_seq_len, kv_seq_len = 5, 6
-        q_dim, k_dim, v_dim = 11, 12, 13
-        num_heads, qk_proj_dim, v_proj_dim = 3, 4, 7
-        attention_axes = [1, 3]
-        num_iterations = 9
-        for separate_key_value in [False, True]:
-            MultiHeadAttentionTest(self, [(q_seq_len, q_dim),
-                                          (kv_seq_len, k_dim),
-                                          (kv_seq_len, v_dim)],
-                                   num_heads, qk_proj_dim, v_proj_dim, None,
-                                   separate_key_value=separate_key_value, output_dim=15).run_test()
-            input_shapes = [(2, num_iterations, q_seq_len, q_dim),
-                            (2, num_iterations, kv_seq_len, k_dim),
-                            (2, num_iterations, kv_seq_len, v_dim)]
-            MultiHeadAttentionTest(self, input_shapes,
-                                   num_heads, qk_proj_dim, v_proj_dim, attention_axes,
-                                   separate_key_value=separate_key_value, output_dim=14).run_test()
-            MultiHeadAttentionTest(self, input_shapes,
-                                   num_heads, qk_proj_dim, v_proj_dim, attention_axes,
-                                   separate_key_value=separate_key_value, output_dim=None).run_test()
-            MultiHeadAttentionTest(self, input_shapes,
-                                   num_heads, qk_proj_dim, v_proj_dim, None,
-                                   separate_key_value=separate_key_value, output_dim=14).run_test()
+    # def test_multi_head_attention(self):
+    #     q_seq_len, kv_seq_len = 5, 6
+    #     q_dim, k_dim, v_dim = 11, 12, 13
+    #     num_heads, qk_proj_dim, v_proj_dim = 3, 4, 7
+    #     attention_axes = [1, 3]
+    #     num_iterations = 9
+    #     for separate_key_value in [False, True]:
+    #         MultiHeadAttentionTest(self, [(q_seq_len, q_dim),
+    #                                       (kv_seq_len, k_dim),
+    #                                       (kv_seq_len, v_dim)],
+    #                                num_heads, qk_proj_dim, v_proj_dim, None,
+    #                                separate_key_value=separate_key_value, output_dim=15).run_test()
+    #         input_shapes = [(2, num_iterations, q_seq_len, q_dim),
+    #                         (2, num_iterations, kv_seq_len, k_dim),
+    #                         (2, num_iterations, kv_seq_len, v_dim)]
+    #         MultiHeadAttentionTest(self, input_shapes,
+    #                                num_heads, qk_proj_dim, v_proj_dim, attention_axes,
+    #                                separate_key_value=separate_key_value, output_dim=14).run_test()
+    #         MultiHeadAttentionTest(self, input_shapes,
+    #                                num_heads, qk_proj_dim, v_proj_dim, attention_axes,
+    #                                separate_key_value=separate_key_value, output_dim=None).run_test()
+    #         MultiHeadAttentionTest(self, input_shapes,
+    #                                num_heads, qk_proj_dim, v_proj_dim, None,
+    #                                separate_key_value=separate_key_value, output_dim=14).run_test()
 
     def test_qat(self):
         QATWrappersTest(self, layers.Conv2D(3, 4, activation='relu'), test_loading=True).run_test()
diff --git a/tests/pytorch_tests/model_tests/test_feature_models_runner.py b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
index 685403b42..9e9d334ae 100644
--- a/tests/pytorch_tests/model_tests/test_feature_models_runner.py
+++ b/tests/pytorch_tests/model_tests/test_feature_models_runner.py
@@ -247,24 +247,24 @@ def test_linear_function(self):
         """
         LinearFNetTest(self).run_test()
 
-    def test_matmul_function(self):
-        """
-        This test checks the MatMul substitution function
-        """
-        MatMulFNetTest(self, [3, 5, 10], [3, 10, 8]).run_test()
-        MatMulOpNetTest(self, [3, 5, 10], [3, 10, 8]).run_test()
-        MatMulFNetTest(self, [3, 2, 5, 10], [3, 2, 10, 20]).run_test()
-        MatMulOpNetTest(self, [3, 2, 5, 10], [3, 2, 10, 20]).run_test()
-        MatMulFNetTest(self, [50, 2, 400, 32], [50, 1, 32, 80]).run_test()
-        MatMulOpNetTest(self, [50, 2, 400, 32], [50, 1, 32, 80]).run_test()
-        MatMulFNetTest(self, [3, 1, 5, 10], [3, 8, 10, 3]).run_test()
-        MatMulOpNetTest(self, [3, 1, 5, 10], [3, 8, 10, 3]).run_test()
-        MatMulFNetTest(self, [3, 1, 4, 5, 10], [3, 8, 1, 10, 10]).run_test()
-        MatMulOpNetTest(self, [3, 1, 4, 5, 10], [3, 8, 1, 10, 10]).run_test()
-        MatMulFNetTest(self, [3, 10, 6, 5, 50, 100], [3, 10, 1, 1, 100, 80]).run_test()
-        MatMulOpNetTest(self, [3, 10, 6, 5, 50, 100], [3, 10, 1, 1, 100, 80]).run_test()
-        MatMulFNetTest(self, [3, 1, 7, 1, 50, 100], [3, 10, 7, 5, 100, 80]).run_test()
-        MatMulOpNetTest(self, [3, 1, 7, 1, 50, 100], [3, 10, 7, 5, 100, 80]).run_test()
+    # def test_matmul_function(self):
+    #     """
+    #     This test checks the MatMul substitution function
+    #     """
+    #     MatMulFNetTest(self, [3, 5, 10], [3, 10, 8]).run_test()
+    #     MatMulOpNetTest(self, [3, 5, 10], [3, 10, 8]).run_test()
+    #     MatMulFNetTest(self, [3, 2, 5, 10], [3, 2, 10, 20]).run_test()
+    #     MatMulOpNetTest(self, [3, 2, 5, 10], [3, 2, 10, 20]).run_test()
+    #     MatMulFNetTest(self, [50, 2, 400, 32], [50, 1, 32, 80]).run_test()
+    #     MatMulOpNetTest(self, [50, 2, 400, 32], [50, 1, 32, 80]).run_test()
+    #     MatMulFNetTest(self, [3, 1, 5, 10], [3, 8, 10, 3]).run_test()
+    #     MatMulOpNetTest(self, [3, 1, 5, 10], [3, 8, 10, 3]).run_test()
+    #     MatMulFNetTest(self, [3, 1, 4, 5, 10], [3, 8, 1, 10, 10]).run_test()
+    #     MatMulOpNetTest(self, [3, 1, 4, 5, 10], [3, 8, 1, 10, 10]).run_test()
+    #     MatMulFNetTest(self, [3, 10, 6, 5, 50, 100], [3, 10, 1, 1, 100, 80]).run_test()
+    #     MatMulOpNetTest(self, [3, 10, 6, 5, 50, 100], [3, 10, 1, 1, 100, 80]).run_test()
+    #     MatMulFNetTest(self, [3, 1, 7, 1, 50, 100], [3, 10, 7, 5, 100, 80]).run_test()
+    #     MatMulOpNetTest(self, [3, 1, 7, 1, 50, 100], [3, 10, 7, 5, 100, 80]).run_test()
 
     def test_broken_net(self):
         """
@@ -639,20 +639,20 @@ def test_mixed_precision_distance_functions(self):
         """
         MixedPrecisionDistanceFunctions(self).run_test()
 
-    def test_mha_layer_test(self):
-        """
-        This test checks the MultiHeadAttentionDecomposition feature.
-        """
-        num_heads = [3, 7, 5, 11]
-        q_seq_len, kv_seq_len = [8, 11, 4, 18], [13, 9, 2, 11]
-        qdim, kdim, vdim = [7, 23, 2, 4], [9, None, 7, None], [11, 17, 7, None]
-        for iter in range(len(num_heads)):
-            MHALayerNetTest(self, num_heads[iter], q_seq_len[iter], qdim[iter] * num_heads[iter],
-                            kv_seq_len[iter], kdim[iter], vdim[iter], bias=True).run_test()
-            MHALayerNetTest(self, num_heads[iter], q_seq_len[iter], qdim[iter] * num_heads[iter],
-                            kv_seq_len[iter], kdim[iter], vdim[iter], bias=False).run_test()
-            MHALayerNetFeatureTest(self, num_heads[0], q_seq_len[0], qdim[0] * num_heads[0],
-                                   kv_seq_len[0], kdim[0], vdim[0], bias=True, add_bias_kv=True).run_test()
+    # def test_mha_layer_test(self):
+    #     """
+    #     This test checks the MultiHeadAttentionDecomposition feature.
+ # """ + # num_heads = [3, 7, 5, 11] + # q_seq_len, kv_seq_len = [8, 11, 4, 18], [13, 9, 2, 11] + # qdim, kdim, vdim = [7, 23, 2, 4], [9, None, 7, None], [11, 17, 7, None] + # for iter in range(len(num_heads)): + # MHALayerNetTest(self, num_heads[iter], q_seq_len[iter], qdim[iter] * num_heads[iter], + # kv_seq_len[iter], kdim[iter], vdim[iter], bias=True).run_test() + # MHALayerNetTest(self, num_heads[iter], q_seq_len[iter], qdim[iter] * num_heads[iter], + # kv_seq_len[iter], kdim[iter], vdim[iter], bias=False).run_test() + # MHALayerNetFeatureTest(self, num_heads[0], q_seq_len[0], qdim[0] * num_heads[0], + # kv_seq_len[0], kdim[0], vdim[0], bias=True, add_bias_kv=True).run_test() def test_scaled_dot_product_attention_layer(self): """