Update ONNX commit (microsoft#5487)
* update ONNX

* update onnx + register kernels for reduction ops

* bug fix kernel reg

* update cgmanifests

* revert unsqueeze op 13 registration

* filter ops which are not implemented yet

* filter some tests

* update onnx commit to include conv transpose bug fix

* update docker images

* undo not required test changes

* fix test failures
askhade authored Oct 21, 2020
1 parent b48f596 commit df22611
Showing 32 changed files with 357 additions and 150 deletions.
2 changes: 1 addition & 1 deletion cgmanifests/submodules/cgmanifest.json
@@ -345,7 +345,7 @@
"component": {
"type": "git",
"git": {
"commitHash": "b71de776d2847b6e0fed54a888a8213a7812ad12",
"commitHash": "a7a0fec7f25cae567429af62b7eaaee1c3f0e247",
"repositoryUrl": "https://github.com/onnx/onnx"
},
"comments": "git submodule at cmake/external/onnx"
2 changes: 1 addition & 1 deletion cmake/external/onnx
Submodule onnx updated 435 files
@@ -27,6 +27,6 @@ docker run --gpus all --rm \
-e "PackageName=$PackageName" \
-e "RunTestCsharp=$RunTestCsharp" \
-e "RunTestNative=$RunTestNative" \
- onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch5h \
+ onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch9q \
/bin/bash /onnxruntime_src/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest.sh \
/home/onnxruntimedev/$NUGET_REPO_DIRNAME /onnxruntime_src /home/onnxruntimedev $CurrentOnnxRuntimeVersion
@@ -35,6 +35,6 @@ docker run --rm \
-e "DisableMlOps=$DISABLEMLOPS" \
-e "RunTestCsharp=$RunTestCsharp" \
-e "RunTestNative=$RunTestNative" \
- onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g \
+ onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m \
/bin/bash /onnxruntime_src/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest.sh \
/home/onnxruntimedev/$NUGET_REPO_DIRNAME /onnxruntime_src /home/onnxruntimedev $CurrentOnnxRuntimeVersion
241 changes: 158 additions & 83 deletions onnxruntime/core/providers/cpu/cpu_execution_provider.cc

Large diffs are not rendered by default.

72 changes: 55 additions & 17 deletions onnxruntime/core/providers/cpu/reduction/reduction_ops.cc
@@ -73,6 +73,24 @@ namespace onnxruntime {
KernelDefBuilder().TypeConstraint("T", DataTypeImpl::GetTensorType<int64_t>()), \
x<int64_t>);

+ #define REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT8_ONLY(x, startVer, endVer) \
+   ONNX_CPU_OPERATOR_VERSIONED_TYPED_KERNEL(                                         \
+       x,                                                                            \
+       startVer,                                                                     \
+       endVer,                                                                       \
+       int8_t,                                                                       \
+       KernelDefBuilder().TypeConstraint("T", DataTypeImpl::GetTensorType<int8_t>()), \
+       x<int8_t>);
+
+ #define REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_UINT8_ONLY(x, startVer, endVer) \
+   ONNX_CPU_OPERATOR_VERSIONED_TYPED_KERNEL(                                          \
+       x,                                                                             \
+       startVer,                                                                      \
+       endVer,                                                                        \
+       uint8_t,                                                                       \
+       KernelDefBuilder().TypeConstraint("T", DataTypeImpl::GetTensorType<uint8_t>()), \
+       x<uint8_t>);

#define REGISTER_UNARY_ELEMENTWISE_KERNEL_INT8_ONLY(x, sinceVersion) \
ONNX_CPU_OPERATOR_TYPED_KERNEL( \
x, \
@@ -90,44 +108,62 @@ namespace onnxruntime {
x<uint8_t>);

REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceL1, 1, 10);
- REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceL1, 11);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceL1, 11, 12);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceL1, 13);

REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceL2, 1, 10);
- REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceL2, 11);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceL2, 11, 12);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceL2, 13);

REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceLogSum, 1, 10);
- REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceLogSum, 11);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceLogSum, 11, 12);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceLogSum, 13);

REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceLogSumExp, 1, 10);
- REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceLogSumExp, 11);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceLogSumExp, 11, 12);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceLogSumExp, 13);

REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMax, 1, 10);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMax, 1, 10);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMax, 11, 11);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMax, 11, 11);

- REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceMax, 12);
- REGISTER_UNARY_ELEMENTWISE_KERNEL_INT64_ONLY(ReduceMax, 12);
- REGISTER_UNARY_ELEMENTWISE_KERNEL_INT8_ONLY(ReduceMax, 12);
- REGISTER_UNARY_ELEMENTWISE_KERNEL_UINT8_ONLY(ReduceMax, 12);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMax, 12, 12);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMax, 12, 12);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT8_ONLY(ReduceMax, 12, 12);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_UINT8_ONLY(ReduceMax, 12, 12);
+
+ REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceMax, 13);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL_INT64_ONLY(ReduceMax, 13);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL_INT8_ONLY(ReduceMax, 13);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL_UINT8_ONLY(ReduceMax, 13);


REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMean, 1, 10);
- REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceMean, 11);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMean, 11, 12);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceMean, 13);

REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMin, 1, 10);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMin, 1, 10);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMin, 11, 11);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMin, 11, 11);

- REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceMin, 12);
- REGISTER_UNARY_ELEMENTWISE_KERNEL_INT64_ONLY(ReduceMin, 12);
- REGISTER_UNARY_ELEMENTWISE_KERNEL_INT8_ONLY(ReduceMin, 12);
- REGISTER_UNARY_ELEMENTWISE_KERNEL_UINT8_ONLY(ReduceMin, 12);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMin, 12, 12);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMin, 12, 12);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT8_ONLY(ReduceMin, 12, 12);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_UINT8_ONLY(ReduceMin, 12, 12);
+
+ REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceMin, 13);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL_INT64_ONLY(ReduceMin, 13);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL_INT8_ONLY(ReduceMin, 13);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL_UINT8_ONLY(ReduceMin, 13);

REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceProd, 1, 10);
- REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceProd, 11);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceProd, 1, 10);
- REGISTER_UNARY_ELEMENTWISE_KERNEL_INT64_ONLY(ReduceProd, 11);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceProd, 11, 12);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceProd, 11, 12);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceProd, 13);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL_INT64_ONLY(ReduceProd, 13);

REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceSum, 1, 10);
REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceSum, 11);
@@ -137,9 +173,11 @@ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_DOUBLE_ONLY(ReduceSum, 1, 10);
REGISTER_UNARY_ELEMENTWISE_KERNEL_DOUBLE_ONLY(ReduceSum, 11);

REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceSumSquare, 1, 10);
- REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceSumSquare, 11);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_DOUBLE_ONLY(ReduceSumSquare, 1, 10);
- REGISTER_UNARY_ELEMENTWISE_KERNEL_DOUBLE_ONLY(ReduceSumSquare, 11);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceSumSquare, 11, 12);
+ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_DOUBLE_ONLY(ReduceSumSquare, 11, 12);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceSumSquare, 13);
+ REGISTER_UNARY_ELEMENTWISE_KERNEL_DOUBLE_ONLY(ReduceSumSquare, 13);

REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ArgMax, 1, 10);
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ArgMax, 11, 12);
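The registration changes above all follow the same ONNX versioning pattern: when opset 13 revises an op, the previously open-ended registration is capped as a versioned one ending at opset 12, and a fresh registration is added for opset 13 onward. The following is a minimal, self-contained sketch of that idea with made-up names; the real ONNX_CPU_OPERATOR_* macros generate far more registry plumbing than this.

#include <climits>
#include <iostream>
#include <map>
#include <string>
#include <vector>

// One entry per (op, opset range); end_version is inclusive, and INT_MAX
// marks the currently open-ended registration.
struct KernelEntry {
  int start_version;
  int end_version;
  const char* kernel_name;  // stand-in for the real kernel factory
};

using Registry = std::map<std::string, std::vector<KernelEntry>>;

// Picks the entry whose [start, end] range contains the model's opset.
const char* Resolve(const Registry& registry, const std::string& op, int opset) {
  for (const KernelEntry& e : registry.at(op)) {
    if (opset >= e.start_version && opset <= e.end_version) return e.kernel_name;
  }
  return "NOT_IMPLEMENTED";  // mirrors the test-filter failures further below
}

int main() {
  Registry registry;
  // Mirrors the ReduceL1 registrations above: [1, 10], [11, 12], then 13+.
  registry["ReduceL1"] = {{1, 10, "ReduceL1_opset1"},
                          {11, 12, "ReduceL1_opset11"},
                          {13, INT_MAX, "ReduceL1_opset13"}};
  std::cout << Resolve(registry, "ReduceL1", 12) << "\n";  // ReduceL1_opset11
  std::cout << Resolve(registry, "ReduceL1", 13) << "\n";  // ReduceL1_opset13
}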
11 changes: 1 addition & 10 deletions onnxruntime/core/providers/cpu/tensor/unsqueeze.cc
@@ -18,18 +18,9 @@ ONNX_CPU_OPERATOR_VERSIONED_KERNEL(
.TypeConstraint("T", DataTypeImpl::AllTensorTypes()),
Unsqueeze);

- ONNX_CPU_OPERATOR_VERSIONED_KERNEL(
-     Unsqueeze,
-     11,
-     12,
-     KernelDefBuilder()
-         .Alias(0, 0)
-         .TypeConstraint("T", DataTypeImpl::AllTensorTypes()),
-     Unsqueeze);
-
ONNX_CPU_OPERATOR_KERNEL(
    Unsqueeze,
-     13,
+     11,
KernelDefBuilder()
.Alias(0, 0)
.TypeConstraint("T", DataTypeImpl::AllTensorTypes()),
6 changes: 5 additions & 1 deletion onnxruntime/test/providers/cpu/controlflow/loop_test.cc
@@ -92,7 +92,11 @@ static const ONNX_NAMESPACE::GraphProto CreateSubgraph(const RunOptions& options
bool is_cond_1d = options.subgraph_cond_1d_tensor;
bool is_iter_num_1d = options.subgraph_iter_num_1d_tensor;

Model model("Loop subgraph", false, DefaultLoggingManager().DefaultLogger());
// Loop tests use unsqueeze operator in it's subgraph. Unsqueeze was updated in opset13
// This test can continue to use opset12 or can be updated to use the latest opset once
// unsqueeze op13 implementation is done.
Model model("Loop subgraph", false, ModelMetaData(), PathString(), IOnnxRuntimeOpSchemaRegistryList(), {{"", 12}},
{}, DefaultLoggingManager().DefaultLogger());
auto& graph = model.MainGraph();

std::vector<NodeArg*> inputs;
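The longer Model constructor call is the key change here: its sixth argument is a domain-to-opset map, and the empty-string key denotes the default ONNX domain, so {{"", 12}} resolves every node in the subgraph against opset-12 schemas. A trivial sketch of that mapping follows; it is illustrative only, since the real resolution happens inside onnxruntime's graph code.

#include <iostream>
#include <map>
#include <string>

int main() {
  // "" is the default ONNX domain; pinning it to 12 keeps every node in the
  // subgraph on opset-12 schemas until the opset-13 Unsqueeze kernel lands.
  const std::map<std::string, int> domain_to_version{{"", 12}};
  std::cout << "default-domain ops resolve against opset "
            << domain_to_version.at("") << "\n";  // prints 12
}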
12 changes: 8 additions & 4 deletions onnxruntime/test/providers/cpu/controlflow/scan_test.cc
@@ -272,7 +272,8 @@ static void RunTest_v8(const std::string test_name, int64_t batch_size, int64_t
OpTester::ExpectResult expect_result = OpTester::ExpectResult::kExpectSuccess,
const std::string& failure_message = "") {
// create model that will be used to initialize subgraph. currently there's no direct way to create a Graph instance.
-   Model model(test_name, false, DefaultLoggingManager().DefaultLogger());
+   Model model(test_name, false, ModelMetaData(), PathString(), IOnnxRuntimeOpSchemaRegistryList(), {{"", 8}},
+               {}, DefaultLoggingManager().DefaultLogger());
auto& graph = model.MainGraph();
auto status = CreateSubgraph(graph, options, options.add_bad_shape ? failure_message : "");
ASSERT_STATUS_OK(status);
@@ -335,15 +336,16 @@ static void RunTest_v9(const std::string test_name, int64_t sequence_len, int64_
OpTester::ExpectResult expect_result = OpTester::ExpectResult::kExpectSuccess,
const std::string& failure_message = "") {
// create model that will be used to initialize subgraph. currently there's no direct way to create a Graph instance.
-   Model model(test_name, false, DefaultLoggingManager().DefaultLogger());
+   Model model(test_name, false, ModelMetaData(), PathString(), IOnnxRuntimeOpSchemaRegistryList(), {{"", 11}},
+               {}, DefaultLoggingManager().DefaultLogger());
auto& graph = model.MainGraph();
auto status = CreateSubgraph(graph, options, options.add_bad_shape ? failure_message : "");
if (!status.IsOK()) {
return;
}
auto& proto = graph.ToGraphProto();

-   ScanOpTester test{ (options.add_bad_shape) ? -1 : 11};  // use latest version - no significant change over 9
+   ScanOpTester test{(options.add_bad_shape) ? -1 : 11};  // use latest version - no significant change over 9

test.AddAttribute("body", proto);
test.AddAttribute<int64_t>("num_scan_inputs", 2);
@@ -562,7 +564,9 @@ static void OuterScopeAccess_NoShapeInMainGraph_NoTypeAndShapeInSubgraph(bool is
TEST_8_AND_9(OuterScopeAccess_NoShapeInMainGraph_NoTypeAndShapeInSubgraph);

// shape inferencing is only strict for the latest version so only test BadShape with that
- TEST(Scan9, BadShape) {
+ // Scan tests use the Split operator in the subgraph, and Split was updated in opset 13.
+ // Re-enable this test once the opset-13 Split implementation is done.
+ TEST(Scan9, DISABLED_BadShape) {
RunOptions options{};
options.is_v8 = false;
options.include_dim_values_in_main_graph = false;
5 changes: 3 additions & 2 deletions onnxruntime/test/providers/cpu/math/logsoftmax_test.cc
@@ -197,8 +197,9 @@ TEST(LogSoftmaxOperator, InvalidAxis) {
// ONNX has a bug in the error message generation so this is somewhat cryptic until it's fixed. Message should be:
// "[ShapeInferenceError] 'axis' must be in [-2 , 1]. Its actual value is: -7"
", 1]. Its actual value is: -7",
-   //latest opset so we get shape inferencing errrors
-   -1);  //TensorRT parser: Assertion failed: axis >= 0 && axis < nbDims
+   // latest opset so we get shape inferencing errors
+   // The latest valid opset for this test is 12; once the opset-13 changes are implemented this can go back to -1.
+   12);  // TensorRT parser: Assertion failed: axis >= 0 && axis < nbDims
}

} // namespace test
3 changes: 2 additions & 1 deletion onnxruntime/test/providers/cpu/math/softmax_test.cc
@@ -189,7 +189,8 @@ TEST(SoftmaxOperator, InvalidAxis) {
// "[ShapeInferenceError] 'axis' must be in [-2 , 1]. Its actual value is: -10"
", 1]. Its actual value is: -10",
// latest opset so we get shape inferencing errors
-   -1);
+   // The latest valid opset for this test is 12; once the opset-13 changes are implemented this can go back to -1.
+   12);
}

TEST(SoftmaxOperator, DimWithZero) {
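Both InvalidAxis tests pin opset 12 for the same reason, and the rule behind the shape-inference message is that, for a rank-r input, a (Log)Softmax axis must lie in [-r, r-1]. A standalone sketch of that check, with a hypothetical helper name:

#include <cassert>
#include <cstdio>

// For a rank-r tensor, an axis is valid iff -r <= axis <= r - 1.
bool AxisIsValid(int axis, int rank) {
  return axis >= -rank && axis < rank;
}

int main() {
  assert(!AxisIsValid(-7, 2));   // LogSoftmax test case: rank 2, axis -7
  assert(!AxisIsValid(-10, 2));  // Softmax test case: rank 2, axis -10
  assert(AxisIsValid(1, 2));     // upper bound of the valid range [-2, 1]
  std::puts("axis checks behave as the error messages describe");
}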
12 changes: 10 additions & 2 deletions onnxruntime/test/providers/cpu/tensor/unsqueeze_op_test.cc
@@ -46,7 +46,11 @@ TEST(TensorOpTest, Unsqueeze_3) {
}

TEST(TensorOpTest, Unsqueeze_Duplicate) {
OpTester test("Unsqueeze", -1); // use latest opset for shape inference errors
// This test is valid for opset 12.
// setting opset to -1 makes the test infra pick the latest available opset, so ort will pull the
// schema for that opset and do verification against it. Since opset 13 is enabled this test
//will fail schema validation.
OpTester test("Unsqueeze", 12);

test.AddAttribute("axes", std::vector<int64_t>{2, 1, 0, 2});
test.AddInput<float>("input", {2, 3, 4}, std::vector<float>(2 * 3 * 4, 1.0f));
@@ -57,7 +61,11 @@ TEST(TensorOpTest, Unsqueeze_Duplicate) {
}

TEST(TensorOpTest, Unsqueeze_OutOfRange) {
OpTester test("Unsqueeze", -1); // use latest opset for shape inference errors
// This test is valid for opset 12.
// setting opset to -1 makes the test infra pick the latest available opset, so ort will pull the
// schema for that opset and do verification against it. Since opset 13 is enabled this test
// will fail schema validation.
OpTester test("Unsqueeze", 12);

test.AddAttribute("axes", std::vector<int64_t>{4});
test.AddInput<float>("input", {2, 3, 4}, std::vector<float>(2 * 3 * 4, 1.0f));
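For context, the opset-13 schema change that forces these tests back to opset 12 moved Unsqueeze's axes from an attribute to a tensor input. Once the opset-13 kernel exists, the duplicate-axes setup above would look roughly like this; it is a hypothetical sketch against the opset-13 schema, not code from this change.

// Hypothetical opset-13 form of the Unsqueeze_Duplicate setup above:
OpTester test("Unsqueeze", 13);
test.AddInput<float>("input", {2, 3, 4}, std::vector<float>(2 * 3 * 4, 1.0f));
// axes becomes a tensor input in opset 13 instead of an attribute.
test.AddInput<int64_t>("axes", {4}, std::vector<int64_t>{2, 1, 0, 2});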
87 changes: 86 additions & 1 deletion onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc
@@ -34,7 +34,92 @@
"^test_adam_multiple", // NOT_IMPLEMENTED : Could not find an implementation for the node Adam(1)
"^test_training_dropout.*", // NOT_IMPLEMENTED : Could not find an implementation for the node Dropout(12) (Temporary, subsequent PR will add this -- we need training_mode change in the kernel)
"^test_if_seq_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node If(13)
"^test_loop13_seq_cpu" // NOT_IMPLEMENTED : Could not find an implementation for the node Loop(13)
"^test_loop13_seq_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Loop(13)
"^test_hardmax_axis_0_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Hardmax(13)
"^test_hardmax_axis_1_cpu",
"^test_hardmax_axis_2_cpu",
"^test_hardmax_default_axis_cpu",
"^test_hardmax_example_cpu",
"^test_hardmax_negative_axis_cpu",
"^test_hardmax_one_hot_cpu",
"^test_logsoftmax_axis_0_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Logsoftmax(13)
"^test_logsoftmax_axis_0_expanded_cpu",
"^test_logsoftmax_axis_1_cpu",
"^test_logsoftmax_axis_1_expanded_cpu",
"^test_logsoftmax_axis_2_cpu",
"^test_logsoftmax_axis_2_expanded_cpu",
"^test_logsoftmax_default_axis_cpu",
"^test_logsoftmax_default_axis_expanded_cpu",
"^test_logsoftmax_example_1_cpu",
"^test_logsoftmax_example_1_expanded_cpu",
"^test_logsoftmax_large_number_cpu",
"^test_logsoftmax_large_number_expanded_cpu",
"^test_logsoftmax_negative_axis_cpu",
"^test_logsoftmax_negative_axis_expanded_cpu",
"^test_reduce_sum_default_axes_keepdims_example_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node ReduceSum(13)
"^test_reduce_sum_default_axes_keepdims_random_cpu",
"^test_reduce_sum_do_not_keepdims_example_cpu",
"^test_reduce_sum_do_not_keepdims_random_cpu",
"^test_reduce_sum_empty_axes_input_noop_example_cpu",
"^test_reduce_sum_empty_axes_input_noop_random_cpu",
"^test_reduce_sum_keepdims_example_cpu",
"^test_reduce_sum_keepdims_random_cpu",
"^test_reduce_sum_negative_axes_keepdims_example_cpu",
"^test_reduce_sum_negative_axes_keepdims_random_cpu",
"^test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Resize(13)
"^test_resize_downsample_scales_cubic_cpu",
"^test_resize_downsample_scales_linear_cpu",
"^test_resize_downsample_scales_nearest_cpu",
"^test_resize_downsample_sizes_cubic_cpu",
"^test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu",
"^test_resize_downsample_sizes_nearest_cpu",
"^test_resize_tf_crop_and_resize_cpu",
"^test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu",
"^test_resize_upsample_scales_cubic_align_corners_cpu",
"^test_resize_upsample_scales_cubic_asymmetric_cpu",
"^test_resize_upsample_scales_cubic_cpu",
"^test_resize_upsample_scales_linear_align_corners_cpu",
"^test_resize_upsample_scales_linear_cpu",
"^test_resize_upsample_scales_nearest_cpu",
"^test_resize_upsample_sizes_cubic_cpu",
"^test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu",
"^test_resize_upsample_sizes_nearest_cpu",
"^test_resize_upsample_sizes_nearest_floor_align_corners_cpu",
"^test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu",
"^test_sce_NCd1_mean_weight_negative_ii_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node NegaticeLogLikelihoodLoss(13)
"^test_sce_NCd1_mean_weight_negative_ii_expanded_cpu",
"^test_sce_NCd1_mean_weight_negative_ii_log_prob_cpu",
"^test_sce_NCd1_mean_weight_negative_ii_log_prob_expanded_cpu",
"^test_softmax_axis_0_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Softmax(13)
"^test_softmax_axis_0_expanded_cpu",
"^test_softmax_axis_1_cpu",
"^test_softmax_axis_1_expanded_cpu",
"^test_softmax_axis_2_cpu",
"^test_softmax_axis_2_expanded_cpu",
"^test_softmax_default_axis_cpu",
"^test_softmax_default_axis_expanded_cpu",
"^test_softmax_example_cpu",
"^test_softmax_example_expanded_cpu",
"^test_softmax_large_number_cpu",
"^test_softmax_large_number_expanded_cpu",
"^test_softmax_negative_axis_cpu",
"^test_softmax_negative_axis_expanded_cpu",
"^test_split_equal_parts_1d_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Split(13)
"^test_split_equal_parts_2d_cpu",
"^test_split_equal_parts_default_axis_cpu",
"^test_split_variable_parts_1d_cpu",
"^test_split_variable_parts_2d_cpu",
"^test_split_variable_parts_default_axis_cpu",
"^test_split_zero_size_splits_cpu",
"^test_squeeze_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Squeeze(13)
"^test_squeeze_negative_axes_cpu",
"^test_unsqueeze_axis_0_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Unsqueeze(13)
"^test_unsqueeze_axis_1_cpu",
"^test_unsqueeze_axis_2_cpu",
"^test_unsqueeze_negative_axes_cpu",
"^test_unsqueeze_three_axes_cpu",
"^test_unsqueeze_two_axes_cpu",
"^test_unsqueeze_unsorted_axes_cpu"
],
"current_failing_tests_x86": [
"^test_vgg19",
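Each entry in these filter lists is an anchored regular expression matched against a backend test name. A minimal sketch of that matching behavior follows; it is illustrative only, as the real filtering lives in the Python-based backend test runner.

#include <iostream>
#include <regex>
#include <string>
#include <vector>

int main() {
  // Two entries copied from the filter list above.
  const std::vector<std::string> filters = {
      "^test_hardmax_axis_0_cpu", "^test_squeeze_cpu"};
  const std::vector<std::string> tests = {
      "test_hardmax_axis_0_cpu", "test_squeeze_cpu", "test_relu_cpu"};
  for (const std::string& t : tests) {
    bool skipped = false;
    for (const std::string& f : filters) {
      // "^" anchors the pattern at the start of the test name.
      if (std::regex_search(t, std::regex(f))) { skipped = true; break; }
    }
    std::cout << t << (skipped ? ": filtered\n" : ": runs\n");
  }
}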