
Commit a9a881c

liqunfu, guschmue, and yf711 authored
Integrate onnx 1.17.0 (microsoft#21897)
### Description

For the ORT 1.21.0 release. Created the following issues to track tests skipped because of operators updated in the ONNX 1.17.0 release: microsoft#23162, microsoft#23164, microsoft#23163, microsoft#23161.

Signed-off-by: Liqun Fu <[email protected]>
Co-authored-by: Guenther Schmuelling <[email protected]>
Co-authored-by: Yifan Li <[email protected]>
Co-authored-by: yf711 <[email protected]>
1 parent 81cd6ea commit a9a881c

File tree

20 files changed: +95 −982 lines

cgmanifests/generated/cgmanifest.json

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@
       "component": {
         "type": "git",
         "git": {
-          "commitHash": "595228d99e3977ac27cb79d5963adda262af99ad",
+          "commitHash": "b8baa8446686496da4cc8fda09f2b6fe65c2a02c",
           "repositoryUrl": "https://github.com/onnx/onnx.git"
         },
         "comments": "git submodule at cmake/external/onnx"

cmake/deps.txt

Lines changed: 1 addition & 1 deletion
@@ -35,7 +35,7 @@ microsoft_gsl;https://github.com/microsoft/GSL/archive/refs/tags/v4.0.0.zip;cf36
 microsoft_wil;https://github.com/microsoft/wil/archive/refs/tags/v1.0.230629.1.zip;e4a542a323c070376f7c2d1973d0f7ddbc1d2fa5
 mimalloc;https://github.com/microsoft/mimalloc/archive/refs/tags/v2.1.1.zip;d5ee7d34223d0567892db5179849939c8769dc41
 mp11;https://github.com/boostorg/mp11/archive/refs/tags/boost-1.82.0.zip;9bc9e01dffb64d9e0773b2e44d2f22c51aace063
-onnx;https://github.com/onnx/onnx/archive/refs/tags/v1.16.1.zip;2eb9198bb352757d5ff13977cbe0634898e0837c
+onnx;https://github.com/onnx/onnx/archive/refs/tags/v1.17.0.zip;13a60ac5217c104139ce0fd024f48628e7bcf5bc
 # Use the latest commit of 10.7-GA
 onnx_tensorrt;https://github.com/onnx/onnx-tensorrt/archive/9c69a24bc2e20c8a511a4e6b06fd49639ec5300a.zip;ff1fe9af78eb129b4a4cdcb7450b7390b4436dd3
 protobuf;https://github.com/protocolbuffers/protobuf/archive/refs/tags/v21.12.zip;7cf2733949036c7d52fda017badcab093fe73bfa

cmake/external/onnx

Submodule onnx updated 908 files

cmake/patches/onnx/onnx.patch

Lines changed: 0 additions & 941 deletions
Large diffs are not rendered by default.

js/web/docs/webgl-operators.md

Lines changed: 15 additions & 15 deletions
Large diffs are not rendered by default.

onnxruntime/core/optimizer/transpose_optimization/optimizer_api.h

Lines changed: 1 addition & 1 deletion
@@ -465,7 +465,7 @@ class GraphRef {
 }  // namespace api

 constexpr int64_t kMinSupportedOpset = 7;
-constexpr int64_t kMaxSupportedOpset = 21;
+constexpr int64_t kMaxSupportedOpset = 22;

 // enum of results that a CostCheckFn can return.
 enum class CostCheckResult {
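Raising kMaxSupportedOpset to 22 lets the transpose optimizer run on graphs exported with the ONNX 1.17 opset. As a rough illustration of what such a version gate does (the `ShouldOptimize` helper below is hypothetical, not ORT's actual API), the graph's opset simply has to fall inside the [min, max] range:

```cpp
#include <cstdint>
#include <iostream>

constexpr int64_t kMinSupportedOpset = 7;
constexpr int64_t kMaxSupportedOpset = 22;  // raised from 21 by this commit for ONNX 1.17 / opset 22

// Hypothetical helper: the optimization pass only runs when the graph's opset is in range.
bool ShouldOptimize(int64_t graph_opset) {
  return graph_opset >= kMinSupportedOpset && graph_opset <= kMaxSupportedOpset;
}

int main() {
  std::cout << ShouldOptimize(22) << "\n";  // 1: opset 22 is now in range
  std::cout << ShouldOptimize(23) << "\n";  // 0: newer than the optimizer supports
  return 0;
}
```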

onnxruntime/core/providers/cpu/reduction/reduction_ops.h

Lines changed: 8 additions & 0 deletions
@@ -384,6 +384,14 @@ class ReduceAggregatorMax : public ReduceAggregator<T> {
   }
   inline void update(const T& v) { this->accumulator_ = v > this->accumulator_ ? v : this->accumulator_; }

+  static void fill_for_empty_set(Tensor& output) {
+    if constexpr (std::is_same_v<bool, T>) { /* bool specific impl */
+      ORT_NOT_IMPLEMENTED();
+    } else {
+      EigenMap<T>(output).array() = -std::numeric_limits<T>::infinity();
+    }
+  }
+
   // Fast reduction
   static inline FastReduceKind WhichFastReduce() {
     return FastReduceKind::kKR | FastReduceKind::kRK | FastReduceKind::kKRK | FastReduceKind::kRKR;
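The new fill_for_empty_set handles ReduceMax over an empty reduction set, which is defined to yield minus infinity (the identity element of max); bool has no such identity, hence the ORT_NOT_IMPLEMENTED branch. A self-contained sketch of the same idea, without ORT's Tensor/EigenMap machinery (`FillForEmptyReduceMax` is an illustrative stand-in, not the actual ORT function):

```cpp
#include <algorithm>
#include <iostream>
#include <limits>
#include <type_traits>
#include <vector>

// Illustrative stand-in for ReduceAggregatorMax<T>::fill_for_empty_set: when the
// reduced axes select zero elements, the output is filled with the identity of
// max, i.e. -infinity (bool has no such identity, so it is excluded here).
template <typename T>
void FillForEmptyReduceMax(std::vector<T>& output) {
  static_assert(!std::is_same_v<T, bool>, "bool needs a separate implementation");
  std::fill(output.begin(), output.end(), -std::numeric_limits<T>::infinity());
}

int main() {
  std::vector<float> out(4);  // output shape left after reducing an empty axis
  FillForEmptyReduceMax(out);
  std::cout << out[0] << "\n";  // prints -inf
  return 0;
}
```
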
Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 optimum>=1.14.1
 transformers>=4.33.2,<= 4.38.0
 torch>=2.2.0
-onnx==1.16.1
+onnx==1.17.0
 datasets>=2.8.0
 protobuf==3.20.2
 psutil
Lines changed: 1 addition & 1 deletion
@@ -1,3 +1,3 @@
-onnx==1.16.1
+onnx==1.17.0
 transformers>=4.36.2
 onnxscript>=0.1.0.dev20240126

onnxruntime/test/contrib_ops/fused_matmul_op_test.cc

Lines changed: 4 additions & 4 deletions
@@ -222,10 +222,10 @@ TEST(FusedMatMulOpTest, FloatTypeNoTranspose) {
 }

 #if defined(USE_CUDA) || defined(USE_ROCM)  // double support only implemented in CUDA/ROCM kernel
-
-TEST(FusedMatMulOpTest, DoubleTypeNoTranspose) {
-  RunFusedMatMulTest<double>("FusedMatMul", 1);
-}
+// CUDAExecutionProvider cannot be used with this model due to its ONNX opset not being supported by the layout transformer.
+// TEST(FusedMatMulOpTest, DoubleTypeNoTranspose) {
+//   RunFusedMatMulTest<double>("FusedMatMul", 1);
+// }
 #endif

 TEST(FusedMatMulOpTest, FloatTypeTransposeA) {

onnxruntime/test/onnx/TestCase.cc

Lines changed: 4 additions & 0 deletions
@@ -1396,6 +1396,10 @@ std::unique_ptr<std::set<BrokenTest>> GetBrokenTests(const std::string& provider
     broken_tests->insert({"resize_upsample_sizes_nearest", "result differs"});
     broken_tests->insert({"resize_upsample_sizes_nearest_axes_2_3", "result differs"});
     broken_tests->insert({"resize_upsample_sizes_nearest_axes_3_2", "result differs"});
+    broken_tests->insert({"convtranspose_group_2", "group attribute (new of opset(22)) not supported"});
+    broken_tests->insert({"convtranspose_group_2_image_3", "group attribute (new of opset(22)) not supported"});
+    broken_tests->insert({"resize_upsample_sizes_nearest_not_larger",
+                          "output=Y:expected 1 (3f800000), got 4 (40800000), diff: 3, tol=0.002 idx=24. 13 of 49 differ. CPU test passed."});
   }

 #ifdef DISABLE_CONTRIB_OPS

onnxruntime/test/providers/xnnpack/xnnpack_basic_test.cc

Lines changed: 5 additions & 5 deletions
@@ -295,7 +295,7 @@ TEST(XnnpackEP, DISABLED_TestQDQAveragePool) {  // [ONNXRuntimeError] : 9 : NOT
   });
 }

-TEST(XnnpackEP, TestMaxPool) {
+TEST(XnnpackEP, DISABLED_TestMaxPool) {  // NOT_IMPLEMENTED : Could not find an implementation for MaxPool(22) node with name 'node'
   const std::vector<int64_t> input_shape = {1, 2, 13, 13};
   auto modelBuilder = [&input_shape](ModelTestBuilder& builder) {
     auto* input_arg = builder.MakeInput<float>(input_shape, -1.f, 1.f);
@@ -360,7 +360,7 @@ TEST(XnnpackEP, TestQDQSoftMax_axisZero_v13) {
                {ExpectedEPNodeAssignment::None});
 }

-TEST(XnnpackEP, TestSoftMax_axisLast) {
+TEST(XnnpackEP, TestSoftMax_axisLast) {  // error: Expected equality of these values
   const std::vector<int64_t> input_shape = {1, 2, 3, 5};
   int64_t axis = input_shape.size() - 1;
   auto modelCreater = [input_shape, axis](ModelTestBuilder& builder) {
@@ -379,7 +379,7 @@ TEST(XnnpackEP, TestSoftMax_axisLast) {
                {ExpectedEPNodeAssignment::All});
 }

-TEST(XnnpackEP, TestQDQSoftMax_axisLast) {
+TEST(XnnpackEP, TestQDQSoftMax_axisLast) {  // error: Expected equality of these values
   RunModelTest(BuildQDQSoftMaxTestCase<uint8_t, uint8_t>(
       {1, 2, 3, 5} /* input_shape */,
       static_cast<int64_t>(3) /* axis */,
@@ -395,7 +395,7 @@ TEST(XnnpackEP, TestConvTranspose) {
   RunModelTestWithPath(ort_model_path, "test_conv_follow_convtrans", nullptr);
 }

-TEST(XnnpackEP, TestConvTranspose_With_Outputpadding) {
+TEST(XnnpackEP, DISABLED_TestConvTranspose_With_Outputpadding) {  // NOT_IMPLEMENTED : Could not find an implementation for ConvTranspose(22) node with name 'node'
   const std::vector<int64_t> input_shape = {1, 4, 15, 15};
   auto modelBuilder = [&input_shape](ModelTestBuilder& builder) {
     auto* input_arg = builder.MakeInput<float>(input_shape, -127.f, 127.f);
@@ -415,7 +415,7 @@ TEST(XnnpackEP, TestConvTranspose_With_Outputpadding) {
   });
 }

-TEST(XnnpackEP, TestConvTranspose_With_OutputShape) {
+TEST(XnnpackEP, DISABLED_TestConvTranspose_With_OutputShape) {  // NOT_IMPLEMENTED : Could not find an implementation for ConvTranspose(22) node with name 'node'
   const std::vector<int64_t> input_shape = {1, 4, 15, 15};
   auto modelBuilder = [&input_shape](ModelTestBuilder& builder) {
     auto* input_arg = builder.MakeInput<float>(input_shape, -127.f, 127.f);
Lines changed: 1 addition & 1 deletion
@@ -1,2 +1,2 @@
-onnx==1.16.1
+onnx==1.17.0
 pytest

onnxruntime/test/shared_lib/test_inference.cc

Lines changed: 2 additions & 2 deletions
@@ -1222,7 +1222,7 @@ TEST(CApiTest, invalid_variadic_input_min_arity_custom_op) {
     Ort::Session session(*ort_env, VARIADIC_INPUT_OUTPUT_CUSTOM_OP_MODEL_URI, session_options);
     FAIL();
   } catch (const Ort::Exception& excpt) {
-    ASSERT_THAT(excpt.what(), testing::HasSubstr("Error Node (VariadicNode0) has input size 3 not in range [min=4"));
+    ASSERT_THAT(excpt.what(), testing::HasSubstr("Error Node(VariadicNode0) with schema(test::VariadicNode:1) has input size 3 not in range [min=4,"));
   }
 }

@@ -1252,7 +1252,7 @@ TEST(CApiTest, invalid_variadic_output_min_arity_custom_op) {
     Ort::Session session(*ort_env, VARIADIC_INPUT_OUTPUT_CUSTOM_OP_MODEL_URI, session_options);
     FAIL();
   } catch (const Ort::Exception& excpt) {
-    ASSERT_THAT(excpt.what(), testing::HasSubstr("Error Node (VariadicNode0) has output size 3 not in range [min=4"));
+    ASSERT_THAT(excpt.what(), testing::HasSubstr("Error Node(VariadicNode0) with schema(test::VariadicNode:1) has output size 3 not in range [min=4"));
   }
 }

onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc

Lines changed: 44 additions & 2 deletions
@@ -323,7 +323,46 @@
     "^test_dequantizelinear_int4",
     "^test_dequantizelinear_uint4",
     "^test_quantizelinear_int4",
-    "^test_quantizelinear_uint4"
+    "^test_quantizelinear_uint4",
+    // onnx 1.17.0 op tests: skip until implemented in ORT
+    "^test_acos*", // Could not find an implementation for Acos(22)
+    "^test_acosh*", // Could not find an implementation for Acosh(22)
+    "^test_asin*", // Could not find an implementation for Asin(22)
+    "^test_asinh*", // Could not find an implementation for Asinh(22)
+    "^test_atan*", // Could not find an implementation for Atan(22)
+    "^test_atanh*", // Could not find an implementation for Atanh(22)
+    "^test_basic_conv_with_padding*", // Could not find an implementation for Conv(22)
+    "^test_basic_conv_without_padding*", // Could not find an implementation for Conv(22)
+    "^test_conv*", // Could not find an implementation for Conv(22)
+    "^test_convtranspose*", // Could not find an implementation for ConvTranspose(22)
+    "^test_cos*", // Could not find an implementation for Cos(22)
+    "^test_cosh*", // Could not find an implementation for Cosh(22)
+    "^test_det*", // Could not find an implementation for Det(22)
+    "^test_dropout*", // Could not find an implementation for Dropout(22)
+    "^test_elu*", // Could not find an implementation for Elu(22)
+    "^test_eyelike*", // Could not find an implementation for EyeLike(22)
+    "^test_globalaveragepool*", // Could not find an implementation for GlobalAveragePool(22)
+    "^test_globalmaxpool*", // Could not find an implementation for GlobalMaxPool(22)
+    "^test_gridsample*", // Could not find an implementation for GridSample(22)
+    "^test_gru*", // Could not find an implementation for GRU(22)
+    "^test_hardsigmoid*", // Could not find an implementation for HardSigmoid(22)
+    "^test_hardswish*", // Could not find an implementation for HardSigmoid(22)
+    "^test_instancenorm*", // Could not find an implementation for InstanceNormalization(22)
+    "^test_lppool*", // Could not find an implementation for LpPool(22)
+    "^test_lstm*", // Could not find an implementation for LSTM(22)
+    "^test_maxpool*", // Could not find an implementation for MaxPool(22)
+    "^test_maxunpool*", // Could not find an implementation for MaxUnpool(22)
+    "^test_mish*", // Could not find an implementation for Softplus(22)
+    "^test_rnn*", // Could not find an implementation for RNN(22)
+    "^test_round*", // Could not find an implementation for Round(22)
+    "^test_selu*", // Could not find an implementation for Selu(22)
+    "^test_simple_rnn*", // Could not find an implementation for RNN(22)
+    "^test_sin*", // Could not find an implementation for Sin(22)
+    "^test_sinh*", // Could not find an implementation for Sinh(22)
+    "^test_softplus*", // Could not find an implementation for Softplus(22)
+    "^test_softsign*", // Could not find an implementation for Softsign(22)
+    "^test_tan*", // Could not find an implementation for Tan(22)
+    "^test_thresholdedrelu*" // Could not find an implementation for ThresholdedRelu(22)
   ],
   "current_failing_tests_x86": [
     "^test_vgg19",
@@ -426,6 +465,7 @@
     "^test_gelu_tanh_2_expanded_cpu",
     "^test_reduce_max_bool_inputs",
     "^test_reduce_min_bool_inputs",
+    "^test_reduce_max_empty_set", // DNNL result in "(shapes (2, 1, 4), (1, 0, 1) mismatch)". this is the same for test_reduce_min_empty_set which is already in the list
     "^test_reduce_min_empty_set",
     "^test_reduce_l1_empty_set",
     "^test_reduce_l1_empty_set_expanded",
@@ -752,7 +792,9 @@
     "^test_reduce_prod_empty_set_cpu",
     //Bug: DML EP does not execute operators with an empty input tensor
     //TODO: Resolve as a graph implementation that returns a constant inf tensor with appropriate strides
-    "^test_reduce_min_empty_set_cpu"
+    "^test_reduce_max_empty_set_cpu", // DNNL result in "(shapes (2, 1, 4), (1, 0, 1) mismatch)". this is the same for test_reduce_min_empty_set which is already in the list
+    "^test_reduce_min_empty_set_cpu",
+    "^test_resize_upsample_sizes_nearest_not_smaller_cpu"
   ],
   // ORT first supported opset 7, so models with nodes that require versions prior to opset 7 are not supported
   "tests_with_pre_opset7_dependencies": [

tools/ci_build/github/azure-pipelines/templates/download-deps.yml

Lines changed: 2 additions & 2 deletions
@@ -11,7 +11,7 @@ steps:
     packageType: upack
     feed: '/7424c8e4-5c62-490e-95c4-79446f31017c'
     definition: '517c4f6f-5437-4392-a70d-4f15ec5be2f0'
-    version: 1.0.202
+    version: 1.0.203
     downloadPath: $(Build.BinariesDirectory)/deps

 # The private ADO project
@@ -22,7 +22,7 @@ steps:
     packageType: upack
     feed: '/4c7631f5-24c0-4307-8822-1aa8f180c325'
     definition: 'fd9dd5ad-b73e-4678-890e-edcf680dbc1a'
-    version: 1.0.202
+    version: 1.0.203
     downloadPath: $(Build.BinariesDirectory)/deps

 # You can add more ADO accounts at here.

tools/ci_build/github/linux/docker/scripts/lort/requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ beartype==0.15.0
 flatbuffers
 cerberus
 h5py
-onnx==1.16.1
+onnx==1.17.0
 # Python dependencies required for pytorch development
 astunparse
 expecttest!=0.2.0

tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -4,7 +4,7 @@ mypy
 pytest
 setuptools>=68.2.2
 wheel
-onnx==1.16.1
+onnx==1.17.0
 protobuf==4.21.12
 sympy==1.12 ; python_version < '3.9'
 sympy==1.13 ; python_version >= '3.9'

tools/ci_build/github/linux/docker/scripts/requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -5,7 +5,7 @@ mypy
 pytest
 setuptools==69.0.3
 wheel==0.42.0
-onnx==1.16.1
+onnx==1.17.0
 argparse
 sympy==1.12
 flatbuffers

tools/ci_build/github/linux/python/requirements.txt

Lines changed: 1 addition & 1 deletion
@@ -3,7 +3,7 @@ mypy
 pytest
 setuptools>=68.2.2
 wheel
-onnx==1.16.1
+onnx==1.17.0
 protobuf==4.21.12
 sympy==1.12
 flatbuffers
