diff --git a/cgmanifests/submodules/cgmanifest.json b/cgmanifests/submodules/cgmanifest.json index ecdf9250af69c..ff6e46a6b5366 100644 --- a/cgmanifests/submodules/cgmanifest.json +++ b/cgmanifests/submodules/cgmanifest.json @@ -345,7 +345,7 @@ "component": { "type": "git", "git": { - "commitHash": "b71de776d2847b6e0fed54a888a8213a7812ad12", + "commitHash": "a7a0fec7f25cae567429af62b7eaaee1c3f0e247", "repositoryUrl": "https://github.com/onnx/onnx" }, "comments": "git submodule at cmake/external/onnx" diff --git a/cmake/external/onnx b/cmake/external/onnx index b71de776d2847..a7a0fec7f25ca 160000 --- a/cmake/external/onnx +++ b/cmake/external/onnx @@ -1 +1 @@ -Subproject commit b71de776d2847b6e0fed54a888a8213a7812ad12 +Subproject commit a7a0fec7f25cae567429af62b7eaaee1c3f0e247 diff --git a/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest-docker-gpu.sh b/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest-docker-gpu.sh index 19ade4bb82219..1641a5365d4a8 100755 --- a/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest-docker-gpu.sh +++ b/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest-docker-gpu.sh @@ -27,6 +27,6 @@ docker run --gpus all --rm \ -e "PackageName=$PackageName" \ -e "RunTestCsharp=$RunTestCsharp" \ -e "RunTestNative=$RunTestNative" \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch5h \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch9q \ /bin/bash /onnxruntime_src/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest.sh \ /home/onnxruntimedev/$NUGET_REPO_DIRNAME /onnxruntime_src /home/onnxruntimedev $CurrentOnnxRuntimeVersion diff --git a/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest-docker.sh b/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest-docker.sh index 1b361a2a2e751..2c4954a9feaee 100755 --- a/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest-docker.sh +++ 
b/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest-docker.sh @@ -35,6 +35,6 @@ docker run --rm \ -e "DisableMlOps=$DISABLEMLOPS" \ -e "RunTestCsharp=$RunTestCsharp" \ -e "RunTestNative=$RunTestNative" \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m \ /bin/bash /onnxruntime_src/csharp/test/Microsoft.ML.OnnxRuntime.EndToEndTests/runtest.sh \ /home/onnxruntimedev/$NUGET_REPO_DIRNAME /onnxruntime_src /home/onnxruntimedev $CurrentOnnxRuntimeVersion diff --git a/onnxruntime/core/providers/cpu/cpu_execution_provider.cc b/onnxruntime/core/providers/cpu/cpu_execution_provider.cc index 32bae0d9678e6..04dfa69f32055 100644 --- a/onnxruntime/core/providers/cpu/cpu_execution_provider.cc +++ b/onnxruntime/core/providers/cpu/cpu_execution_provider.cc @@ -309,32 +309,32 @@ class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOn class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, int32_t, ArgMax); class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, float, ArgMin); class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, int32_t, ArgMin); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, float, ReduceL1); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, int32_t, ReduceL1); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, float, ReduceL2); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, int32_t, ReduceL2); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, float, ReduceLogSum); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, int32_t, ReduceLogSum); -class 
ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, float, ReduceLogSumExp); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, int32_t, ReduceLogSumExp); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, float, ReduceL1); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, int32_t, ReduceL1); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, float, ReduceL2); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, int32_t, ReduceL2); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, float, ReduceLogSum); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, int32_t, ReduceLogSum); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, float, ReduceLogSumExp); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, int32_t, ReduceLogSumExp); class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 11, float, ReduceMax); class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 11, int32_t, ReduceMax); class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 11, int64_t, ReduceMax); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, float, ReduceMean); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, int32_t, ReduceMean); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, float, ReduceMean); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, int32_t, ReduceMean); class 
ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 11, float, ReduceMin); class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 11, int32_t, ReduceMin); class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 11, int64_t, ReduceMin); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, float, ReduceProd); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, int32_t, ReduceProd); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, int64_t, ReduceProd); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, float, ReduceProd); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, int32_t, ReduceProd); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, int64_t, ReduceProd); class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, float, ReduceSum); class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, double, ReduceSum); class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, int32_t, ReduceSum); class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, int64_t, ReduceSum); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, float, ReduceSumSquare); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, double, ReduceSumSquare); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, int32_t, ReduceSumSquare); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, float, ReduceSumSquare); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 
12, double, ReduceSumSquare); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, int32_t, ReduceSumSquare); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, Hardmax); class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, float, LogSoftmax); class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, double, LogSoftmax); @@ -350,7 +350,7 @@ class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDoma class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, Slice); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, Split); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, Squeeze); -class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, Unsqueeze); +class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, Unsqueeze); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, Det); class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, 12, ScatterElements); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 11, NonMaxSuppression); @@ -402,17 +402,17 @@ class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDoma class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 12, Max); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, MaxPool); class ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 12, Pow); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, float, ReduceMax); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, int32_t, ReduceMax); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 
int64_t, ReduceMax); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, int8_t, ReduceMax); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, uint8_t, ReduceMax); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 12, float, ReduceMax); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 12, int32_t, ReduceMax); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 12, int64_t, ReduceMax); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 12, int8_t, ReduceMax); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 12, uint8_t, ReduceMax); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, float, ReduceMin); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, int32_t, ReduceMin); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, int64_t, ReduceMin); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, int8_t, ReduceMin); -class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, uint8_t, ReduceMin); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 12, float, ReduceMin); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 12, int32_t, ReduceMin); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 12, int64_t, ReduceMin); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 12, int8_t, ReduceMin); +class ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 12, uint8_t, ReduceMin); class 
ONNX_OPERATOR_VERSIONED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, 12, GatherND); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 12, Einsum); @@ -458,7 +458,6 @@ class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, Si class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, Sign); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, Size); class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, Sum); -class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, Unsqueeze); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, Flatten); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, LRN); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, MeanVarianceNormalization); @@ -548,6 +547,32 @@ class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, uint8_t, NonZero); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, GatherND); class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, Pad); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, ReduceL1); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceL1); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, ReduceL2); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceL2); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, ReduceLogSum); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceLogSum); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, 
kOnnxDomain, 13, float, ReduceLogSumExp); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceLogSumExp); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, ReduceMax); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceMax); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int64_t, ReduceMax); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int8_t, ReduceMax); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, uint8_t, ReduceMax); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, ReduceMean); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceMean); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, ReduceMin); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceMin); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int64_t, ReduceMin); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int8_t, ReduceMin); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, uint8_t, ReduceMin); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, ReduceProd); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceProd); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int64_t, ReduceProd); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, float, ReduceSumSquare); +class ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, double, ReduceSumSquare); +class 
ONNX_OPERATOR_TYPED_KERNEL_CLASS_NAME(kCpuExecutionProvider, kOnnxDomain, 13, int32_t, ReduceSumSquare); template <> KernelCreateInfo BuildKernelCreateInfo() { @@ -1029,8 +1054,7 @@ Status RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) { BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, // OpSet 12 BuildKernelCreateInfo, @@ -1161,32 +1185,31 @@ Status RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) { BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, 
BuildKernelCreateInfo, BuildKernelCreateInfo, - BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, }; for (auto& function_table_entry : function_table) { diff --git a/onnxruntime/core/providers/cpu/reduction/reduction_ops.cc b/onnxruntime/core/providers/cpu/reduction/reduction_ops.cc index 4ffdf5c45838b..db095a4498fea 100644 --- a/onnxruntime/core/providers/cpu/reduction/reduction_ops.cc +++ b/onnxruntime/core/providers/cpu/reduction/reduction_ops.cc @@ -73,6 +73,24 @@ namespace onnxruntime { KernelDefBuilder().TypeConstraint("T", DataTypeImpl::GetTensorType()), \ x); +#define REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT8_ONLY(x, startVer, endVer) \ + ONNX_CPU_OPERATOR_VERSIONED_TYPED_KERNEL( \ + x, \ + startVer, \ + endVer, \ + int8_t, \ + KernelDefBuilder().TypeConstraint("T", DataTypeImpl::GetTensorType()), \ + x); + +#define REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_UINT8_ONLY(x, startVer, endVer) \ + ONNX_CPU_OPERATOR_VERSIONED_TYPED_KERNEL( \ + x, \ + startVer, \ + endVer, \ + uint8_t, \ + KernelDefBuilder().TypeConstraint("T", DataTypeImpl::GetTensorType()), \ + x); + #define REGISTER_UNARY_ELEMENTWISE_KERNEL_INT8_ONLY(x, sinceVersion) \ ONNX_CPU_OPERATOR_TYPED_KERNEL( \ x, \ @@ -90,44 +108,62 @@ namespace onnxruntime { x); 
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceL1, 1, 10); -REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceL1, 11); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceL1, 11, 12); +REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceL1, 13); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceL2, 1, 10); -REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceL2, 11); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceL2, 11, 12); +REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceL2, 13); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceLogSum, 1, 10); -REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceLogSum, 11); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceLogSum, 11, 12); +REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceLogSum, 13); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceLogSumExp, 1, 10); -REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceLogSumExp, 11); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceLogSumExp, 11, 12); +REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceLogSumExp, 13); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMax, 1, 10); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMax, 1, 10); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMax, 11, 11); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMax, 11, 11); -REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceMax, 12); -REGISTER_UNARY_ELEMENTWISE_KERNEL_INT64_ONLY(ReduceMax, 12); -REGISTER_UNARY_ELEMENTWISE_KERNEL_INT8_ONLY(ReduceMax, 12); -REGISTER_UNARY_ELEMENTWISE_KERNEL_UINT8_ONLY(ReduceMax, 12); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMax, 12, 12); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMax, 12, 12); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT8_ONLY(ReduceMax, 12, 12); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_UINT8_ONLY(ReduceMax, 12, 12); + +REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceMax, 13); +REGISTER_UNARY_ELEMENTWISE_KERNEL_INT64_ONLY(ReduceMax, 13); +REGISTER_UNARY_ELEMENTWISE_KERNEL_INT8_ONLY(ReduceMax, 13); +REGISTER_UNARY_ELEMENTWISE_KERNEL_UINT8_ONLY(ReduceMax, 13); + 
REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMean, 1, 10); -REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceMean, 11); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMean, 11, 12); +REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceMean, 13); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMin, 1, 10); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMin, 1, 10); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMin, 11, 11); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMin, 11, 11); -REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceMin, 12); -REGISTER_UNARY_ELEMENTWISE_KERNEL_INT64_ONLY(ReduceMin, 12); -REGISTER_UNARY_ELEMENTWISE_KERNEL_INT8_ONLY(ReduceMin, 12); -REGISTER_UNARY_ELEMENTWISE_KERNEL_UINT8_ONLY(ReduceMin, 12); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceMin, 12, 12); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceMin, 12, 12); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT8_ONLY(ReduceMin, 12, 12); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_UINT8_ONLY(ReduceMin, 12, 12); + +REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceMin, 13); +REGISTER_UNARY_ELEMENTWISE_KERNEL_INT64_ONLY(ReduceMin, 13); +REGISTER_UNARY_ELEMENTWISE_KERNEL_INT8_ONLY(ReduceMin, 13); +REGISTER_UNARY_ELEMENTWISE_KERNEL_UINT8_ONLY(ReduceMin, 13); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceProd, 1, 10); -REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceProd, 11); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceProd, 1, 10); -REGISTER_UNARY_ELEMENTWISE_KERNEL_INT64_ONLY(ReduceProd, 11); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceProd, 11, 12); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_INT64_ONLY(ReduceProd, 11, 12); +REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceProd, 13); +REGISTER_UNARY_ELEMENTWISE_KERNEL_INT64_ONLY(ReduceProd, 13); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceSum, 1, 10); REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceSum, 11); @@ -137,9 +173,11 @@ REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_DOUBLE_ONLY(ReduceSum, 1, 10); 
REGISTER_UNARY_ELEMENTWISE_KERNEL_DOUBLE_ONLY(ReduceSum, 11); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceSumSquare, 1, 10); -REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceSumSquare, 11); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_DOUBLE_ONLY(ReduceSumSquare, 1, 10); -REGISTER_UNARY_ELEMENTWISE_KERNEL_DOUBLE_ONLY(ReduceSumSquare, 11); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ReduceSumSquare, 11, 12); +REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL_DOUBLE_ONLY(ReduceSumSquare, 11, 12); +REGISTER_UNARY_ELEMENTWISE_KERNEL(ReduceSumSquare, 13); +REGISTER_UNARY_ELEMENTWISE_KERNEL_DOUBLE_ONLY(ReduceSumSquare, 13); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ArgMax, 1, 10); REGISTER_UNARY_ELEMENTWISE_VERSIONED_KERNEL(ArgMax, 11, 12); diff --git a/onnxruntime/core/providers/cpu/tensor/unsqueeze.cc b/onnxruntime/core/providers/cpu/tensor/unsqueeze.cc index 4dbd286b80353..39be162f0b230 100644 --- a/onnxruntime/core/providers/cpu/tensor/unsqueeze.cc +++ b/onnxruntime/core/providers/cpu/tensor/unsqueeze.cc @@ -18,18 +18,9 @@ ONNX_CPU_OPERATOR_VERSIONED_KERNEL( .TypeConstraint("T", DataTypeImpl::AllTensorTypes()), Unsqueeze); -ONNX_CPU_OPERATOR_VERSIONED_KERNEL( - Unsqueeze, - 11, - 12, - KernelDefBuilder() - .Alias(0, 0) - .TypeConstraint("T", DataTypeImpl::AllTensorTypes()), - Unsqueeze); - ONNX_CPU_OPERATOR_KERNEL( Unsqueeze, - 13, + 11, KernelDefBuilder() .Alias(0, 0) .TypeConstraint("T", DataTypeImpl::AllTensorTypes()), diff --git a/onnxruntime/test/providers/cpu/controlflow/loop_test.cc b/onnxruntime/test/providers/cpu/controlflow/loop_test.cc index 9a6ce3f7f8048..3f9c5730dac8e 100644 --- a/onnxruntime/test/providers/cpu/controlflow/loop_test.cc +++ b/onnxruntime/test/providers/cpu/controlflow/loop_test.cc @@ -92,7 +92,11 @@ static const ONNX_NAMESPACE::GraphProto CreateSubgraph(const RunOptions& options bool is_cond_1d = options.subgraph_cond_1d_tensor; bool is_iter_num_1d = options.subgraph_iter_num_1d_tensor; - Model model("Loop subgraph", false, 
DefaultLoggingManager().DefaultLogger()); + // Loop tests use unsqueeze operator in its subgraph. Unsqueeze was updated in opset13 + // This test can continue to use opset12 or can be updated to use the latest opset once + // unsqueeze op13 implementation is done. + Model model("Loop subgraph", false, ModelMetaData(), PathString(), IOnnxRuntimeOpSchemaRegistryList(), {{"", 12}}, + {}, DefaultLoggingManager().DefaultLogger()); auto& graph = model.MainGraph(); std::vector inputs; diff --git a/onnxruntime/test/providers/cpu/controlflow/scan_test.cc b/onnxruntime/test/providers/cpu/controlflow/scan_test.cc index 03e0f7e229052..232590359237e 100644 --- a/onnxruntime/test/providers/cpu/controlflow/scan_test.cc +++ b/onnxruntime/test/providers/cpu/controlflow/scan_test.cc @@ -272,7 +272,8 @@ static void RunTest_v8(const std::string test_name, int64_t batch_size, int64_t OpTester::ExpectResult expect_result = OpTester::ExpectResult::kExpectSuccess, const std::string& failure_message = "") { // create model that will be used to initialize subgraph. currently there's no direct way to create a Graph instance. - Model model(test_name, false, DefaultLoggingManager().DefaultLogger()); + Model model(test_name, false, ModelMetaData(), PathString(), IOnnxRuntimeOpSchemaRegistryList(), {{"", 8}}, + {}, DefaultLoggingManager().DefaultLogger()); auto& graph = model.MainGraph(); auto status = CreateSubgraph(graph, options, options.add_bad_shape ? failure_message : ""); ASSERT_STATUS_OK(status); @@ -335,7 +336,8 @@ static void RunTest_v9(const std::string test_name, int64_t sequence_len, int64_ OpTester::ExpectResult expect_result = OpTester::ExpectResult::kExpectSuccess, const std::string& failure_message = "") { // create model that will be used to initialize subgraph. currently there's no direct way to create a Graph instance. 
- Model model(test_name, false, DefaultLoggingManager().DefaultLogger()); + Model model(test_name, false, ModelMetaData(), PathString(), IOnnxRuntimeOpSchemaRegistryList(), {{"", 11}}, + {}, DefaultLoggingManager().DefaultLogger()); auto& graph = model.MainGraph(); auto status = CreateSubgraph(graph, options, options.add_bad_shape ? failure_message : ""); if (!status.IsOK()) { @@ -343,7 +345,7 @@ static void RunTest_v9(const std::string test_name, int64_t sequence_len, int64_ } auto& proto = graph.ToGraphProto(); - ScanOpTester test{ (options.add_bad_shape) ? -1 : 11}; // use latest version - no significant change over 9 + ScanOpTester test{(options.add_bad_shape) ? -1 : 11}; // use latest version - no significant change over 9 test.AddAttribute("body", proto); test.AddAttribute("num_scan_inputs", 2); @@ -562,7 +564,9 @@ static void OuterScopeAccess_NoShapeInMainGraph_NoTypeAndShapeInSubgraph(bool is TEST_8_AND_9(OuterScopeAccess_NoShapeInMainGraph_NoTypeAndShapeInSubgraph); // shape inferencing is only strict for the latest version so only test BadShape with that -TEST(Scan9, BadShape) { +// Scan test uses Split operator in the subgraph. It was updated for opset13 +// Enable this test once Split for op13 is implemented. +TEST(Scan9, DISABLED_BadShape) { RunOptions options{}; options.is_v8 = false; options.include_dim_values_in_main_graph = false; diff --git a/onnxruntime/test/providers/cpu/math/logsoftmax_test.cc b/onnxruntime/test/providers/cpu/math/logsoftmax_test.cc index d4fa3564dbbc0..ac9b53cfb3d25 100644 --- a/onnxruntime/test/providers/cpu/math/logsoftmax_test.cc +++ b/onnxruntime/test/providers/cpu/math/logsoftmax_test.cc @@ -197,8 +197,9 @@ TEST(LogSoftmaxOperator, InvalidAxis) { // ONNX has a bug in the error message generation so this is somewhat cryptic until it's fixed. Message should be: // "[ShapeInferenceError] 'axis' must be in [-2 , 1]. Its actual value is: -7" ", 1]. 
Its actual value is: -7", - //latest opset so we get shape inferencing errrors - -1); //TensorRT parser: Assertion failed: axis >= 0 && axis < nbDims + // latest opset so we get shape inferencing errors + // Latest valid opset for this is 12. Once opset 13 changes are implemented this can be changed back to -1 + 12); //TensorRT parser: Assertion failed: axis >= 0 && axis < nbDims } } // namespace test diff --git a/onnxruntime/test/providers/cpu/math/softmax_test.cc b/onnxruntime/test/providers/cpu/math/softmax_test.cc index a61c63d38fae3..ed3d3fe9c4298 100644 --- a/onnxruntime/test/providers/cpu/math/softmax_test.cc +++ b/onnxruntime/test/providers/cpu/math/softmax_test.cc @@ -189,7 +189,8 @@ TEST(SoftmaxOperator, InvalidAxis) { // "[ShapeInferenceError] 'axis' must be in [-2 , 1]. Its actual value is: -10" ", 1]. Its actual value is: -10", // latest opset so we get shape inferencing errors - -1); + // Latest valid opset for this is 12. Once opset 13 changes are implemented this can be changed back to -1 + 12); } TEST(SoftmaxOperator, DimWithZero) { diff --git a/onnxruntime/test/providers/cpu/tensor/unsqueeze_op_test.cc b/onnxruntime/test/providers/cpu/tensor/unsqueeze_op_test.cc index 775f7d6e55f76..835451ec8c5c4 100644 --- a/onnxruntime/test/providers/cpu/tensor/unsqueeze_op_test.cc +++ b/onnxruntime/test/providers/cpu/tensor/unsqueeze_op_test.cc @@ -46,7 +46,11 @@ TEST(TensorOpTest, Unsqueeze_3) { } TEST(TensorOpTest, Unsqueeze_Duplicate) { - OpTester test("Unsqueeze", -1); // use latest opset for shape inference errors + // This test is valid for opset 12. + // setting opset to -1 makes the test infra pick the latest available opset, so ort will pull the + // schema for that opset and do verification against it. Since opset 13 is enabled this test + //will fail schema validation. 
+ OpTester test("Unsqueeze", 12); test.AddAttribute("axes", std::vector{2, 1, 0, 2}); test.AddInput("input", {2, 3, 4}, std::vector(2 * 3 * 4, 1.0f)); @@ -57,7 +61,11 @@ TEST(TensorOpTest, Unsqueeze_Duplicate) { } TEST(TensorOpTest, Unsqueeze_OutOfRange) { - OpTester test("Unsqueeze", -1); // use latest opset for shape inference errors + // This test is valid for opset 12. + // setting opset to -1 makes the test infra pick the latest available opset, so ort will pull the + // schema for that opset and do verification against it. Since opset 13 is enabled this test + // will fail schema validation. + OpTester test("Unsqueeze", 12); test.AddAttribute("axes", std::vector{4}); test.AddInput("input", {2, 3, 4}, std::vector(2 * 3 * 4, 1.0f)); diff --git a/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc b/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc index 331c243281cc5..9b128a67350cc 100644 --- a/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc +++ b/onnxruntime/test/testdata/onnx_backend_test_series_filters.jsonc @@ -34,7 +34,92 @@ "^test_adam_multiple", // NOT_IMPLEMENTED : Could not find an implementation for the node Adam(1) "^test_training_dropout.*", // NOT_IMPLEMENTED : Could not find an implementation for the node Dropout(12) (Temporary, subsequent PR will add this -- we need training_mode change in the kernel) "^test_if_seq_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node If(13) - "^test_loop13_seq_cpu" // NOT_IMPLEMENTED : Could not find an implementation for the node Loop(13) + "^test_loop13_seq_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Loop(13) + "^test_hardmax_axis_0_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Hardmax(13) + "^test_hardmax_axis_1_cpu", + "^test_hardmax_axis_2_cpu", + "^test_hardmax_default_axis_cpu", + "^test_hardmax_example_cpu", + "^test_hardmax_negative_axis_cpu", + "^test_hardmax_one_hot_cpu", + 
"^test_logsoftmax_axis_0_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Logsoftmax(13) + "^test_logsoftmax_axis_0_expanded_cpu", + "^test_logsoftmax_axis_1_cpu", + "^test_logsoftmax_axis_1_expanded_cpu", + "^test_logsoftmax_axis_2_cpu", + "^test_logsoftmax_axis_2_expanded_cpu", + "^test_logsoftmax_default_axis_cpu", + "^test_logsoftmax_default_axis_expanded_cpu", + "^test_logsoftmax_example_1_cpu", + "^test_logsoftmax_example_1_expanded_cpu", + "^test_logsoftmax_large_number_cpu", + "^test_logsoftmax_large_number_expanded_cpu", + "^test_logsoftmax_negative_axis_cpu", + "^test_logsoftmax_negative_axis_expanded_cpu", + "^test_reduce_sum_default_axes_keepdims_example_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node ReduceSum(13) + "^test_reduce_sum_default_axes_keepdims_random_cpu", + "^test_reduce_sum_do_not_keepdims_example_cpu", + "^test_reduce_sum_do_not_keepdims_random_cpu", + "^test_reduce_sum_empty_axes_input_noop_example_cpu", + "^test_reduce_sum_empty_axes_input_noop_random_cpu", + "^test_reduce_sum_keepdims_example_cpu", + "^test_reduce_sum_keepdims_random_cpu", + "^test_reduce_sum_negative_axes_keepdims_example_cpu", + "^test_reduce_sum_negative_axes_keepdims_random_cpu", + "^test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Resize(13) + "^test_resize_downsample_scales_cubic_cpu", + "^test_resize_downsample_scales_linear_cpu", + "^test_resize_downsample_scales_nearest_cpu", + "^test_resize_downsample_sizes_cubic_cpu", + "^test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu", + "^test_resize_downsample_sizes_nearest_cpu", + "^test_resize_tf_crop_and_resize_cpu", + "^test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu", + "^test_resize_upsample_scales_cubic_align_corners_cpu", + "^test_resize_upsample_scales_cubic_asymmetric_cpu", + "^test_resize_upsample_scales_cubic_cpu", + 
"^test_resize_upsample_scales_linear_align_corners_cpu", + "^test_resize_upsample_scales_linear_cpu", + "^test_resize_upsample_scales_nearest_cpu", + "^test_resize_upsample_sizes_cubic_cpu", + "^test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu", + "^test_resize_upsample_sizes_nearest_cpu", + "^test_resize_upsample_sizes_nearest_floor_align_corners_cpu", + "^test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu", + "^test_sce_NCd1_mean_weight_negative_ii_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node NegaticeLogLikelihoodLoss(13) + "^test_sce_NCd1_mean_weight_negative_ii_expanded_cpu", + "^test_sce_NCd1_mean_weight_negative_ii_log_prob_cpu", + "^test_sce_NCd1_mean_weight_negative_ii_log_prob_expanded_cpu", + "^test_softmax_axis_0_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Softmax(13) + "^test_softmax_axis_0_expanded_cpu", + "^test_softmax_axis_1_cpu", + "^test_softmax_axis_1_expanded_cpu", + "^test_softmax_axis_2_cpu", + "^test_softmax_axis_2_expanded_cpu", + "^test_softmax_default_axis_cpu", + "^test_softmax_default_axis_expanded_cpu", + "^test_softmax_example_cpu", + "^test_softmax_example_expanded_cpu", + "^test_softmax_large_number_cpu", + "^test_softmax_large_number_expanded_cpu", + "^test_softmax_negative_axis_cpu", + "^test_softmax_negative_axis_expanded_cpu", + "^test_split_equal_parts_1d_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Split(13) + "^test_split_equal_parts_2d_cpu", + "^test_split_equal_parts_default_axis_cpu", + "^test_split_variable_parts_1d_cpu", + "^test_split_variable_parts_2d_cpu", + "^test_split_variable_parts_default_axis_cpu", + "^test_split_zero_size_splits_cpu", + "^test_squeeze_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Squeeze(13) + "^test_squeeze_negative_axes_cpu", + "^test_unsqueeze_axis_0_cpu", // NOT_IMPLEMENTED : Could not find an implementation for the node Unsqueeze(13) + 
"^test_unsqueeze_axis_1_cpu", + "^test_unsqueeze_axis_2_cpu", + "^test_unsqueeze_negative_axes_cpu", + "^test_unsqueeze_three_axes_cpu", + "^test_unsqueeze_two_axes_cpu", + "^test_unsqueeze_unsorted_axes_cpu" ], "current_failing_tests_x86": [ "^test_vgg19", diff --git a/tools/ci_build/github/azure-pipelines/c-api-packaging-pipelines.yml b/tools/ci_build/github/azure-pipelines/c-api-packaging-pipelines.yml index a0b1ea5ba5344..19228b18b4a27 100644 --- a/tools/ci_build/github/azure-pipelines/c-api-packaging-pipelines.yml +++ b/tools/ci_build/github/azure-pipelines/c-api-packaging-pipelines.yml @@ -18,7 +18,7 @@ jobs: script: | mkdir -p $HOME/.onnx docker run --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro \ - --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g python3 \ + --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m python3 \ /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release \ --skip_submodule_sync --parallel --build_shared_lib --use_openmp workingDirectory: $(Build.SourcesDirectory) @@ -58,7 +58,7 @@ jobs: script: | mkdir -p $HOME/.onnx docker run --gpus all -e NVIDIA_VISIBLE_DEVICES=all --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build \ - --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch5h \ + --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch9q \ python3 
/onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release \ --skip_submodule_sync --parallel --build_shared_lib --use_cuda --cuda_version=10.2 --cuda_home=/usr/local/cuda-10.2 --cudnn_home=/usr/local/cuda-10.2 workingDirectory: $(Build.SourcesDirectory) diff --git a/tools/ci_build/github/azure-pipelines/java-api-packaging-pipelines-gpu.yml b/tools/ci_build/github/azure-pipelines/java-api-packaging-pipelines-gpu.yml index b895c72a9170f..b435f0e956510 100644 --- a/tools/ci_build/github/azure-pipelines/java-api-packaging-pipelines-gpu.yml +++ b/tools/ci_build/github/azure-pipelines/java-api-packaging-pipelines-gpu.yml @@ -24,7 +24,7 @@ jobs: inputs: script: | mkdir -p $HOME/.onnx - docker run --gpus all -e NVIDIA_VISIBLE_DEVICES=all --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch5h python3 /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release --skip_submodule_sync --parallel --build_java --build_shared_lib --use_cuda --cuda_version=10.2 --cuda_home=/usr/local/cuda-10.2 --cudnn_home=/usr/local/cuda-10.2 + docker run --gpus all -e NVIDIA_VISIBLE_DEVICES=all --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch9q python3 /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release --skip_submodule_sync --parallel --build_java --build_shared_lib --use_cuda --cuda_version=10.2 --cuda_home=/usr/local/cuda-10.2 --cudnn_home=/usr/local/cuda-10.2 workingDirectory: $(Build.SourcesDirectory) - task: 
Docker@2 displayName: logout @@ -258,7 +258,7 @@ jobs: - task: CmdLine@2 inputs: script: | - docker run --gpus all -e NVIDIA_VISIBLE_DEVICES=all --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch5h /onnxruntime_src/tools/ci_build/github/linux/java_linux_final_test.sh -v $(OnnxRuntimeVersion) -r /build + docker run --gpus all -e NVIDIA_VISIBLE_DEVICES=all --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch9q /onnxruntime_src/tools/ci_build/github/linux/java_linux_final_test.sh -v $(OnnxRuntimeVersion) -r /build workingDirectory: $(Build.BinariesDirectory)/final-jar - task: Docker@2 diff --git a/tools/ci_build/github/azure-pipelines/java-api-packaging-pipelines.yml b/tools/ci_build/github/azure-pipelines/java-api-packaging-pipelines.yml index c2695cd8c7159..e1b34a8b55d90 100644 --- a/tools/ci_build/github/azure-pipelines/java-api-packaging-pipelines.yml +++ b/tools/ci_build/github/azure-pipelines/java-api-packaging-pipelines.yml @@ -26,7 +26,7 @@ jobs: inputs: script: | mkdir -p $HOME/.onnx - docker run --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g /bin/bash -c "python3 /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release --skip_submodule_sync --parallel --build_shared_lib --build_java --use_openmp --enable_onnx_tests && cd /build/Release && make install DESTDIR=/build/linux-x64" + docker run --rm --volume /data/onnx:/data/onnx:ro 
--volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m /bin/bash -c "python3 /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release --skip_submodule_sync --parallel --build_shared_lib --build_java --use_openmp --enable_onnx_tests && cd /build/Release && make install DESTDIR=/build/linux-x64" workingDirectory: $(Build.SourcesDirectory) displayName: 'Run build and test' - task: Docker@2 diff --git a/tools/ci_build/github/azure-pipelines/linux-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/linux-ci-pipeline.yml index 777125a5c3708..d39ca753372da 100644 --- a/tools/ci_build/github/azure-pipelines/linux-ci-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/linux-ci-pipeline.yml @@ -28,7 +28,7 @@ jobs: -e ALLOW_RELEASED_ONNX_OPSET_ONLY=0 \ -e NIGHTLY_BUILD \ -e BUILD_BUILDNUMBER \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m \ python3 /onnxruntime_src/tools/ci_build/build.py \ --build_dir /build --cmake_generator Ninja \ --config Debug Release \ diff --git a/tools/ci_build/github/azure-pipelines/linux-cpu-minimal-build-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/linux-cpu-minimal-build-ci-pipeline.yml index cfb000cce2961..363185d6cc585 100644 --- a/tools/ci_build/github/azure-pipelines/linux-cpu-minimal-build-ci-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/linux-cpu-minimal-build-ci-pipeline.yml @@ -39,7 +39,7 @@ jobs: -e ALLOW_RELEASED_ONNX_OPSET_ONLY=1 \ -e NIGHTLY_BUILD \ -e BUILD_BUILDNUMBER \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m \ /bin/bash 
/onnxruntime_src/tools/ci_build/github/linux/ort_minimal/build_full_ort_and_create_ort_files.sh workingDirectory: $(Build.SourcesDirectory) - task: CmdLine@2 @@ -54,7 +54,7 @@ jobs: -e ALLOW_RELEASED_ONNX_OPSET_ONLY=1 \ -e NIGHTLY_BUILD \ -e BUILD_BUILDNUMBER \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m \ python3 /onnxruntime_src/tools/ci_build/build.py \ --build_dir /build --cmake_generator Ninja \ --config Debug\ @@ -76,7 +76,7 @@ jobs: -e ALLOW_RELEASED_ONNX_OPSET_ONLY=1 \ -e NIGHTLY_BUILD \ -e BUILD_BUILDNUMBER \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m \ /bin/bash /onnxruntime_src/tools/ci_build/github/linux/ort_minimal/build_minimal_ort_and_run_tests.sh workingDirectory: $(Build.SourcesDirectory) - task: CmdLine@2 @@ -96,7 +96,7 @@ jobs: -e BUILD_SOURCEVERSION=$(Build.SourceVersion) \ -e BUILD_ID=$(Build.BuildId) \ -e DASHBOARD_MYSQL_ORT_PASSWORD=$(dashboard-mysql-ort-password) \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m \ /bin/bash /onnxruntime_src/tools/ci_build/github/linux/ort_minimal/build_minimal_ort_android_baseline_and_report_bin_size.sh workingDirectory: $(Build.SourcesDirectory) - task: Docker@2 diff --git a/tools/ci_build/github/azure-pipelines/linux-dnnl-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/linux-dnnl-ci-pipeline.yml index 65d300bca9f8c..84ba202a3866f 100644 --- a/tools/ci_build/github/azure-pipelines/linux-dnnl-ci-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/linux-dnnl-ci-pipeline.yml @@ -27,7 +27,7 @@ jobs: --volume $HOME/.onnx:/home/onnxruntimedev/.onnx \ -e NIGHTLY_BUILD \ -e BUILD_BUILDNUMBER \ - 
onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecpubuild:ch5e \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecpubuild:ch9j \ /opt/python/cp37-cp37m/bin/python3 /onnxruntime_src/tools/ci_build/build.py \ --build_dir /build \ --config Debug Release \ diff --git a/tools/ci_build/github/azure-pipelines/linux-gpu-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/linux-gpu-ci-pipeline.yml index 72cd550ed101d..bb582af020b16 100644 --- a/tools/ci_build/github/azure-pipelines/linux-gpu-ci-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/linux-gpu-ci-pipeline.yml @@ -28,7 +28,7 @@ jobs: -e ALLOW_RELEASED_ONNX_OPSET_ONLY=0 \ -e NIGHTLY_BUILD \ -e BUILD_BUILDNUMBER \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch5h \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch9q \ python3 /onnxruntime_src/tools/ci_build/build.py \ --build_dir /build --cmake_generator Ninja \ --config Debug Release \ diff --git a/tools/ci_build/github/azure-pipelines/linux-gpu-cuda-11-pipeline.yml b/tools/ci_build/github/azure-pipelines/linux-gpu-cuda-11-pipeline.yml index 043f1bac766c0..eee57e90f8bf2 100644 --- a/tools/ci_build/github/azure-pipelines/linux-gpu-cuda-11-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/linux-gpu-cuda-11-pipeline.yml @@ -28,7 +28,7 @@ jobs: -e ALLOW_RELEASED_ONNX_OPSET_ONLY=0 \ -e NIGHTLY_BUILD \ -e BUILD_BUILDNUMBER \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecuda11build:ch5j \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecuda11build:ch9p \ /opt/python/cp37-cp37m/bin/python3 /onnxruntime_src/tools/ci_build/build.py \ --build_dir /build --cmake_generator Ninja \ --config Debug Release \ diff --git a/tools/ci_build/github/azure-pipelines/linux-multi-gpu-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/linux-multi-gpu-ci-pipeline.yml index 2752379f551a5..cce2ae23d123a 100644 --- 
a/tools/ci_build/github/azure-pipelines/linux-multi-gpu-ci-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/linux-multi-gpu-ci-pipeline.yml @@ -27,7 +27,7 @@ jobs: --volume $HOME/.onnx:/home/onnxruntimedev/.onnx \ -e NIGHTLY_BUILD \ -e BUILD_BUILDNUMBER \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch5h \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch9q \ python3 /onnxruntime_src/tools/ci_build/build.py \ --build_dir /build --cmake_generator Ninja \ --config Debug Release \ diff --git a/tools/ci_build/github/azure-pipelines/linux-nocontribops-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/linux-nocontribops-ci-pipeline.yml index 1a0efaa14dcea..b68290295d969 100644 --- a/tools/ci_build/github/azure-pipelines/linux-nocontribops-ci-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/linux-nocontribops-ci-pipeline.yml @@ -28,7 +28,7 @@ jobs: -e ALLOW_RELEASED_ONNX_OPSET_ONLY=0 \ -e NIGHTLY_BUILD \ -e BUILD_BUILDNUMBER \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m \ python3 /onnxruntime_src/tools/ci_build/build.py \ --build_dir /build --cmake_generator Ninja \ --config Debug Release \ diff --git a/tools/ci_build/github/azure-pipelines/nodejs/templates/cpu.yml b/tools/ci_build/github/azure-pipelines/nodejs/templates/cpu.yml index 9aa591ff27bf5..8a89be10cfd1f 100644 --- a/tools/ci_build/github/azure-pipelines/nodejs/templates/cpu.yml +++ b/tools/ci_build/github/azure-pipelines/nodejs/templates/cpu.yml @@ -48,7 +48,7 @@ jobs: inputs: script: | mkdir -p $HOME/.onnx && docker run --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro \ - --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD 
onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g /bin/bash -c "python3 \ + --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m /bin/bash -c "python3 \ /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release --build_nodejs \ --skip_submodule_sync --parallel --build_shared_lib --use_openmp && cd /onnxruntime_src/nodejs && npm pack" workingDirectory: $(Build.SourcesDirectory) diff --git a/tools/ci_build/github/azure-pipelines/nuget/templates/cpu-mklml.yml b/tools/ci_build/github/azure-pipelines/nuget/templates/cpu-mklml.yml index df2c9c5ecf7a0..e152ebcb1b918 100644 --- a/tools/ci_build/github/azure-pipelines/nuget/templates/cpu-mklml.yml +++ b/tools/ci_build/github/azure-pipelines/nuget/templates/cpu-mklml.yml @@ -40,7 +40,7 @@ jobs: inputs: script: | mkdir -p $HOME/.onnx - docker run --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g /bin/bash -c "python3 /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release --skip_submodule_sync --parallel --build_shared_lib --use_openmp --enable_onnx_tests --use_mklml && cd /build/Release && make install DESTDIR=/build/linux-x64" + docker run --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m /bin/bash -c "python3 /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release --skip_submodule_sync --parallel --build_shared_lib 
--use_openmp --enable_onnx_tests --use_mklml && cd /build/Release && make install DESTDIR=/build/linux-x64" workingDirectory: $(Build.SourcesDirectory) - task: Docker@2 displayName: logout diff --git a/tools/ci_build/github/azure-pipelines/nuget/templates/cpu-nocontribops-arm64.yml b/tools/ci_build/github/azure-pipelines/nuget/templates/cpu-nocontribops-arm64.yml index 9108afce13ddf..1846b5cac2bb3 100644 --- a/tools/ci_build/github/azure-pipelines/nuget/templates/cpu-nocontribops-arm64.yml +++ b/tools/ci_build/github/azure-pipelines/nuget/templates/cpu-nocontribops-arm64.yml @@ -82,7 +82,7 @@ jobs: inputs: script: | mkdir -p $HOME/.onnx - docker run --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g /bin/bash -c "python3 /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release --skip_submodule_sync --parallel --build_shared_lib --enable_onnx_tests --disable_contrib_ops --disable_ml_ops && cd /build/Release && make install DESTDIR=/build/linux-x64" + docker run --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m /bin/bash -c "python3 /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release --skip_submodule_sync --parallel --build_shared_lib --enable_onnx_tests --disable_contrib_ops --disable_ml_ops && cd /build/Release && make install DESTDIR=/build/linux-x64" workingDirectory: $(Build.SourcesDirectory) - task: Docker@2 displayName: logout diff --git 
a/tools/ci_build/github/azure-pipelines/nuget/templates/cpu.yml b/tools/ci_build/github/azure-pipelines/nuget/templates/cpu.yml index cd49b1198c149..b1de8e341a9d0 100644 --- a/tools/ci_build/github/azure-pipelines/nuget/templates/cpu.yml +++ b/tools/ci_build/github/azure-pipelines/nuget/templates/cpu.yml @@ -105,7 +105,7 @@ jobs: inputs: script: | mkdir -p $HOME/.onnx - docker run --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch5g /bin/bash -c "python3 /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release --skip_submodule_sync --parallel --build_shared_lib --use_openmp --enable_onnx_tests && cd /build/Release && make install DESTDIR=/build/linux-x64" + docker run --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentoscpubuild:ch9m /bin/bash -c "python3 /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release --skip_submodule_sync --parallel --build_shared_lib --use_openmp --enable_onnx_tests && cd /build/Release && make install DESTDIR=/build/linux-x64" workingDirectory: $(Build.SourcesDirectory) - task: Docker@2 displayName: logout diff --git a/tools/ci_build/github/azure-pipelines/nuget/templates/gpu.yml b/tools/ci_build/github/azure-pipelines/nuget/templates/gpu.yml index 63d6538b17dbb..d5f57127c6ce0 100644 --- a/tools/ci_build/github/azure-pipelines/nuget/templates/gpu.yml +++ b/tools/ci_build/github/azure-pipelines/nuget/templates/gpu.yml @@ -89,7 +89,7 @@ jobs: inputs: script: | mkdir -p $HOME/.onnx - docker run 
--gpus all -e NVIDIA_VISIBLE_DEVICES=all --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch5h \ + docker run --gpus all -e NVIDIA_VISIBLE_DEVICES=all --rm --volume /data/onnx:/data/onnx:ro --volume $(Build.SourcesDirectory):/onnxruntime_src --volume $(Build.BinariesDirectory):/build --volume /data/models:/build/models:ro --volume $HOME/.onnx:/home/onnxruntimedev/.onnx -e NIGHTLY_BUILD onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecentosgpubuild:ch9q \ /bin/bash -c "python3 /onnxruntime_src/tools/ci_build/build.py --build_dir /build --config Release --skip_submodule_sync --parallel --build_shared_lib --use_cuda --cuda_version=10.2 --cuda_home=/usr/local/cuda-10.2 --cudnn_home=/usr/local/cuda-10.2 --enable_onnx_tests && cd /build/Release && make install DESTDIR=/build/linux-x64" - task: Docker@2 displayName: logout diff --git a/tools/ci_build/github/azure-pipelines/orttraining-linux-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/orttraining-linux-ci-pipeline.yml index bf4234149884f..b0a6987f0ccdd 100644 --- a/tools/ci_build/github/azure-pipelines/orttraining-linux-ci-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/orttraining-linux-ci-pipeline.yml @@ -28,7 +28,7 @@ jobs: -e ALLOW_RELEASED_ONNX_OPSET_ONLY=0 \ -e NIGHTLY_BUILD \ -e BUILD_BUILDNUMBER \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecpubuild:ch5e \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecpubuild:ch9j \ /opt/python/cp37-cp37m/bin/python3 /onnxruntime_src/tools/ci_build/build.py \ --build_dir /build --cmake_generator Ninja \ --config Debug Release \ diff --git a/tools/ci_build/github/azure-pipelines/templates/py-packaging-stage.yml 
b/tools/ci_build/github/azure-pipelines/templates/py-packaging-stage.yml index 4dfd0c6937e87..46672b3790dd5 100644 --- a/tools/ci_build/github/azure-pipelines/templates/py-packaging-stage.yml +++ b/tools/ci_build/github/azure-pipelines/templates/py-packaging-stage.yml @@ -104,7 +104,7 @@ stages: --volume $HOME/.onnx:/home/onnxruntimedev/.onnx \ -e NIGHTLY_BUILD \ -e BUILD_BUILDNUMBER \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecpubuild:ch5e \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimecpubuild:ch9j \ $(python.manylinux.dir)/bin/python3 /onnxruntime_src/tools/ci_build/build.py \ --build_dir /build --cmake_generator Ninja \ --config Release \ @@ -182,7 +182,7 @@ stages: --volume $HOME/.onnx:/home/onnxruntimedev/.onnx \ -e NIGHTLY_BUILD \ -e BUILD_BUILDNUMBER \ - onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimegpubuild:ch5f \ + onnxruntimeregistry.azurecr.io/internal/azureml/onnxruntimegpubuild:ch9k \ $(python.manylinux.dir)/bin/python3 /onnxruntime_src/tools/ci_build/build.py \ --build_dir /build --cmake_generator Ninja \ --config Release \ diff --git a/tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt b/tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt index ef1eac2293f72..009eb1d0f99f1 100644 --- a/tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt +++ b/tools/ci_build/github/linux/docker/scripts/manylinux/requirements.txt @@ -3,7 +3,7 @@ mypy pytest setuptools>=41.4.0 wheel -git+http://github.com/onnx/onnx.git@b71de776d2847b6e0fed54a888a8213a7812ad12#egg=onnx +git+http://github.com/onnx/onnx.git@a7a0fec7f25cae567429af62b7eaaee1c3f0e247#egg=onnx protobuf sympy==1.1.1 flake8 diff --git a/tools/ci_build/github/linux/docker/scripts/requirements.txt b/tools/ci_build/github/linux/docker/scripts/requirements.txt index 33fe9170ca5e2..46eb5faee3849 100644 --- a/tools/ci_build/github/linux/docker/scripts/requirements.txt +++ 
b/tools/ci_build/github/linux/docker/scripts/requirements.txt @@ -4,7 +4,7 @@ mypy pytest setuptools>=41.4.0 wheel -git+http://github.com/onnx/onnx.git@b71de776d2847b6e0fed54a888a8213a7812ad12#egg=onnx +git+http://github.com/onnx/onnx.git@a7a0fec7f25cae567429af62b7eaaee1c3f0e247#egg=onnx argparse sympy==1.1.1 flake8