
Commit e51bcfb

Implement DepthToSpace uint8_t and Enable DropQDQNodesRules (microsoft#23352)
### Description

- Implemented the DepthToSpace uint8_t kernel.
- Enabled DropQDQNodesRules for DepthToSpace.
- Added unit tests for the DepthToSpace uint8_t kernel.

### Motivation and Context

This change improves the performance of the Image Super-Resolution INT8 model (RFDN). Specifically, it raises inferences per second (IPS) by 25%, a significant gain in efficiency and speed.
1 parent 331fc36 commit e51bcfb
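Why this helps: DepthToSpace only rearranges elements, so it commutes with per-tensor linear quantization. Once an integer kernel exists, the optimizer can drop the surrounding DequantizeLinear/QuantizeLinear pair and run the op directly on uint8 data, skipping the float round-trip. A minimal standalone sketch of that equivalence (illustrative C++ only, not ONNX Runtime code; `std::reverse` stands in for the actual element rearrangement):

```cpp
// For any element permutation P and per-tensor quantization
// q(x) = round(x / scale) + zero_point, we have P(q(x)) == q(P(x)),
// so DQ -> P -> Q can be replaced by P applied directly to the uint8 data.
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <vector>

int main() {
  const float scale = 0.5f;
  const uint8_t zero_point = 128;
  std::vector<uint8_t> q = {10, 20, 30, 40};

  // Path 1: dequantize, permute (reverse stands in for DepthToSpace), requantize.
  std::vector<float> deq(q.size());
  for (size_t i = 0; i < q.size(); ++i) deq[i] = (q[i] - zero_point) * scale;
  std::reverse(deq.begin(), deq.end());
  std::vector<uint8_t> requant(q.size());
  for (size_t i = 0; i < q.size(); ++i)
    requant[i] = static_cast<uint8_t>(std::lround(deq[i] / scale) + zero_point);

  // Path 2: permute the quantized bytes directly.
  std::vector<uint8_t> direct = q;
  std::reverse(direct.begin(), direct.end());

  assert(requant == direct);  // identical results, no float round-trip needed
  return 0;
}
```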


5 files changed: +64 −12 lines changed


docs/OperatorKernels.md

Lines changed: 2 additions & 2 deletions
@@ -84,8 +84,8 @@ Do not modify directly.*
 |||[11, 13]|**T** = tensor(double), tensor(float), tensor(int32), tensor(int64)<br/> **T2** = tensor(int32), tensor(int64)|
 |DFT|*in* input:**T1**<br> *in* dft_length:**T2**<br> *in* axis:**tensor(int64)**<br> *out* output:**T1**<br><br>or<br><br>*in* input:**T1**<br> *in* dft_length:**T2**<br> *out* output:**T1**|20+|**T1** = tensor(double), tensor(float)<br/> **T2** = tensor(int32), tensor(int64)|
 |||[17, 19]|**T1** = tensor(double), tensor(float)<br/> **T2** = tensor(int32), tensor(int64)|
-|DepthToSpace|*in* input:**T**<br> *out* output:**T**|13+|**T** = tensor(double), tensor(float)|
-|||[11, 12]|**T** = tensor(double), tensor(float)|
+|DepthToSpace|*in* input:**T**<br> *out* output:**T**|13+|**T** = tensor(double), tensor(float), tensor(uint8)|
+|||[11, 12]|**T** = tensor(double), tensor(float), tensor(uint8)|
 |||[1, 10]|**T** = tensor(double), tensor(float)|
 |DequantizeLinear|*in* x:**T**<br> *in* x_scale:**tensor(float)**<br> *in* x_zero_point:**T**<br> *out* y:**tensor(float)**<br><br>or<br><br>*in* x:**T1**<br> *in* x_scale:**T2**<br> *in* x_zero_point:**T1**<br> *out* y:**T2**|21+|**T1** = tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(int16), tensor(int32), tensor(int4), tensor(int8), tensor(uint16), tensor(uint4), tensor(uint8)<br/> **T2** = tensor(float), tensor(float16)|
 |||[19, 20]|**T1** = tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz), tensor(int32), tensor(int8), tensor(uint8)<br/> **T2** = tensor(float), tensor(float16)|

onnxruntime/core/optimizer/qdq_transformer/selectors_actions/qdq_selector_action_transformer.cc

Lines changed: 3 additions & 2 deletions
@@ -77,7 +77,8 @@ void DropQDQNodesRules(SelectorActionRegistry& qdq_selector_action_registry) {
                                                          true,
                                                          cpu_ep);
   qdq_selector_action_registry.RegisterSelectorAndAction(drop_action_no_int16_name,
-                                                         {{"Resize", {}}},
+                                                         {{"DepthToSpace", {}},
+                                                          {"Resize", {}}},
                                                          std::move(selector_no_16bit),
                                                          std::move(drop_action_no_int16));

@@ -91,7 +92,7 @@ void DropQDQNodesRules(SelectorActionRegistry& qdq_selector_action_registry) {
                                                          std::move(drop_action_no_int16_and_positive_scale));

   std::unique_ptr<NodeSelector> selector = std::make_unique<QDQ::DropQDQNodesSelector>(true, false, true, providers);
-  // DepthToSpace and SpaceToDepth not included because there are no integer implementations.
+  // SpaceToDepth not included because there are no integer implementations.
   // https://github.com/microsoft/onnxruntime/issues/21287
   qdq_selector_action_registry.RegisterSelectorAndAction(drop_action_name,
                                                          {{"Expand", {}},

onnxruntime/core/providers/cpu/tensor/space_depth_ops.cc

Lines changed: 17 additions & 2 deletions
@@ -43,15 +43,17 @@ ONNX_CPU_OPERATOR_VERSIONED_KERNEL(
     12,
     KernelDefBuilder()
         .TypeConstraint("T", {DataTypeImpl::GetTensorType<float>(),
-                              DataTypeImpl::GetTensorType<double>()}),
+                              DataTypeImpl::GetTensorType<double>(),
+                              DataTypeImpl::GetTensorType<uint8_t>()}),
     DepthToSpace);

 ONNX_CPU_OPERATOR_KERNEL(
     DepthToSpace,
     13,
     KernelDefBuilder()
         .TypeConstraint("T", {DataTypeImpl::GetTensorType<float>(),
-                              DataTypeImpl::GetTensorType<double>()}),
+                              DataTypeImpl::GetTensorType<double>(),
+                              DataTypeImpl::GetTensorType<uint8_t>()}),
     DepthToSpace);

 // intermediate tensor shapes are:

@@ -196,6 +198,19 @@ Status DepthToSpace::Compute(OpKernelContext* context) const {
                                  onnxruntime::narrow<std::ptrdiff_t>(blocksize_),
                                  onnxruntime::narrow<std::ptrdiff_t>(input_width),
                                  onnxruntime::narrow<std::ptrdiff_t>(blocksize_));
+  } else if (input.IsDataType<uint8_t>()) {
+    SpaceDepthOpCpuImpl<uint8_t>(input, output, permutation,
+                                 onnxruntime::narrow<std::ptrdiff_t>(batch),
+                                 onnxruntime::narrow<std::ptrdiff_t>(dim1),
+                                 onnxruntime::narrow<std::ptrdiff_t>(blocksize_),
+                                 onnxruntime::narrow<std::ptrdiff_t>(dim3),
+                                 onnxruntime::narrow<std::ptrdiff_t>(input_height),
+                                 onnxruntime::narrow<std::ptrdiff_t>(input_width),
+                                 onnxruntime::narrow<std::ptrdiff_t>(input_depth / blocksize_ / blocksize_),
+                                 onnxruntime::narrow<std::ptrdiff_t>(input_height),
+                                 onnxruntime::narrow<std::ptrdiff_t>(blocksize_),
+                                 onnxruntime::narrow<std::ptrdiff_t>(input_width),
+                                 onnxruntime::narrow<std::ptrdiff_t>(blocksize_));
   } else {
     // user will not see this as the kernel doesn't claim support for types other than float and double
     return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Unsupported input type in DepthToSpace op: ", input.DataType());
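For reference, `SpaceDepthOpCpuImpl` realizes the op as a 6-D reshape plus transpose driven by the `permutation` argument; for the default DCR mode the index math is `output[n, c, h*b + bh, w*b + bw] = input[n, (bh*b + bw)*(C/b^2) + c, h, w]`. A self-contained loop-nest sketch of the same mapping for uint8_t (an illustrative helper, not the actual template):

```cpp
// Simplified DCR DepthToSpace on an NCHW uint8 tensor; C must be divisible
// by blocksize^2. Illustrative only, not ONNX Runtime's SpaceDepthOpCpuImpl.
#include <cassert>
#include <cstdint>
#include <vector>

std::vector<uint8_t> DepthToSpaceDCR(const std::vector<uint8_t>& x,
                                     int64_t N, int64_t C, int64_t H, int64_t W,
                                     int64_t b) {
  const int64_t Co = C / (b * b);  // output channel count
  std::vector<uint8_t> y(static_cast<size_t>(N * C * H * W));
  for (int64_t n = 0; n < N; ++n)
    for (int64_t bh = 0; bh < b; ++bh)
      for (int64_t bw = 0; bw < b; ++bw)
        for (int64_t c = 0; c < Co; ++c)
          for (int64_t h = 0; h < H; ++h)
            for (int64_t w = 0; w < W; ++w) {
              const int64_t in_c = (bh * b + bw) * Co + c;
              const int64_t src = ((n * C + in_c) * H + h) * W + w;
              const int64_t dst =
                  ((n * Co + c) * (H * b) + (h * b + bh)) * (W * b) + (w * b + bw);
              y[static_cast<size_t>(dst)] = x[static_cast<size_t>(src)];
            }
  return y;
}

int main() {
  // 1x4x1x1 -> 1x1x2x2 with blocksize 2: channels become one 2x2 spatial block.
  std::vector<uint8_t> x = {1, 2, 3, 4};
  std::vector<uint8_t> y = DepthToSpaceDCR(x, 1, 4, 1, 1, 2);
  assert((y == std::vector<uint8_t>{1, 2, 3, 4}));
  return 0;
}
```

Since the kernel only copies bytes, the uint8_t instantiation reuses the existing float logic with a narrower element type; no new arithmetic is involved.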

onnxruntime/test/providers/cpu/tensor/space_depth_ops_test.cc

Lines changed: 36 additions & 6 deletions
@@ -13,7 +13,7 @@ template <typename T>
 class TensorOpTest : public ::testing::Test {
 };

-using TensorOpTestTypes = ::testing::Types<float, MLFloat16>;
+using TensorOpTestTypes = ::testing::Types<float, MLFloat16, uint8_t>;
 TYPED_TEST_SUITE(TensorOpTest, TensorOpTestTypes);

 TEST(TensorOpTest, SpaceToDepthTest_1) {

@@ -224,6 +224,7 @@ TEST(TensorOpTest, DepthToSpaceTest_1_double) {
   test.AddOutput<double>("output", {N, C / (blocksize * blocksize), H * blocksize, W * blocksize}, result);
   test.Run();
 }
+
 TEST(TensorOpTest, DepthToSpaceTest_2) {
   OpTester test("DepthToSpace", 7);  // create an opset 7 model
   constexpr int64_t blocksize = 2;

@@ -308,14 +309,24 @@ TYPED_TEST(TensorOpTest, DepthToSpaceTest_3) {
   if constexpr (std::is_same<TypeParam, float>::value) {
     test.AddInput<float>("input", {N, C, H, W}, X);
     test.AddOutput<float>("output", {2, 3, 6, 4}, result);
-  } else {
+  } else if constexpr (std::is_same<TypeParam, MLFloat16>::value) {
     std::vector<TypeParam> X_fp16(X.size());
     std::vector<TypeParam> result_fp16(result.size());
-    ConvertFloatToMLFloat16(result.data(), result_fp16.data(), result.size());
     ConvertFloatToMLFloat16(X.data(), X_fp16.data(), X.size());
-    test.AddOutput<TypeParam>("output", {2, 3, 6, 4}, result_fp16);
+    ConvertFloatToMLFloat16(result.data(), result_fp16.data(), result.size());
     test.AddInput<TypeParam>("input", {N, C, H, W}, X_fp16);
+    test.AddOutput<TypeParam>("output", {2, 3, 6, 4}, result_fp16);
+  } else if constexpr (std::is_same<TypeParam, uint8_t>::value) {
+    std::vector<uint8_t> X_u8(X.size());
+    std::vector<uint8_t> result_u8(result.size());
+    ConvertFloatToUint8_t(X.data(), X_u8.data(), X.size());
+    ConvertFloatToUint8_t(result.data(), result_u8.data(), result.size());
+    test.AddInput<uint8_t>("input", {N, C, H, W}, X_u8);
+    test.AddOutput<uint8_t>("output", {2, 3, 6, 4}, result_u8);
+  } else {
+    ORT_THROW("Type not supported");
   }
+
   // TODO: Test is flaky on QNN EP (CPU backend).
   // Re-enable when the QnnCPUBackendTests.DISABLED_SpaceToDepth_Flaky test is fixed.
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kQnnExecutionProvider});

@@ -363,13 +374,22 @@ TYPED_TEST(TensorOpTest, DepthToSpaceTest_4) {
   if constexpr (std::is_same<TypeParam, float>::value) {
     test.AddInput<float>("input", {N, C, H, W}, X);
     test.AddOutput<float>("output", {2, 3, 6, 4}, result);
-  } else {
+  } else if constexpr (std::is_same<TypeParam, MLFloat16>::value) {
     std::vector<TypeParam> X_fp16(X.size());
     std::vector<TypeParam> result_fp16(result.size());
     ConvertFloatToMLFloat16(X.data(), X_fp16.data(), X.size());
     ConvertFloatToMLFloat16(result.data(), result_fp16.data(), result.size());
     test.AddInput<TypeParam>("input", {N, C, H, W}, X_fp16);
     test.AddOutput<TypeParam>("output", {2, 3, 6, 4}, result_fp16);
+  } else if constexpr (std::is_same<TypeParam, uint8_t>::value) {
+    std::vector<uint8_t> X_u8(X.size());
+    std::vector<uint8_t> result_u8(result.size());
+    ConvertFloatToUint8_t(X.data(), X_u8.data(), X.size());
+    ConvertFloatToUint8_t(result.data(), result_u8.data(), result.size());
+    test.AddInput<uint8_t>("input", {N, C, H, W}, X_u8);
+    test.AddOutput<uint8_t>("output", {2, 3, 6, 4}, result_u8);
+  } else {
+    ORT_THROW("Type not supported");
   }

   // TODO: Test is flaky on QNN EP (CPU backend).

@@ -401,14 +421,24 @@ TYPED_TEST(TensorOpTest, DepthToSpaceTest_5) {
   if constexpr (std::is_same<TypeParam, float>::value) {
     test.AddInput<float>("input", {N, C, H, W}, X);
     test.AddOutput<float>("output", {1, 1, 4, 6}, result);
-  } else {
+  } else if constexpr (std::is_same<TypeParam, MLFloat16>::value) {
     std::vector<TypeParam> X_fp16(X.size());
     std::vector<TypeParam> result_fp16(result.size());
     ConvertFloatToMLFloat16(X.data(), X_fp16.data(), X.size());
     ConvertFloatToMLFloat16(result.data(), result_fp16.data(), result.size());
     test.AddInput<TypeParam>("input", {N, C, H, W}, X_fp16);
     test.AddOutput<TypeParam>("output", {1, 1, 4, 6}, result_fp16);
+  } else if constexpr (std::is_same<TypeParam, uint8_t>::value) {
+    std::vector<uint8_t> X_u8(X.size());
+    std::vector<uint8_t> result_u8(result.size());
+    ConvertFloatToUint8_t(X.data(), X_u8.data(), X.size());
+    ConvertFloatToUint8_t(result.data(), result_u8.data(), result.size());
+    test.AddInput<uint8_t>("input", {N, C, H, W}, X_u8);
+    test.AddOutput<uint8_t>("output", {1, 1, 4, 6}, result_u8);
+  } else {
+    ORT_THROW("Type not supported");
   }
+
   // TODO: Test is flaky on QNN EP (CPU backend).
   // Re-enable when the QnnCPUBackendTests.DISABLED_SpaceToDepth_Flaky2 test is fixed.
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kQnnExecutionProvider});
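The tests rely on `if constexpr` so each `TYPED_TEST` instantiation compiles only the branch for its own type, with a runtime `ORT_THROW` guarding any type later added to `TensorOpTestTypes` without a matching branch. A standalone sketch of the dispatch pattern (independent of OpTester):

```cpp
// Compile-time type dispatch as used in the updated typed tests.
#include <cstdint>
#include <iostream>
#include <type_traits>

template <typename T>
void Describe() {
  if constexpr (std::is_same<T, float>::value) {
    std::cout << "float: use the reference data as-is\n";
  } else if constexpr (std::is_same<T, uint8_t>::value) {
    std::cout << "uint8_t: convert reference floats with ConvertFloatToUint8_t\n";
  } else {
    std::cout << "Type not supported\n";  // the tests call ORT_THROW here
  }
}

int main() {
  Describe<float>();
  Describe<uint8_t>();
  return 0;
}
```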

onnxruntime/test/providers/provider_test_utils.h

Lines changed: 6 additions & 0 deletions
@@ -15,6 +15,12 @@ inline void ConvertFloatToMLFloat16(const float* f_datat, MLFloat16* h_data, size_t input_size) {
   output_vector = in_vector.template cast<Eigen::half>();
 }

+inline void ConvertFloatToUint8_t(const float* f_datat, uint8_t* u8_data, size_t input_size) {
+  auto in_vector = ConstEigenVectorMap<float>(f_datat, input_size);
+  auto output_vector = EigenVectorMap<uint8_t>(static_cast<uint8_t*>(static_cast<void*>(u8_data)), input_size);
+  output_vector = in_vector.template cast<uint8_t>();
+}
+
 inline void ConvertMLFloat16ToFloat(const MLFloat16* h_data, float* f_data, size_t input_size) {
   auto in_vector =
       ConstEigenVectorMap<Eigen::half>(static_cast<const Eigen::half*>(static_cast<const void*>(h_data)), input_size);
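One caveat worth noting: Eigen's `.cast<uint8_t>()` follows `static_cast` semantics, truncating toward zero with no rounding or saturation, so the float test data fed through this helper should be whole numbers in [0, 255]. A minimal standalone illustration (assumes only that Eigen is on the include path):

```cpp
// Sketch of what ConvertFloatToUint8_t does under the hood: map raw buffers
// as Eigen vectors and cast element-wise (truncation, no saturation).
#include <cstdint>
#include <iostream>
#include <Eigen/Core>

int main() {
  float data[4] = {0.0f, 1.0f, 2.0f, 255.0f};
  uint8_t out[4];

  Eigen::Map<const Eigen::VectorXf> in(data, 4);
  Eigen::Map<Eigen::Matrix<uint8_t, Eigen::Dynamic, 1>> result(out, 4);
  result = in.cast<uint8_t>();

  for (uint8_t v : out) std::cout << static_cast<int>(v) << " ";  // 0 1 2 255
  return 0;
}
```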
