Commit 2d05c4b
[WebNN] Support SkipSimplifiedLayerNormalization op (microsoft#23151)
The algorithm of `SkipSimplifiedLayerNormalization` is quite similar to that of `SimplifiedLayerNormalization`; the only difference is that `SkipSimplifiedLayerNormalization` provides an additional output used for calculating the sum of the input, skip, and bias (if it exists). This change also fixes a bug in `SimplifiedLayerNormalization`: the bias is now added if it exists.
1 parent a9a881c commit 2d05c4b

11 files changed: +107 −60 lines
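For reference, here is a minimal sketch of what the decomposed graph computes for one row along the normalized axis, mirroring the Pow -> ReduceMean -> Add -> Sqrt -> Div -> Mul -> Add chain built in `normalization_op_builder.cc` below. The function name and plain-vector interface are illustrative stand-ins, not ONNX Runtime's API:

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// Hypothetical reference for the decomposed ops:
// Pow -> ReduceMean -> Add -> Sqrt -> Div -> Mul -> Add (optional bias),
// plus the extra input_skip_bias_sum output of the Skip variant.
void SkipSimplifiedLayerNormRow(const std::vector<float>& x,
                                const std::vector<float>& skip,
                                const std::vector<float>& scale,
                                const std::vector<float>* bias,  // may be nullptr
                                float epsilon,
                                std::vector<float>& y,
                                std::vector<float>& input_skip_bias_sum) {
  const std::size_t n = x.size();
  y.resize(n);
  input_skip_bias_sum.resize(n);

  // Pow(2) -> ReduceMean -> Add(epsilon) -> Sqrt
  double mean_sq = 0.0;
  for (float v : x) mean_sq += static_cast<double>(v) * v;
  const float rms = std::sqrt(static_cast<float>(mean_sq / n) + epsilon);

  for (std::size_t i = 0; i < n; ++i) {
    // Div -> Mul(scale) -> Add(bias, if it exists)
    y[i] = x[i] / rms * scale[i] + (bias ? (*bias)[i] : 0.0f);
    // The additional output: input + skip + bias (if it exists)
    input_skip_bias_sum[i] = x[i] + skip[i] + (bias ? (*bias)[i] : 0.0f);
  }
}
```

Note that, as in the diff, the skip input only feeds the extra `input_skip_bias_sum` output.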

js/web/docs/webnn-operators.md

Lines changed: 2 additions & 1 deletion
@@ -89,9 +89,10 @@ operators and the supported opset domain/versions in **WebNN EP** by ONNX Runtim
 | ScatterElements | ai.onnx(11-12, 13-15, 16-17, 18+) | scatterElements ||| Only supports 'reduction' == 'none' |
 | ScatterND | ai.onnx(11-12, 13-15, 16-17, 18+) | scatterND ||| Only supports 'reduction' == 'none' |
 | Shape | ai.onnx(7-12, 13-14, 15-18, 19-20, 21+) | slice ||| |
-| SimplifiedLayerNormalization | ai.onnx(1+) | pow + reduceMean + add + sqrt + div + mul ||| |
+| SimplifiedLayerNormalization | ai.onnx(1+) | pow, reduceMean, add, sqrt, div, mul ||| |
 | Sigmoid | ai.onnx(7-12, 13+) | sigmoid ||| |
 | Sign | ai.onnx(9-12, 13+) | sign ||| |
+| SkipSimplifiedLayerNormalization | com.microsoft(1+) | pow, reduceMean, add, sqrt, div, mul ||| |
 | Softplus | ai.onnx(7+) | softplus ||| |
 | Softsign | ai.onnx(7+) | softsign ||| |
 | Sin | ai.onnx(7+) | sin ||| |

onnxruntime/core/providers/webnn/builders/helper.h

Lines changed: 5 additions & 0 deletions
@@ -181,6 +181,10 @@ inline bool IsEmptyTensor(const InitializedTensorSet& initializers, const std::s
   return std::any_of(dims.begin(), dims.end(), [](auto d) { return d == 0; });
 }
 
+inline bool TensorExists(const ConstPointerContainer<std::vector<NodeArg*>>& defs, size_t tensor_index) noexcept {
+  return tensor_index < defs.size() && defs[tensor_index]->Exists();
+}
+
 bool IsTensorShapeSupported(const NodeArg& node_arg, const std::string& parent_name,
                             const logging::Logger& logger, bool allow_empty_input = false);

@@ -278,6 +282,7 @@ static const InlinedHashMap<std::string, std::string> op_map = {
     {"Softplus", "softplus"},
     {"Softsign", "softsign"},
     {"Sin", "sin"},
+    {"SkipSimplifiedLayerNormalization", "layerNormalization"},
     {"Slice", "slice"},
     {"Softmax", "softmax"},
     {"Split", "split"},

onnxruntime/core/providers/webnn/builders/impl/conv_op_builder.cc

Lines changed: 2 additions & 2 deletions
@@ -405,8 +405,8 @@ bool ConvOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet& /* initia
   int32_t input1_type;  // weight data type
   int32_t input2_type;  // bias or x_zero_point data type
   int32_t input3_type;  // w_zero_point data type
-  bool has_input2 = input_defs.size() > 2 && input_defs[2]->Exists();
-  bool has_input3 = input_defs.size() > 3 && input_defs[3]->Exists();
+  bool has_input2 = TensorExists(input_defs, 2);
+  bool has_input3 = TensorExists(input_defs, 3);

   if (!GetType(*input_defs[0], input0_type, logger) ||
       !GetType(*input_defs[1], input1_type, logger) ||

onnxruntime/core/providers/webnn/builders/impl/einsum_op_builder.cc

Lines changed: 1 addition & 1 deletion
@@ -742,7 +742,7 @@ bool EinsumOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet& /* init
   const auto& op_type = node.OpType();
   int32_t input0_type;
   int32_t input1_type;
-  bool has_input1 = input_defs.size() > 1 && input_defs[1]->Exists();
+  bool has_input1 = TensorExists(input_defs, 1);

   if (!GetType(*input_defs[0], input0_type, logger) ||
       (has_input1 && !GetType(*input_defs[1], input1_type, logger))) {

onnxruntime/core/providers/webnn/builders/impl/gemm_op_builder.cc

Lines changed: 2 additions & 2 deletions
@@ -223,8 +223,8 @@ bool GemmOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet& /* initia
   int32_t input1_type;  // B data type
   int32_t input2_type;  // C or a_zero_point data type
   int32_t input3_type;  // b_zero_point data type
-  bool has_input2 = input_defs.size() > 2 && input_defs[2]->Exists();
-  bool has_input3 = input_defs.size() > 3 && input_defs[3]->Exists();
+  bool has_input2 = TensorExists(input_defs, 2);
+  bool has_input3 = TensorExists(input_defs, 3);

   if (!GetType(*input_defs[0], input0_type, logger) ||
       !GetType(*input_defs[1], input1_type, logger) ||

onnxruntime/core/providers/webnn/builders/impl/gru_op_builder.cc

Lines changed: 10 additions & 10 deletions
@@ -33,7 +33,7 @@ class GruOpBuilder : public BaseOpBuilder {
 };

 void GruOpBuilder::AddInitializersToSkip(ModelBuilder& model_builder, const Node& node) const {
-  if (node.InputDefs().size() > 4 && node.InputDefs()[4]->Exists()) {
+  if (TensorExists(node.InputDefs(), 4)) {
     model_builder.AddInitializerToSkip(node.InputDefs()[4]->Name());  // sequence_lens
     model_builder.AddInputToSkip(node.InputDefs()[4]->Name());
   }

@@ -56,7 +56,7 @@ Status GruOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const No
   options.set("label", node.Name());
   options.set("layout", emscripten::val("zrn"));

-  if (input_defs.size() > 3 && input_defs[3]->Exists()) {
+  if (TensorExists(input_defs, 3)) {
     emscripten::val bias = model_builder.GetOperand(input_defs[3]->Name());
     emscripten::val split_options = emscripten::val::object();
     split_options.set("label", node.Name() + "_split");

@@ -68,16 +68,16 @@ Status GruOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const No
     options.set("recurrentBias", splitted_biases[1]);
   }

-  if (input_defs.size() > 5 && input_defs[5]->Exists()) {
+  if (TensorExists(input_defs, 5)) {
     options.set("initialHiddenState", model_builder.GetOperand(input_defs[5]->Name()));
   }

   bool linear_before_reset = !!helper.Get("linear_before_reset", 0);
   options.set("resetAfter", linear_before_reset);

   const auto& output_defs = node.OutputDefs();
-  bool has_Y = output_defs.size() > 0 && output_defs[0]->Exists();
-  bool has_Y_h = output_defs.size() > 1 && output_defs[1]->Exists();
+  bool has_Y = TensorExists(output_defs, 0);
+  bool has_Y_h = TensorExists(output_defs, 1);
   options.set("returnSequence", has_Y);

   std::string direction = helper.Get("direction", "forward");

@@ -134,7 +134,7 @@ bool GruOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& initializers, c
   }
   int32_t steps = static_cast<int32_t>(input_shape[0]);

-  if (input_defs.size() > 4 && input_defs[4]->Exists()) {
+  if (TensorExists(input_defs, 4)) {
     if (!Contains(initializers, input_defs[4]->Name())) {
       LOGS(logger, ERROR) << "GRU: sequence_lens must be constant";
       return false;

@@ -196,8 +196,8 @@ bool GruOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet& /* initial
   int32_t input_R_type = 0;          // recurrent weight data type
   int32_t input_B_type = 0;          // bias data type
   int32_t input_initial_h_type = 0;  // initial hidden state data type
-  bool has_input_B = input_defs.size() > 3 && input_defs[3]->Exists();
-  bool has_input_initial_h = input_defs.size() > 5 && input_defs[5]->Exists();
+  bool has_input_B = TensorExists(input_defs, 3);
+  bool has_input_initial_h = TensorExists(input_defs, 5);

   if (!GetType(*input_defs[0], input_X_type, logger) ||
       !GetType(*input_defs[1], input_W_type, logger) ||

@@ -229,8 +229,8 @@ bool GruOpBuilder::HasSupportedOutputsImpl(const Node& node,
   const auto& op_type = node.OpType();
   int32_t Y_type = 0;
   int32_t Y_h_type = 0;
-  bool has_Y = output_defs.size() > 0 && output_defs[0]->Exists();
-  bool has_Y_h = output_defs.size() > 1 && output_defs[1]->Exists();
+  bool has_Y = TensorExists(output_defs, 0);
+  bool has_Y_h = TensorExists(output_defs, 1);

   bool Y_supported = has_Y && GetType(*output_defs[0], Y_type, logger);
   bool Y_h_supported = has_Y_h && GetType(*output_defs[1], Y_h_type, logger);

onnxruntime/core/providers/webnn/builders/impl/lstm_op_builder.cc

Lines changed: 16 additions & 16 deletions
@@ -32,7 +32,7 @@ class LstmOpBuilder : public BaseOpBuilder {
 };

 void LstmOpBuilder::AddInitializersToSkip(ModelBuilder& model_builder, const Node& node) const {
-  if (node.InputDefs().size() > 4 && node.InputDefs()[4]->Exists()) {
+  if (TensorExists(node.InputDefs(), 4)) {
     model_builder.AddInitializerToSkip(node.InputDefs()[4]->Name());  // sequence_lens
     model_builder.AddInputToSkip(node.InputDefs()[4]->Name());
   }

@@ -56,7 +56,7 @@ Status LstmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
   options.set("label", node.Name());
   options.set("layout", emscripten::val("iofg"));

-  if (input_defs.size() > 3 && input_defs[3]->Exists()) {
+  if (TensorExists(input_defs, 3)) {
     emscripten::val bias = model_builder.GetOperand(input_defs[3]->Name());
     emscripten::val split_options = emscripten::val::object();
     split_options.set("axis", 1);

@@ -67,13 +67,13 @@ Status LstmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
     options.set("bias", splitted_biases[0]);
     options.set("recurrentBias", splitted_biases[1]);
   }
-  if (input_defs.size() > 5 && input_defs[5]->Exists()) {
+  if (TensorExists(input_defs, 5)) {
     options.set("initialHiddenState", model_builder.GetOperand(input_defs[5]->Name()));
   }
-  if (input_defs.size() > 6 && input_defs[6]->Exists()) {
+  if (TensorExists(input_defs, 6)) {
     options.set("initialCellState", model_builder.GetOperand(input_defs[6]->Name()));
   }
-  if (input_defs.size() > 7 && input_defs[7]->Exists()) {
+  if (TensorExists(input_defs, 7)) {
     options.set("peepholeWeight", model_builder.GetOperand(input_defs[7]->Name()));
   }

@@ -87,9 +87,9 @@ Status LstmOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder, const N
   }

   const auto& output_defs = node.OutputDefs();
-  bool has_Y = output_defs.size() > 0 && output_defs[0]->Exists();
-  bool has_Y_h = output_defs.size() > 1 && output_defs[1]->Exists();
-  bool has_Y_c = output_defs.size() > 2 && output_defs[2]->Exists();
+  bool has_Y = TensorExists(output_defs, 0);
+  bool has_Y_h = TensorExists(output_defs, 1);
+  bool has_Y_c = TensorExists(output_defs, 2);
   options.set("returnSequence", has_Y);

   if (helper.HasAttr("activations")) {

@@ -140,7 +140,7 @@ bool LstmOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& initializers,
   }
   int32_t steps = static_cast<int32_t>(input_shape[0]);

-  if (input_defs.size() > 4 && input_defs[4]->Exists()) {
+  if (TensorExists(input_defs, 4)) {
     if (!Contains(initializers, input_defs[4]->Name())) {
       LOGS(logger, ERROR) << "LSTM: sequence_lens must be constant";
       return false;

@@ -210,10 +210,10 @@ bool LstmOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet& /* initia
   int32_t input5_type = 0;  // initialHiddenState data type
   int32_t input6_type = 0;  // initialCellState data type
   int32_t input7_type = 0;  // peepholeWeight data type
-  bool has_input3 = input_defs.size() > 3 && input_defs[3]->Exists();
-  bool has_input5 = input_defs.size() > 5 && input_defs[5]->Exists();
-  bool has_input6 = input_defs.size() > 6 && input_defs[6]->Exists();
-  bool has_input7 = input_defs.size() > 7 && input_defs[7]->Exists();
+  bool has_input3 = TensorExists(input_defs, 3);
+  bool has_input5 = TensorExists(input_defs, 5);
+  bool has_input6 = TensorExists(input_defs, 6);
+  bool has_input7 = TensorExists(input_defs, 7);

   if (!GetType(*input_defs[0], input0_type, logger) ||
       !GetType(*input_defs[1], input1_type, logger) ||

@@ -253,9 +253,9 @@ bool LstmOpBuilder::HasSupportedOutputsImpl(const Node& node,
   int32_t Y_type = 0;
   int32_t Y_h_type = 0;
   int32_t Y_c_type = 0;
-  bool has_Y = output_defs.size() > 0 && output_defs[0]->Exists();
-  bool has_Y_h = output_defs.size() > 1 && output_defs[1]->Exists();
-  bool has_Y_c = output_defs.size() > 2 && output_defs[2]->Exists();
+  bool has_Y = TensorExists(output_defs, 0);
+  bool has_Y_h = TensorExists(output_defs, 1);
+  bool has_Y_c = TensorExists(output_defs, 2);

   if (has_Y && GetType(*output_defs[0], Y_type, logger)) {
     return IsDataTypeSupportedByOp(op_type, Y_type, wnn_limits, "outputs", "Y", logger);

onnxruntime/core/providers/webnn/builders/impl/normalization_op_builder.cc

Lines changed: 65 additions & 25 deletions
@@ -34,6 +34,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
                                                      const logging::Logger& logger) const {
   const auto& op_type = node.OpType();
   const auto& input_defs = node.InputDefs();
+  const auto& output_defs = node.OutputDefs();
   ORT_RETURN_IF_NOT(input_defs.size() >= 2, op_type, " requires at least two inputs.");

   emscripten::val input = model_builder.GetOperand(input_defs[0]->Name());

@@ -45,7 +46,8 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
   options.set("label", node.Name());

   std::vector<int64_t> scale_shape;
-  ORT_RETURN_IF_NOT(GetShape(*input_defs[1], scale_shape, logger), "Cannot get scale shape");
+  const size_t scale_input_index = op_type == "SkipSimplifiedLayerNormalization" ? 2 : 1;
+  ORT_RETURN_IF_NOT(GetShape(*input_defs[scale_input_index], scale_shape, logger), "Cannot get scale shape");
   const auto scale_size = scale_shape.size();
   // Except LayerNormalization, other normalization ops' scale input should be 1-D.
   if (op_type == "LayerNormalization") {

@@ -55,19 +57,17 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
     ORT_RETURN_IF_NOT(scale_size == 1, "The scale size should be one.");
   }

-  if (input_defs.size() >= 3 && !input_defs[2]->Name().empty()) {
+  emscripten::val scale = model_builder.GetOperand(input_defs[scale_input_index]->Name());
+  options.set("scale", scale);
+
+  const size_t bias_input_index = op_type == "SkipSimplifiedLayerNormalization" ? 3 : 2;
+  emscripten::val bias = emscripten::val::undefined();
+  if (TensorExists(input_defs, bias_input_index)) {
     // Bias input exists, and bias's shape should be the same as scale's shape.
     std::vector<int64_t> bias_shape;
-    ORT_RETURN_IF_NOT(GetShape(*input_defs[2], bias_shape, logger), "Cannot get bias shape");
+    ORT_RETURN_IF_NOT(GetShape(*input_defs[bias_input_index], bias_shape, logger), "Cannot get bias shape");
     ORT_RETURN_IF_NOT(bias_shape == scale_shape, "The bias' shape should be equal to scale's shape.");
-  }
-
-  emscripten::val scale = model_builder.GetOperand(input_defs[1]->Name());
-  options.set("scale", scale);
-
-  if (input_defs.size() >= 3 && !input_defs[2]->Name().empty()) {
-    // Bias input exists, and bias's shape is the same as scale's shape.
-    emscripten::val bias = model_builder.GetOperand(input_defs[2]->Name());
+    bias = model_builder.GetOperand(input_defs[bias_input_index]->Name());
     options.set("bias", bias);
   }

@@ -76,6 +76,8 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
   options.set("epsilon", epsilon);

   emscripten::val output = emscripten::val::undefined();
+  // SkipSimplifiedLayerNormalization's output: input_skip_bias_sum.
+  emscripten::val input_skip_bias_sum = emscripten::val::undefined();
   if (op_type == "BatchNormalization") {
     ORT_RETURN_IF_NOT(input_defs.size() == 5, "BatchNormalization requires five inputs.");
     emscripten::val mean = model_builder.GetOperand(input_defs[3]->Name());

@@ -85,7 +87,9 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
     }

     output = model_builder.GetBuilder().call<emscripten::val>("batchNormalization", input, mean, variance, options);
-  } else if (op_type == "LayerNormalization" || op_type == "SimplifiedLayerNormalization") {
+  } else if (op_type == "LayerNormalization" ||
+             op_type == "SimplifiedLayerNormalization" ||
+             op_type == "SkipSimplifiedLayerNormalization") {
     int64_t axis = helper.Get("axis", -1);
     axis = HandleNegativeAxis(axis, rank);
     std::vector<uint32_t> axes(rank - SafeInt<uint32_t>(axis));

@@ -94,13 +98,17 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
     if (op_type == "LayerNormalization") {
       options.set("axes", emscripten::val::array(axes));
       output = model_builder.GetBuilder().call<emscripten::val>("layerNormalization", input, options);
-    } else {  // SimplifiedLayerNormalization
+    } else {  // SimplifiedLayerNormalization or SkipSimplifiedLayerNormalization
       /**
-      WebNN doesn't support SimplifiedLayerNormalization. So decompose it into a series of ops:
-          X --> Pow --> ReduceMean --> Add --> Sqrt --> Div -> Mul
-                 ^          ^           ^              ^       ^
-                 |          |           |              |       |
-                Y:2        axis     B:epsilon         A:X    A:scale
+      WebNN doesn't support SimplifiedLayerNormalization or SkipSimplifiedLayerNormalization.
+      So decompose it into a series of ops:
+          X --> Pow --> ReduceMean --> Add --> Sqrt --> Div -> Mul -> Add (optional)
+                 ^          ^           ^              ^       ^       ^
+                 |          |           |              |       |       |
+                Y:2        axis     B:epsilon         A:X   A:scale  B:bias
+
+      If it is SkipSimplifiedLayerNormalization and its output input_skip_bias_sum exists,
+      input_skip_bias_sum = X + skip + bias (if it exists)
       */

       int32_t input_type;

@@ -137,6 +145,25 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
       // Mul
      common_options.set("label", node.Name() + "_mul");
       output = model_builder.GetBuilder().call<emscripten::val>("mul", scale, div, common_options);
+
+      // Add (if bias exists)
+      if (!bias.isUndefined()) {
+        common_options.set("label", node.Name() + "_add_bias");
+        output = model_builder.GetBuilder().call<emscripten::val>("add", output, bias, common_options);
+      }
+
+      // SkipSimplifiedLayerNormalization's output input_skip_bias_sum is the sum of input, skip, and bias.
+      if (op_type == "SkipSimplifiedLayerNormalization" && TensorExists(output_defs, 3)) {
+        emscripten::val skip = model_builder.GetOperand(input_defs[1]->Name());
+        common_options.set("label", node.Name() + "_add_skip");
+        input_skip_bias_sum = model_builder.GetBuilder().call<emscripten::val>("add", input, skip, common_options);
+        if (!bias.isUndefined()) {
+          common_options.set("label", node.Name() + "_add_skip_bias");
+          input_skip_bias_sum = model_builder.GetBuilder().call<emscripten::val>(
+              "add", input_skip_bias_sum, bias, common_options);
+        }
+        model_builder.AddOperand(output_defs[3]->Name(), std::move(input_skip_bias_sum));
+      }
     }
   } else if (op_type == "InstanceNormalization") {
     // WebNN spec only supports 4D input for instanceNormalization.

@@ -188,7 +215,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
   } else {
     return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Unsupported normalization op: ", op_type);
   }
-  model_builder.AddOperand(node.OutputDefs()[0]->Name(), std::move(output));
+  model_builder.AddOperand(output_defs[0]->Name(), std::move(output));

   return Status::OK();
 }

@@ -215,9 +242,21 @@ bool NormalizationOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& initi
   }

   const auto& output_defs = node.OutputDefs();
-  if (output_defs.size() != 1) {
-    LOGS(logger, VERBOSE) << op_type << " output count must be one.";
-    return false;
+  if (op_type == "SkipSimplifiedLayerNormalization") {
+    if (output_defs.size() > 4) {
+      LOGS(logger, VERBOSE) << "SkipSimplifiedLayerNormalization output count must not exceed 4.";
+      return false;
+    }
+    if (TensorExists(output_defs, 1) || TensorExists(output_defs, 2)) {
+      // Output mean and inv_std_var are used for training mode, which is not supported.
+      LOGS(logger, VERBOSE) << "SkipSimplifiedLayerNormalization's output mean and inv_std_var are not supported.";
+      return false;
+    }
+  } else {
+    if (output_defs.size() != 1) {
+      LOGS(logger, VERBOSE) << op_type << " output count must be one.";
+      return false;
+    }
   }

   if (op_type == "BatchNormalization" && helper.Get("training_mode", 0)) {

@@ -238,9 +277,9 @@ bool NormalizationOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet&
   int32_t input2_type;  // B data type
   int32_t input3_type;  // mean data type
   int32_t input4_type;  // var data type
-  bool has_input2 = input_defs.size() > 2 && input_defs[2]->Exists();
-  bool has_input3 = input_defs.size() > 3 && input_defs[3]->Exists();
-  bool has_input4 = input_defs.size() > 3 && input_defs[4]->Exists();
+  bool has_input2 = TensorExists(input_defs, 2);
+  bool has_input3 = TensorExists(input_defs, 3);
+  bool has_input4 = TensorExists(input_defs, 4);

   if (!GetType(*input_defs[0], input0_type, logger) ||
       !GetType(*input_defs[1], input1_type, logger) ||

@@ -277,6 +316,7 @@ void CreateNormalizationOpBuilder(const std::string& op_type, OpBuilderRegistrat
       "InstanceNormalization",
       "LayerNormalization",
       "SimplifiedLayerNormalization",
+      "SkipSimplifiedLayerNormalization",
   };

   op_registrations.builders.push_back(std::make_unique<NormalizationOpBuilder>());
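As a quick numeric sanity check of the decomposition above (values chosen arbitrarily): with scale = 1 and epsilon = 0, the normalized row should come out with unit root mean square.

```cpp
#include <cassert>
#include <cmath>

int main() {
  // X = {3, 4}: Pow(2) -> ReduceMean gives (9 + 16) / 2 = 12.5
  const float x0 = 3.0f, x1 = 4.0f, epsilon = 0.0f;
  const float rms = std::sqrt((x0 * x0 + x1 * x1) / 2.0f + epsilon);  // Add -> Sqrt
  const float y0 = x0 / rms, y1 = x1 / rms;                           // Div (Mul by scale = 1)
  assert(std::fabs((y0 * y0 + y1 * y1) / 2.0f - 1.0f) < 1e-6f);       // mean(y^2) == 1
  return 0;
}
```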
