@@ -34,6 +34,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
                                                      const logging::Logger& logger) const {
   const auto& op_type = node.OpType();
   const auto& input_defs = node.InputDefs();
+  const auto& output_defs = node.OutputDefs();
   ORT_RETURN_IF_NOT(input_defs.size() >= 2, op_type, " requires at least two inputs.");

   emscripten::val input = model_builder.GetOperand(input_defs[0]->Name());
@@ -45,7 +46,8 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
   options.set("label", node.Name());

   std::vector<int64_t> scale_shape;
-  ORT_RETURN_IF_NOT(GetShape(*input_defs[1], scale_shape, logger), "Cannot get scale shape");
+  const size_t scale_input_index = op_type == "SkipSimplifiedLayerNormalization" ? 2 : 1;
+  ORT_RETURN_IF_NOT(GetShape(*input_defs[scale_input_index], scale_shape, logger), "Cannot get scale shape");
   const auto scale_size = scale_shape.size();
   // Except LayerNormalization, other normalization ops' scale input should be 1-D.
   if (op_type == "LayerNormalization") {
@@ -55,19 +57,17 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
     ORT_RETURN_IF_NOT(scale_size == 1, "The scale size should be one.");
   }

-  if (input_defs.size() >= 3 && !input_defs[2]->Name().empty()) {
+  emscripten::val scale = model_builder.GetOperand(input_defs[scale_input_index]->Name());
+  options.set("scale", scale);
+
+  const size_t bias_input_index = op_type == "SkipSimplifiedLayerNormalization" ? 3 : 2;
+  emscripten::val bias = emscripten::val::undefined();
+  if (TensorExists(input_defs, bias_input_index)) {
     // Bias input exists, and bias's shape should be the same as scale's shape.
     std::vector<int64_t> bias_shape;
-    ORT_RETURN_IF_NOT(GetShape(*input_defs[2], bias_shape, logger), "Cannot get bias shape");
+    ORT_RETURN_IF_NOT(GetShape(*input_defs[bias_input_index], bias_shape, logger), "Cannot get bias shape");
     ORT_RETURN_IF_NOT(bias_shape == scale_shape, "The bias' shape should be equal to scale's shape.");
-  }
-
-  emscripten::val scale = model_builder.GetOperand(input_defs[1]->Name());
-  options.set("scale", scale);
-
-  if (input_defs.size() >= 3 && !input_defs[2]->Name().empty()) {
-    // Bias input exists, and bias's shape is the same as scale's shape.
-    emscripten::val bias = model_builder.GetOperand(input_defs[2]->Name());
+    bias = model_builder.GetOperand(input_defs[bias_input_index]->Name());
     options.set("bias", bias);
   }

@@ -76,6 +76,8 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
   options.set("epsilon", epsilon);

   emscripten::val output = emscripten::val::undefined();
+  // SkipSimplifiedLayerNormalization's output: input_skip_bias_sum.
+  emscripten::val input_skip_bias_sum = emscripten::val::undefined();
   if (op_type == "BatchNormalization") {
     ORT_RETURN_IF_NOT(input_defs.size() == 5, "BatchNormalization requires five inputs.");
     emscripten::val mean = model_builder.GetOperand(input_defs[3]->Name());
@@ -85,7 +87,9 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
     }

     output = model_builder.GetBuilder().call<emscripten::val>("batchNormalization", input, mean, variance, options);
-  } else if (op_type == "LayerNormalization" || op_type == "SimplifiedLayerNormalization") {
+  } else if (op_type == "LayerNormalization" ||
+             op_type == "SimplifiedLayerNormalization" ||
+             op_type == "SkipSimplifiedLayerNormalization") {
     int64_t axis = helper.Get("axis", -1);
     axis = HandleNegativeAxis(axis, rank);
     std::vector<uint32_t> axes(rank - SafeInt<uint32_t>(axis));
@@ -94,13 +98,17 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
     if (op_type == "LayerNormalization") {
       options.set("axes", emscripten::val::array(axes));
       output = model_builder.GetBuilder().call<emscripten::val>("layerNormalization", input, options);
-    } else {  // SimplifiedLayerNormalization
+    } else {  // SimplifiedLayerNormalization or SkipSimplifiedLayerNormalization
       /**
-      WebNN doesn't support SimplifiedLayerNormalization. So decompose it into a series of ops:
-      X --> Pow --> ReduceMean --> Add --> Sqrt --> Div -> Mul
-             ^          ^           ^               ^      ^
-             |          |           |               |      |
-            Y:2        axis     B:epsilon          A:X  A:scale
+      WebNN doesn't support SimplifiedLayerNormalization or SkipSimplifiedLayerNormalization.
+      So decompose it into a series of ops:
+      X --> Pow --> ReduceMean --> Add --> Sqrt --> Div -> Mul -> Add (optional)
+             ^          ^           ^               ^      ^       ^
+             |          |           |               |      |       |
+            Y:2        axis     B:epsilon          A:X  A:scale  B:bias
+
+      If it is SkipSimplifiedLayerNormalization and its output input_skip_bias_sum exists,
+      input_skip_bias_sum = X + skip + bias (if it exists)
       */

       int32_t input_type;
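As a reading aid, here is a minimal standalone sketch of the arithmetic this decomposition produces for one row, reduced over its last axis. The helper name, signature, and use of std::vector are illustrative assumptions for this note only; they are not part of ONNX Runtime or WebNN.

// Sketch only: simplified_layer_norm is a hypothetical helper, not an ORT/WebNN API.
#include <cmath>
#include <cstddef>
#include <vector>

// y = X / sqrt(mean(X^2) + epsilon) * scale [+ bias], matching
// Pow -> ReduceMean -> Add -> Sqrt -> Div -> Mul -> Add (optional) above.
std::vector<float> simplified_layer_norm(const std::vector<float>& x,
                                         const std::vector<float>& scale,
                                         const std::vector<float>* bias,  // optional, may be null
                                         float epsilon) {
  double mean_sq = 0.0;  // Pow(Y:2) -> ReduceMean(axis)
  for (float v : x) mean_sq += static_cast<double>(v) * v;
  mean_sq /= static_cast<double>(x.size());
  const float denom = std::sqrt(static_cast<float>(mean_sq) + epsilon);  // Add(B:epsilon) -> Sqrt
  std::vector<float> y(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    y[i] = x[i] / denom * scale[i];             // Div(A:X) -> Mul(A:scale)
    if (bias != nullptr) y[i] += (*bias)[i];    // Add(B:bias), only when bias exists
  }
  return y;
}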
@@ -137,6 +145,25 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
       // Mul
       common_options.set("label", node.Name() + "_mul");
       output = model_builder.GetBuilder().call<emscripten::val>("mul", scale, div, common_options);
+
+      // Add (if bias exists)
+      if (!bias.isUndefined()) {
+        common_options.set("label", node.Name() + "_add_bias");
+        output = model_builder.GetBuilder().call<emscripten::val>("add", output, bias, common_options);
+      }
+
+      // SkipSimplifiedLayerNormalization's output input_skip_bias_sum is the sum of input, skip, and bias.
+      if (op_type == "SkipSimplifiedLayerNormalization" && TensorExists(output_defs, 3)) {
+        emscripten::val skip = model_builder.GetOperand(input_defs[1]->Name());
+        common_options.set("label", node.Name() + "_add_skip");
+        input_skip_bias_sum = model_builder.GetBuilder().call<emscripten::val>("add", input, skip, common_options);
+        if (!bias.isUndefined()) {
+          common_options.set("label", node.Name() + "_add_skip_bias");
+          input_skip_bias_sum = model_builder.GetBuilder().call<emscripten::val>(
+              "add", input_skip_bias_sum, bias, common_options);
+        }
+        model_builder.AddOperand(output_defs[3]->Name(), std::move(input_skip_bias_sum));
+      }
     }
   } else if (op_type == "InstanceNormalization") {
     // WebNN spec only supports 4D input for instanceNormalization.
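The optional input_skip_bias_sum output added above is just the element-wise pre-normalization sum described in the earlier comment; a sketch under the same illustrative assumptions as the previous one:

// Sketch only: output 3 of SkipSimplifiedLayerNormalization, per the comment
// input_skip_bias_sum = X + skip + bias (if it exists).
#include <cstddef>
#include <vector>

std::vector<float> input_skip_bias_sum(const std::vector<float>& x,
                                       const std::vector<float>& skip,
                                       const std::vector<float>* bias) {  // optional, may be null
  std::vector<float> sum(x.size());
  for (std::size_t i = 0; i < x.size(); ++i) {
    sum[i] = x[i] + skip[i] + (bias != nullptr ? (*bias)[i] : 0.0f);
  }
  return sum;
}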
@@ -188,7 +215,7 @@ Status NormalizationOpBuilder::AddToModelBuilderImpl(ModelBuilder& model_builder
   } else {
     return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Unsupported normalization op: ", op_type);
   }
-  model_builder.AddOperand(node.OutputDefs()[0]->Name(), std::move(output));
+  model_builder.AddOperand(output_defs[0]->Name(), std::move(output));

   return Status::OK();
 }
@@ -215,9 +242,21 @@ bool NormalizationOpBuilder::IsOpSupportedImpl(const InitializedTensorSet& initi
   }

   const auto& output_defs = node.OutputDefs();
-  if (output_defs.size() != 1) {
-    LOGS(logger, VERBOSE) << op_type << " output count must be one.";
-    return false;
+  if (op_type == "SkipSimplifiedLayerNormalization") {
+    if (output_defs.size() > 4) {
+      LOGS(logger, VERBOSE) << "SkipSimplifiedLayerNormalization output count must not exceed 4.";
+      return false;
+    }
+    if (TensorExists(output_defs, 1) || TensorExists(output_defs, 2)) {
+      // Output mean and inv_std_var are used for training mode, which is not supported.
+      LOGS(logger, VERBOSE) << "SkipSimplifiedLayerNormalization's output mean and inv_std_var are not supported.";
+      return false;
+    }
+  } else {
+    if (output_defs.size() != 1) {
+      LOGS(logger, VERBOSE) << op_type << " output count must be one.";
+      return false;
+    }
   }

   if (op_type == "BatchNormalization" && helper.Get("training_mode", 0)) {
@@ -238,9 +277,9 @@ bool NormalizationOpBuilder::HasSupportedInputsImpl(const InitializedTensorSet&
   int32_t input2_type;  // B data type
   int32_t input3_type;  // mean data type
   int32_t input4_type;  // var data type
-  bool has_input2 = input_defs.size() > 2 && input_defs[2]->Exists();
-  bool has_input3 = input_defs.size() > 3 && input_defs[3]->Exists();
-  bool has_input4 = input_defs.size() > 3 && input_defs[4]->Exists();
+  bool has_input2 = TensorExists(input_defs, 2);
+  bool has_input3 = TensorExists(input_defs, 3);
+  bool has_input4 = TensorExists(input_defs, 4);

   if (!GetType(*input_defs[0], input0_type, logger) ||
       !GetType(*input_defs[1], input1_type, logger) ||
@@ -277,6 +316,7 @@ void CreateNormalizationOpBuilder(const std::string& op_type, OpBuilderRegistrat
       "InstanceNormalization",
       "LayerNormalization",
       "SimplifiedLayerNormalization",
+      "SkipSimplifiedLayerNormalization",
   };

   op_registrations.builders.push_back(std::make_unique<NormalizationOpBuilder>());