@@ -274,6 +274,68 @@ func.func @test_gemm_alpha_beta(%arg0: !torch.vtensor<[3,5],f32>, %arg1: !torch.

// -----

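+ // LpPool with p = 2 decomposes as pow(sum_pool(|x|^p), 1/p): abs, raise to
+ // p, pool, then take the p-th root. The sum pool is an avg_pool2d whose
+ // divisor_override is the constant 1, so no rescaling is needed afterwards.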
+ // CHECK-LABEL: func.func @test_lppool_2d
+ func.func @test_lppool_2d(%arg0: !torch.vtensor<[1,3,32,32],f32>) -> !torch.vtensor<[1,3,31,31],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 22 : si64} {
+ // CHECK: %[[I1:.*]] = torch.constant.int 1
+ // CHECK: %[[I2:.*]] = torch.constant.int 2
+ // CHECK: %[[NE:.*]] = torch.aten.mul %[[I2]], %[[I1]] : !torch.int, !torch.int -> !torch.int
+ // CHECK: %[[I2_1:.*]] = torch.constant.int 2
+ // CHECK: %[[NE1:.*]] = torch.aten.mul %[[I2_1]], %[[NE]] : !torch.int, !torch.int -> !torch.int
+ // CHECK: %[[K:.*]] = torch.prim.ListConstruct %[[I2]], %[[I2_1]] : (!torch.int, !torch.int) -> !torch.list<int>
+ // CHECK: %[[I0:.*]] = torch.constant.int 0
+ // CHECK: %[[I0_1:.*]] = torch.constant.int 0
+ // CHECK: %[[I0_2:.*]] = torch.constant.int 0
+ // CHECK: %[[I0_3:.*]] = torch.constant.int 0
+ // CHECK: %[[PAD:.*]] = torch.prim.ListConstruct %[[I0]], %[[I0_1]], %[[I0_2]], %[[I0_3]] : (!torch.int, !torch.int, !torch.int, !torch.int) -> !torch.list<int>
+ // CHECK: %[[I1_1:.*]] = torch.constant.int 1
+ // CHECK: %[[I1_2:.*]] = torch.constant.int 1
+ // CHECK: %[[STR:.*]] = torch.prim.ListConstruct %[[I1_1]], %[[I1_2]] : (!torch.int, !torch.int) -> !torch.list<int>
+ // CHECK: %[[CEIL:.*]] = torch.constant.bool false
+ // CHECK: %[[CIP:.*]] = torch.constant.bool true
+ // CHECK: %[[P:.*]] = torch.constant.int 2
+ // CHECK: %[[ABS:.*]] = torch.aten.abs %arg0 : !torch.vtensor<[1,3,32,32],f32> -> !torch.vtensor<[1,3,32,32],f32>
+ // CHECK: %[[POW:.*]] = torch.aten.pow.Tensor_Scalar %[[ABS]], %[[P]] : !torch.vtensor<[1,3,32,32],f32>, !torch.int -> !torch.vtensor<[1,3,32,32],f32>
+ // CHECK: %[[AVG:.*]] = torch.aten.avg_pool2d %[[POW]], %[[K]], %[[STR]], %[[PAD]], %[[CEIL]], %[[CIP]], %[[I1]] : !torch.vtensor<[1,3,32,32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool, !torch.int -> !torch.vtensor<[1,3,31,31],f32>
+ // CHECK: %[[INVP:.*]] = torch.constant.float 5.000000e-01
+ // CHECK: torch.aten.pow.Tensor_Scalar %[[AVG]], %[[INVP]] : !torch.vtensor<[1,3,31,31],f32>, !torch.float -> !torch.vtensor<[1,3,31,31],f32>
+ %0 = torch.operator "onnx.LpPool"(%arg0) {torch.onnx.kernel_shape = [2 : si64, 2 : si64]} : (!torch.vtensor<[1,3,32,32],f32>) -> !torch.vtensor<[1,3,31,31],f32>
+ return %0 : !torch.vtensor<[1,3,31,31],f32>
+ }
+
+ // -----
+
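+ // Same decomposition in 1-d, but aten.avg_pool1d has no divisor_override
+ // operand; the windowed mean is converted back to a sum by mul.Scalar with
+ // the kernel element count (%[[NE]]) before the final pow by 1/p.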
+ // CHECK-LABEL: func.func @test_lppool_1d
+ func.func @test_lppool_1d(%arg0: !torch.vtensor<[1,3,32],f32>) -> !torch.vtensor<[1,3,31],f32> attributes {torch.onnx_meta.ir_version = 7 : si64, torch.onnx_meta.opset_version = 22 : si64} {
+ // CHECK: %[[I1:.*]] = torch.constant.int 1
+ // CHECK: %[[I2:.*]] = torch.constant.int 2
+ // CHECK: %[[NE:.*]] = torch.aten.mul %[[I2]], %[[I1]] : !torch.int, !torch.int -> !torch.int
+ // CHECK: %[[K:.*]] = torch.prim.ListConstruct %[[I2]] : (!torch.int) -> !torch.list<int>
+ // CHECK: %[[I0:.*]] = torch.constant.int 0
+ // CHECK: %[[I0_1:.*]] = torch.constant.int 0
+ // CHECK: %[[PAD:.*]] = torch.prim.ListConstruct %[[I0]], %[[I0_1]] : (!torch.int, !torch.int) -> !torch.list<int>
+ // CHECK: %[[I1_1:.*]] = torch.constant.int 1
+ // CHECK: %[[STR:.*]] = torch.prim.ListConstruct %[[I1_1]] : (!torch.int) -> !torch.list<int>
+ // CHECK: %[[CEIL:.*]] = torch.constant.bool false
+ // CHECK: %[[CIP:.*]] = torch.constant.bool true
+ // CHECK: %[[P:.*]] = torch.constant.int 2
+ // CHECK: %[[ABS:.*]] = torch.aten.abs %arg0 : !torch.vtensor<[1,3,32],f32> -> !torch.vtensor<[1,3,32],f32>
+ // CHECK: %[[POW:.*]] = torch.aten.pow.Tensor_Scalar %[[ABS]], %[[P]] : !torch.vtensor<[1,3,32],f32>, !torch.int -> !torch.vtensor<[1,3,32],f32>
+ // CHECK: %[[AVG:.*]] = torch.aten.avg_pool1d %[[POW]], %[[K]], %[[STR]], %[[PAD]], %[[CEIL]], %[[CIP]] : !torch.vtensor<[1,3,32],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.bool -> !torch.vtensor<[1,3,31],f32>
+ // CHECK: %[[POW_0:.*]] = torch.aten.mul.Scalar %[[AVG]], %[[NE]] : !torch.vtensor<[1,3,31],f32>, !torch.int -> !torch.vtensor<[1,3,31],f32>
+ // CHECK: %[[INVP:.*]] = torch.constant.float 5.000000e-01
+ // CHECK: torch.aten.pow.Tensor_Scalar %[[POW_0]], %[[INVP]] : !torch.vtensor<[1,3,31],f32>, !torch.float -> !torch.vtensor<[1,3,31],f32>
+ %0 = torch.operator "onnx.LpPool"(%arg0) {torch.onnx.kernel_shape = [2 : si64]} : (!torch.vtensor<[1,3,32],f32>) -> !torch.vtensor<[1,3,31],f32>
+ return %0 : !torch.vtensor<[1,3,31],f32>
+ }
+
+ // -----
+
// CHECK-LABEL: func.func @test_layer_norm
func.func @test_layer_norm(%arg0: !torch.vtensor<[3,4],f32>, %arg1: !torch.vtensor<[3,4],f32>, %arg2: !torch.vtensor<[3,4],f32>) -> (!torch.vtensor<[3,4],f32>, !torch.vtensor<[1,1],f32>, !torch.vtensor<[1,1],f32>)
attributes {torch.onnx_meta.ir_version = 6 : si64, torch.onnx_meta.opset_version = 17 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {