Skip to content

Commit c1892de

Browse files
authored
Add support for the padding variations of conv op (#3883)
A conv op defined with padding = "same" or "valid" produces the padding variant of the op, such as `conv2d.padding` for a 2-D convolution. This PR adds these conv variants to the torch-mlir registry, along with a decomposition of these ops into `aten.convolution`, so that they can go through the different pass pipelines.
1 parent 92d0f04 commit c1892de

File tree

7 files changed

+459
-0
lines changed

7 files changed

+459
-0
lines changed

include/torch-mlir/Dialect/Torch/IR/GeneratedTorchOps.td

Lines changed: 87 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6684,6 +6684,35 @@ def Torch_AtenConv3dOp : Torch_Op<"aten.conv3d", [
66846684
}];
66856685
}
66866686

6687+
// `aten::conv3d.padding`: the conv3d overload that takes a string padding
// argument ("same"/"valid") in place of an explicit int-list padding.
def Torch_AtenConv3dPaddingOp : Torch_Op<"aten.conv3d.padding", [
    AllowsTypeRefinement,
    HasValueSemantics,
    ReadOnly
  ]> {
  let summary = "Generated op for `aten::conv3d.padding : (Tensor, Tensor, Tensor?, int[], str, int[], int) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$input,
    AnyTorchTensorType:$weight,
    AnyTorchOptionalTensorType:$bias,
    AnyTorchListOfTorchIntType:$stride,
    Torch_StringType:$padding,
    AnyTorchListOfTorchIntType:$dilation,
    Torch_IntType:$groups
  );
  let results = (outs
    AnyTorchOptionalTensorType:$result
  );
  let hasCustomAssemblyFormat = 1;
  let extraClassDefinition = [{
    ParseResult AtenConv3dPaddingOp::parse(OpAsmParser &parser, OperationState &result) {
      return parseDefaultTorchOp(parser, result, 7, 1);
    }
    void AtenConv3dPaddingOp::print(OpAsmPrinter &printer) {
      printDefaultTorchOp(printer, *this, 7, 1);
    }
  }];
}
6715+
66876716
def Torch_AtenConv2dOp : Torch_Op<"aten.conv2d", [
66886717
AllowsTypeRefinement,
66896718
HasValueSemantics,
@@ -6713,6 +6742,35 @@ def Torch_AtenConv2dOp : Torch_Op<"aten.conv2d", [
67136742
}];
67146743
}
67156744

6745+
// `aten::conv2d.padding`: the conv2d overload that takes a string padding
// argument ("same"/"valid") in place of an explicit int-list padding.
def Torch_AtenConv2dPaddingOp : Torch_Op<"aten.conv2d.padding", [
    AllowsTypeRefinement,
    HasValueSemantics,
    ReadOnly
  ]> {
  let summary = "Generated op for `aten::conv2d.padding : (Tensor, Tensor, Tensor?, int[], str, int[], int) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$input,
    AnyTorchTensorType:$weight,
    AnyTorchOptionalTensorType:$bias,
    AnyTorchListOfTorchIntType:$stride,
    Torch_StringType:$padding,
    AnyTorchListOfTorchIntType:$dilation,
    Torch_IntType:$groups
  );
  let results = (outs
    AnyTorchOptionalTensorType:$result
  );
  let hasCustomAssemblyFormat = 1;
  let extraClassDefinition = [{
    ParseResult AtenConv2dPaddingOp::parse(OpAsmParser &parser, OperationState &result) {
      return parseDefaultTorchOp(parser, result, 7, 1);
    }
    void AtenConv2dPaddingOp::print(OpAsmPrinter &printer) {
      printDefaultTorchOp(printer, *this, 7, 1);
    }
  }];
}
6773+
67166774
def Torch_AtenConv1dOp : Torch_Op<"aten.conv1d", [
67176775
AllowsTypeRefinement,
67186776
HasValueSemantics,
@@ -6742,6 +6800,35 @@ def Torch_AtenConv1dOp : Torch_Op<"aten.conv1d", [
67426800
}];
67436801
}
67446802

6803+
// `aten::conv1d.padding`: the conv1d overload that takes a string padding
// argument ("same"/"valid") in place of an explicit int-list padding.
def Torch_AtenConv1dPaddingOp : Torch_Op<"aten.conv1d.padding", [
    AllowsTypeRefinement,
    HasValueSemantics,
    ReadOnly
  ]> {
  let summary = "Generated op for `aten::conv1d.padding : (Tensor, Tensor, Tensor?, int[], str, int[], int) -> (Tensor)`";
  let arguments = (ins
    AnyTorchTensorType:$input,
    AnyTorchTensorType:$weight,
    AnyTorchOptionalTensorType:$bias,
    AnyTorchListOfTorchIntType:$stride,
    Torch_StringType:$padding,
    AnyTorchListOfTorchIntType:$dilation,
    Torch_IntType:$groups
  );
  let results = (outs
    AnyTorchOptionalTensorType:$result
  );
  let hasCustomAssemblyFormat = 1;
  let extraClassDefinition = [{
    ParseResult AtenConv1dPaddingOp::parse(OpAsmParser &parser, OperationState &result) {
      return parseDefaultTorchOp(parser, result, 7, 1);
    }
    void AtenConv1dPaddingOp::print(OpAsmPrinter &printer) {
      printDefaultTorchOp(printer, *this, 7, 1);
    }
  }];
}
6831+
67456832
def Torch_AtenConvTranspose1dOp : Torch_Op<"aten.conv_transpose1d", [
67466833
AllowsTypeRefinement,
67476834
HasValueSemantics,

lib/Dialect/Torch/Transforms/AbstractInterpLibrary.cpp

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10024,10 +10024,65 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
1002410024
" %0 = call @__torch__.torch.jit._shape_functions.conv2d(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.int) -> !torch.list<int>\n"
1002510025
" return %0 : !torch.list<int>\n"
1002610026
" }\n"
10027+
" func.func @\"__torch_mlir_shape_fn.aten.conv2d.padding\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.str, %arg5: !torch.list<int>, %arg6: !torch.int) -> !torch.list<int> {\n"
10028+
" %0 = call @__torch__._conv_padding(%arg1, %arg5, %arg4) : (!torch.list<int>, !torch.list<int>, !torch.str) -> !torch.list<int>\n"
10029+
" %1 = call @__torch__.torch.jit._shape_functions.conv2d(%arg0, %arg1, %arg2, %arg3, %0, %arg5, %arg6) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.int) -> !torch.list<int>\n"
10030+
" return %1 : !torch.list<int>\n"
10031+
" }\n"
10032+
" func.func @__torch__._conv_padding(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.str) -> !torch.list<int> {\n"
10033+
" %true = torch.constant.bool true\n"
10034+
" %int-1 = torch.constant.int -1\n"
10035+
" %str = torch.constant.str \"same\"\n"
10036+
" %none = torch.constant.none\n"
10037+
" %str_0 = torch.constant.str \"AssertionError: conv: weight must be at least 3 dimensional.\"\n"
10038+
" %int2 = torch.constant.int 2\n"
10039+
" %int0 = torch.constant.int 0\n"
10040+
" %int1 = torch.constant.int 1\n"
10041+
" %0 = torch.aten.len.t %arg0 : !torch.list<int> -> !torch.int\n"
10042+
" %1 = torch.aten.gt.int %0, %int2 : !torch.int, !torch.int -> !torch.bool\n"
10043+
" torch.prim.If %1 -> () {\n"
10044+
" torch.prim.If.yield\n"
10045+
" } else {\n"
10046+
" torch.prim.RaiseException %str_0, %none : !torch.str, !torch.none\n"
10047+
" torch.prim.If.yield\n"
10048+
" }\n"
10049+
" %2 = torch.aten.sub.int %0, %int2 : !torch.int, !torch.int -> !torch.int\n"
10050+
" %3 = torch.prim.ListConstruct %int0 : (!torch.int) -> !torch.list<int>\n"
10051+
" %4 = torch.aten.mul.left_t %3, %2 : !torch.list<int>, !torch.int -> !torch.list<int>\n"
10052+
" %5 = torch.aten.eq.str %arg2, %str : !torch.str, !torch.str -> !torch.bool\n"
10053+
" torch.prim.If %5 -> () {\n"
10054+
" %6 = torch.aten.sub.int %2, %int1 : !torch.int, !torch.int -> !torch.int\n"
10055+
" %7 = torch.aten.len.t %arg1 : !torch.list<int> -> !torch.int\n"
10056+
" %8 = torch.aten.__range_length %6, %int-1, %int-1 : !torch.int, !torch.int, !torch.int -> !torch.int\n"
10057+
" %9 = torch.prim.ListConstruct %7, %8 : (!torch.int, !torch.int) -> !torch.list<int>\n"
10058+
" %10 = torch.prim.min.self_int %9 : !torch.list<int> -> !torch.int\n"
10059+
" torch.prim.Loop %10, %true, init() {\n"
10060+
" ^bb0(%arg3: !torch.int):\n"
10061+
" %11 = torch.aten.__getitem__.t %arg1, %arg3 : !torch.list<int>, !torch.int -> !torch.int\n"
10062+
" %12 = torch.aten.__derive_index %arg3, %6, %int-1 : !torch.int, !torch.int, !torch.int -> !torch.int\n"
10063+
" %13 = torch.aten.add.int %int2, %12 : !torch.int, !torch.int -> !torch.int\n"
10064+
" %14 = torch.aten.__getitem__.t %arg0, %13 : !torch.list<int>, !torch.int -> !torch.int\n"
10065+
" %15 = torch.aten.sub.int %14, %int1 : !torch.int, !torch.int -> !torch.int\n"
10066+
" %16 = torch.aten.mul.int %11, %15 : !torch.int, !torch.int -> !torch.int\n"
10067+
" %17 = torch.aten.floordiv.int %16, %int2 : !torch.int, !torch.int -> !torch.int\n"
10068+
" %18 = torch.aten._set_item.t %4, %12, %17 : !torch.list<int>, !torch.int, !torch.int -> !torch.list<int>\n"
10069+
" torch.prim.Loop.condition %true, iter()\n"
10070+
" } : (!torch.int, !torch.bool) -> ()\n"
10071+
" torch.prim.If.yield\n"
10072+
" } else {\n"
10073+
" torch.prim.If.yield\n"
10074+
" }\n"
10075+
" return %4 : !torch.list<int>\n"
10076+
" }\n"
1002710077
" func.func @\"__torch_mlir_shape_fn.aten.conv3d\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.int) -> !torch.list<int> {\n"
1002810078
" %0 = call @__torch__.torch.jit._shape_functions.conv3d(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %arg6) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.int) -> !torch.list<int>\n"
1002910079
" return %0 : !torch.list<int>\n"
1003010080
" }\n"
10081+
" func.func @\"__torch_mlir_shape_fn.aten.conv3d.padding\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.str, %arg5: !torch.list<int>, %arg6: !torch.int) -> !torch.list<int> {\n"
10082+
" %0 = call @__torch__._conv_padding(%arg1, %arg5, %arg4) : (!torch.list<int>, !torch.list<int>, !torch.str) -> !torch.list<int>\n"
10083+
" %1 = call @__torch__.torch.jit._shape_functions.conv3d(%arg0, %arg1, %arg2, %arg3, %0, %arg5, %arg6) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.int) -> !torch.list<int>\n"
10084+
" return %1 : !torch.list<int>\n"
10085+
" }\n"
1003110086
" func.func @\"__torch_mlir_shape_fn.aten.conv_transpose2d.input\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.int, %arg7: !torch.list<int>) -> !torch.list<int> {\n"
1003210087
" %0 = torch.derefine %arg3 : !torch.list<int> to !torch.optional<list<int>>\n"
1003310088
" %1 = torch.derefine %arg4 : !torch.list<int> to !torch.optional<list<int>>\n"
@@ -10097,6 +10152,14 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
1009710152
" %1 = call @__torch__.torch.jit._shape_functions.conv_forwards(%arg0, %arg1, %arg2, %arg3, %arg4, %arg5, %false, %0, %int1) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>\n"
1009810153
" return %1 : !torch.list<int>\n"
1009910154
" }\n"
10155+
" func.func @\"__torch_mlir_shape_fn.aten.conv1d.padding\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.str, %arg5: !torch.list<int>, %arg6: !torch.int) -> !torch.list<int> {\n"
10156+
" %false = torch.constant.bool false\n"
10157+
" %int1 = torch.constant.int 1\n"
10158+
" %0 = call @__torch__._conv_padding(%arg1, %arg5, %arg4) : (!torch.list<int>, !torch.list<int>, !torch.str) -> !torch.list<int>\n"
10159+
" %1 = torch.prim.ListConstruct : () -> !torch.list<int>\n"
10160+
" %2 = call @__torch__.torch.jit._shape_functions.conv_forwards(%arg0, %arg1, %arg2, %arg3, %0, %arg5, %false, %1, %int1) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>\n"
10161+
" return %2 : !torch.list<int>\n"
10162+
" }\n"
1010010163
" func.func @\"__torch_mlir_shape_fn.aten.conv_transpose1d\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.optional<list<int>>, %arg3: !torch.list<int>, %arg4: !torch.list<int>, %arg5: !torch.list<int>, %arg6: !torch.int, %arg7: !torch.list<int>) -> !torch.list<int> {\n"
1010110164
" %true = torch.constant.bool true\n"
1010210165
" %0 = call @\"__torch_mlir_shape_fn.aten.convolution\"(%arg0, %arg1, %arg2, %arg3, %arg4, %arg7, %true, %arg5, %arg6) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.list<int>\n"

lib/Dialect/Torch/Transforms/DecomposeComplexOps.cpp

Lines changed: 82 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5175,6 +5175,82 @@ class DecomposeAtenConv2dOp : public OpRewritePattern<AtenConv2dOp> {
51755175
};
51765176
} // namespace
51775177

5178+
// Decompose aten.conv(1/2/3)d.padding to aten.convolution
5179+
namespace {
5180+
template <typename ConvPaddingOp>
5181+
class DecomposeAtenConvPaddingOp : public OpRewritePattern<ConvPaddingOp> {
5182+
public:
5183+
using OpRewritePattern<ConvPaddingOp>::OpRewritePattern;
5184+
LogicalResult matchAndRewrite(ConvPaddingOp op,
5185+
PatternRewriter &rewriter) const override {
5186+
5187+
Location loc = op.getLoc();
5188+
5189+
Value weight = op.getWeight();
5190+
std::optional<unsigned> maybeRank = getTensorRank(weight);
5191+
if (!maybeRank) {
5192+
return rewriter.notifyMatchFailure(op, "expected weight to have a rank");
5193+
}
5194+
unsigned rank = *maybeRank;
5195+
// first 2 dimensions of weight are out_channels and in_channels / groups
5196+
if (rank < 3)
5197+
return rewriter.notifyMatchFailure(
5198+
op, "ConvPaddingOp weight must be at least 3 dimensional.");
5199+
5200+
std::string padding_str;
5201+
if (!matchPattern(op.getPadding(), m_TorchConstantStr(padding_str)))
5202+
return rewriter.notifyMatchFailure(op,
5203+
"padding must be a constant string");
5204+
5205+
Value zero = rewriter.create<Torch::ConstantIntOp>(
5206+
loc, rewriter.getI64IntegerAttr(0));
5207+
5208+
SmallVector<Value> paddingValues;
5209+
if (padding_str == "valid") {
5210+
// valid means no padding
5211+
for (unsigned iRank = 2; iRank < rank; iRank++) {
5212+
paddingValues.push_back(zero);
5213+
}
5214+
} else {
5215+
5216+
SmallVector<Value> dilation;
5217+
getListConstructElements(op.getDilation(), dilation);
5218+
5219+
Value one =
5220+
rewriter.create<ConstantIntOp>(loc, rewriter.getI64IntegerAttr(1));
5221+
Value two =
5222+
rewriter.create<ConstantIntOp>(loc, rewriter.getI64IntegerAttr(2));
5223+
for (unsigned iRank = 2; iRank < rank; iRank++) {
5224+
Value dim = rewriter.create<Torch::ConstantIntOp>(
5225+
loc, rewriter.getI64IntegerAttr(iRank));
5226+
Value kernelSize =
5227+
rewriter.create<Torch::AtenSizeIntOp>(loc, weight, dim);
5228+
Value kernelSizeMinusOne =
5229+
rewriter.create<Torch::AtenSubIntOp>(loc, kernelSize, one);
5230+
Value padding = rewriter.create<Torch::AtenMulIntOp>(
5231+
loc, dilation[iRank - 2], kernelSizeMinusOne);
5232+
padding = rewriter.create<AtenFloordivIntOp>(loc, padding, two);
5233+
paddingValues.push_back(padding);
5234+
}
5235+
}
5236+
5237+
Value emptyList = rewriter.create<PrimListConstructOp>(
5238+
op.getLoc(), Torch::ListType::get(Torch::IntType::get(op.getContext())),
5239+
SmallVector<Value>());
5240+
Value cstFalse = rewriter.create<Torch::ConstantBoolOp>(op.getLoc(), false);
5241+
Value padding = rewriter.create<PrimListConstructOp>(
5242+
op.getLoc(), Torch::ListType::get(Torch::IntType::get(op.getContext())),
5243+
paddingValues);
5244+
rewriter.replaceOpWithNewOp<AtenConvolutionOp>(
5245+
op, op->getResultTypes(), op.getInput(), op.getWeight(), op.getBias(),
5246+
op.getStride(), padding, op.getDilation(), cstFalse, emptyList,
5247+
op.getGroups());
5248+
5249+
return success();
5250+
}
5251+
};
5252+
} // namespace
5253+
51785254
// Decompose aten.conv3d to aten.convolution
51795255
namespace {
51805256
class DecomposeAtenConv3dOp : public OpRewritePattern<AtenConv3dOp> {
@@ -11377,6 +11453,12 @@ class DecomposeComplexOpsPass
1137711453
addPatternIfTargetOpIsIllegal<DecomposeAtenConv1dOp>(patterns);
1137811454
addPatternIfTargetOpIsIllegal<DecomposeAtenConv2dOp>(patterns);
1137911455
addPatternIfTargetOpIsIllegal<DecomposeAtenConv3dOp>(patterns);
11456+
addPatternIfTargetOpIsIllegal<
11457+
DecomposeAtenConvPaddingOp<AtenConv1dPaddingOp>>(patterns);
11458+
addPatternIfTargetOpIsIllegal<
11459+
DecomposeAtenConvPaddingOp<AtenConv2dPaddingOp>>(patterns);
11460+
addPatternIfTargetOpIsIllegal<
11461+
DecomposeAtenConvPaddingOp<AtenConv3dPaddingOp>>(patterns);
1138011462
addPatternIfTargetOpIsIllegal<DecomposeAtenThresholdOp>(patterns);
1138111463
addPatternIfTargetOpIsIllegal<DecomposeAtenFloatPowerTensorTensorOp>(
1138211464
patterns);

projects/pt1/e2e_testing/xfail_sets.py

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2065,6 +2065,8 @@
20652065
"Conv2dWithPaddingDilationStrideStaticModule_depthwise",
20662066
"Conv2dWithPaddingDilationStrideStaticModule_depthwise_multiplier",
20672067
"Conv2dWithPaddingModule_basic",
2068+
"Conv2dWithValidPaddingModule_basic",
2069+
"Conv2dWithSamePaddingModule_basic",
20682070
"Convolution2DStaticModule_basic",
20692071
"CosineSimilarityStaticModule_basic",
20702072
"DetachModule_basic",
@@ -2557,6 +2559,8 @@
25572559
"Conv2dNoPaddingModule_basic",
25582560
"Conv2dWithPaddingDilationStrideModule_basic",
25592561
"Conv2dWithPaddingModule_basic",
2562+
"Conv2dWithSamePaddingModule_basic",
2563+
"Conv2dWithValidPaddingModule_basic",
25602564
# failed to legalize operation 'torch.operator'
25612565
"ElementwisePreluModule_basic",
25622566
"ElementwisePreluStaticModule_basic",
@@ -2886,6 +2890,8 @@
28862890
"ContainsIntList_False",
28872891
"ContainsIntList_True",
28882892
"Conv1dModule_basic",
2893+
"Conv1dWithSamePaddingModule_basic",
2894+
"Conv1dWithValidPaddingModule_basic",
28892895
"Conv2dBiasNoPaddingModule_basic",
28902896
"Conv2dModule_basic",
28912897
"Conv2dNoPaddingModule_basic",
@@ -2898,7 +2904,11 @@
28982904
"Conv2dQInt8PerChannelModule_grouped",
28992905
"Conv2dWithPaddingDilationStrideModule_basic",
29002906
"Conv2dWithPaddingModule_basic",
2907+
"Conv2dWithSamePaddingModule_basic",
2908+
"Conv2dWithValidPaddingModule_basic",
29012909
"Conv3dModule_basic",
2910+
"Conv3dWithSamePaddingModule_basic",
2911+
"Conv3dWithValidPaddingModule_basic",
29022912
"ConvTbcModule_basic",
29032913
"ConvTranspose2DQInt8_basic",
29042914
"Conv_Transpose2dModule_basic",
@@ -3585,6 +3595,8 @@
35853595
"ContainsIntList_True",
35863596
"Conv1dModule_basic",
35873597
"Conv1dDepthwiseWithPaddingDilationStrideStaticModule_basic",
3598+
"Conv1dWithSamePaddingModule_basic",
3599+
"Conv1dWithValidPaddingModule_basic",
35883600
"Conv2dQInt8Module_basic",
35893601
"Conv2dQInt8Module_depthwise",
35903602
"Conv2dQInt8Module_grouped",
@@ -3595,6 +3607,8 @@
35953607
"Conv2dWithPaddingDilationStrideStaticModule_grouped",
35963608
"Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier",
35973609
"Conv3dModule_basic",
3610+
"Conv3dWithSamePaddingModule_basic",
3611+
"Conv3dWithValidPaddingModule_basic",
35983612
"ConvTbcModule_basic",
35993613
"ConvTranspose2DQInt8_basic",
36003614
"Conv_Transpose2dModule_basic",
@@ -4178,6 +4192,8 @@
41784192
"ContainsIntList_False",
41794193
"ContainsIntList_True",
41804194
"Conv1dModule_basic",
4195+
"Conv1dWithSamePaddingModule_basic",
4196+
"Conv1dWithValidPaddingModule_basic",
41814197
"Conv1dDepthwiseWithPaddingDilationStrideStaticModule_basic",
41824198
"Conv2dBiasNoPaddingModule_basic",
41834199
"Conv2dModule_basic",
@@ -4193,7 +4209,11 @@
41934209
"Conv2dWithPaddingDilationStrideStaticModule_grouped",
41944210
"Conv2dWithPaddingDilationStrideStaticModule_grouped_multiplier",
41954211
"Conv2dWithPaddingModule_basic",
4212+
"Conv2dWithSamePaddingModule_basic",
4213+
"Conv2dWithValidPaddingModule_basic",
41964214
"Conv3dModule_basic",
4215+
"Conv3dWithSamePaddingModule_basic",
4216+
"Conv3dWithValidPaddingModule_basic",
41974217
"ConvTbcModule_basic",
41984218
"ConvTranspose2DQInt8_basic",
41994219
"Conv_Transpose2dModule_basic",

0 commit comments

Comments
 (0)