From 71d3262e90547ae6226c696de37592359603d29c Mon Sep 17 00:00:00 2001 From: Krzysztof Parzyszek Date: Sat, 13 May 2023 13:33:59 -0500 Subject: [PATCH] [TOPI] Use f-strings for string formatting, NFC (#14839) --- python/tvm/topi/adreno/conv2d_alter_op.py | 77 +++++-------------- python/tvm/topi/adreno/pooling.py | 4 +- python/tvm/topi/arm_cpu/bitserial_dense.py | 2 +- python/tvm/topi/arm_cpu/tensor_intrin.py | 69 ++++------------- python/tvm/topi/bifrost/conv2d.py | 2 +- .../tvm/topi/cuda/batch_matmul_tensorcore.py | 2 +- python/tvm/topi/cuda/conv2d.py | 2 +- python/tvm/topi/cuda/conv2d_hwcn.py | 2 +- python/tvm/topi/cuda/conv3d.py | 2 +- python/tvm/topi/cuda/dense_tensorcore.py | 2 +- python/tvm/topi/cuda/pooling.py | 4 +- python/tvm/topi/cuda/reduction.py | 4 +- python/tvm/topi/generic/default.py | 2 +- python/tvm/topi/generic/injective.py | 2 +- .../topi/intel_graphics/conv2d_alter_op.py | 10 +-- python/tvm/topi/mali/conv2d.py | 18 +---- python/tvm/topi/nn/dilate.py | 2 +- python/tvm/topi/nn/pad.py | 12 +-- python/tvm/topi/nn/sparse.py | 2 +- python/tvm/topi/random/kernel.py | 13 ++-- python/tvm/topi/sparse/csrmm.py | 7 +- python/tvm/topi/sparse/csrmv.py | 7 +- python/tvm/topi/sparse/dense.py | 10 +-- python/tvm/topi/testing/dilate_python.py | 5 +- python/tvm/topi/utils.py | 21 +---- python/tvm/topi/x86/binarize_pack.py | 2 +- python/tvm/topi/x86/binary_dense.py | 2 +- python/tvm/topi/x86/bitserial_dense.py | 2 +- python/tvm/topi/x86/conv2d.py | 13 +--- python/tvm/topi/x86/conv2d_avx_common.py | 2 +- python/tvm/topi/x86/dense_alter_op.py | 15 +--- python/tvm/topi/x86/depthwise_conv2d.py | 6 +- python/tvm/topi/x86/pooling.py | 4 +- python/tvm/topi/x86/reduction.py | 4 +- 34 files changed, 101 insertions(+), 232 deletions(-) diff --git a/python/tvm/topi/adreno/conv2d_alter_op.py b/python/tvm/topi/adreno/conv2d_alter_op.py index cf72cc2a846e..47030606ddfb 100644 --- a/python/tvm/topi/adreno/conv2d_alter_op.py +++ b/python/tvm/topi/adreno/conv2d_alter_op.py @@ -130,8 +130,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): dtype=kernel_tensor.dtype, ) new_workload = autotvm.task.args_to_workload( - [new_data, new_weight, strides, padding, dilation, out_dtype], - wkl_name, + [new_data, new_weight, strides, padding, dilation, out_dtype], wkl_name ) dispatch_ctx.update(target, new_workload, cfg) return relay.nn.contrib_conv2d_winograd_without_weight_transform( @@ -165,18 +164,17 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): if in_channel_block != 4 or num_filter_block != 4: new_workload = autotvm.task.args_to_workload( - [new_data, new_weight, strides, padding, dilation, out_dtype], - wkl_name, + [new_data, new_weight, strides, padding, dilation, out_dtype], wkl_name ) dispatch_ctx.update(target, new_workload, cfg) return relay.nn.contrib_conv2d_winograd_without_weight_transform( inputs[0], weight, **new_attrs ) - new_attrs["data_layout"] = "NCHW%dc" % in_channel_block + new_attrs["data_layout"] = f"NCHW{in_channel_block}c" # (oc, ic, h, w) -> (h, w, ic, oc // 4, oc % 4) - new_attrs["kernel_layout"] = "HWIO%do" % num_filter_block - new_attrs["out_layout"] = "NCHW%dc" % num_filter_block + new_attrs["kernel_layout"] = f"HWIO{num_filter_block}o" + new_attrs["out_layout"] = f"NCHW{num_filter_block}c" # Store altered operator's config new_data = te.placeholder( (N, CI // in_channel_block, H, W, in_channel_block), dtype=data_dtype @@ -186,15 +184,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): dtype=kernel_tensor.dtype, ) new_workload = 
autotvm.task.args_to_workload( - [ - new_data, - new_weight, - strides, - padding, - dilation, - out_dtype, - ], - wkl_name, + [new_data, new_weight, strides, padding, dilation, out_dtype], wkl_name ) dispatch_ctx.update(target, new_workload, cfg) return relay.nn.contrib_conv2d_winograd_without_weight_transform( @@ -226,8 +216,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): dtype=kernel_tensor.dtype, ) new_workload = autotvm.task.args_to_workload( - [new_data, new_weight, strides, padding, dilation, out_dtype], - wkl_name, + [new_data, new_weight, strides, padding, dilation, out_dtype], wkl_name ) dispatch_ctx.update(target, new_workload, cfg) return relay.nn.contrib_conv2d_winograd_without_weight_transform( @@ -259,18 +248,17 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): if in_channel_block != 4 or num_filter_block != 4: new_workload = autotvm.task.args_to_workload( - [new_data, new_weight, strides, padding, dilation, out_dtype], - wkl_name, + [new_data, new_weight, strides, padding, dilation, out_dtype], wkl_name ) dispatch_ctx.update(target, new_workload, cfg) return relay.nn.contrib_conv2d_winograd_without_weight_transform( inputs[0], weight, **new_attrs ) - new_attrs["data_layout"] = "NHWC%dc" % in_channel_block + new_attrs["data_layout"] = f"NHWC{in_channel_block}c" # (oc, ic, h, w) -> (h, w, ic, oc // 4, oc % 4) - new_attrs["kernel_layout"] = "HWIO%do" % num_filter_block - new_attrs["out_layout"] = "NHWC%dc" % num_filter_block + new_attrs["kernel_layout"] = f"HWIO{num_filter_block}o" + new_attrs["out_layout"] = f"NHWC{num_filter_block}c" # Store altered operator's config new_data = te.placeholder( (N, H, W, CI // in_channel_block, in_channel_block), dtype=data_dtype @@ -280,15 +268,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): dtype=kernel_tensor.dtype, ) new_workload = autotvm.task.args_to_workload( - [ - new_data, - new_weight, - strides, - padding, - dilation, - out_dtype, - ], - wkl_name, + [new_data, new_weight, strides, padding, dilation, out_dtype], wkl_name ) dispatch_ctx.update(target, new_workload, cfg) return relay.nn.contrib_conv2d_winograd_without_weight_transform( @@ -316,12 +296,12 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): # update new attrs new_attrs["channels"] = out_channel if in_channel_block == 4: - new_attrs["data_layout"] = "NCHW%dc" % in_channel_block + new_attrs["data_layout"] = f"NCHW{in_channel_block}c" else: new_attrs["data_layout"] = "NCHW" # (oc, ic, h, w) -> (OC, ic, h, w, oc) - new_attrs["kernel_layout"] = "OIHW%do" % num_filter_block - new_attrs["out_layout"] = "NCHW%dc" % num_filter_block + new_attrs["kernel_layout"] = f"OIHW{num_filter_block}o" + new_attrs["out_layout"] = f"NCHW{num_filter_block}c" # Store altered operator's config for applying of tuned AutoTVM statistics if in_channel_block == 4: @@ -336,14 +316,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): dtype=kernel_tensor.dtype, ) new_workload = autotvm.task.args_to_workload( - [ - new_data, - new_kernel, - strides, - padding, - dilation, - out_dtype, - ], + [new_data, new_kernel, strides, padding, dilation, out_dtype], topi_tmpl, # "conv2d_nchwc.image2d", ) dispatch_ctx.update(target, new_workload, cfg) @@ -376,15 +349,15 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): # update new attrs new_attrs["channels"] = out_channles if in_channel_block == 4: - new_attrs["data_layout"] = "NHWC%dc" % in_channel_block + new_attrs["data_layout"] = f"NHWC{in_channel_block}c" else: new_attrs["data_layout"] 
= "NHWC" # (h, w, ic, oc) -> (h, w, ic, OC, oc) if kernel_layout == "HWIO": - new_attrs["kernel_layout"] = "HWIO%do" % num_filter_block + new_attrs["kernel_layout"] = f"HWIO{num_filter_block}o" else: - new_attrs["kernel_layout"] = "HWOI%do" % num_filter_block - new_attrs["out_layout"] = "NHWC%dc" % num_filter_block + new_attrs["kernel_layout"] = f"HWOI{num_filter_block}o" + new_attrs["out_layout"] = f"NHWC{num_filter_block}c" # Store altered operator's config for applying of tuned AutoTVM statistics if in_channel_block == 4: @@ -423,15 +396,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): dtype=kernel_tensor.dtype, ) new_workload = autotvm.task.args_to_workload( - [ - new_data, - new_kernel, - strides, - padding, - dilation, - out_dtype, - ], - topi_tmpl, + [new_data, new_kernel, strides, padding, dilation, out_dtype], topi_tmpl ) dispatch_ctx.update(target, new_workload, cfg) else: diff --git a/python/tvm/topi/adreno/pooling.py b/python/tvm/topi/adreno/pooling.py index f02af0c01fd2..c6eb35a4c9bd 100644 --- a/python/tvm/topi/adreno/pooling.py +++ b/python/tvm/topi/adreno/pooling.py @@ -120,7 +120,7 @@ def traverse(OP): Pool = OP.output(0) _schedule_global(Pool, layout) else: - raise RuntimeError("Unsupported operator: %s" % OP.tag) + raise RuntimeError(f"Unsupported operator: {OP.tag}") scheduled_ops.append(OP) @@ -188,7 +188,7 @@ def traverse(OP): Pool = OP.output(0) _schedule(PaddedInput, Pool) else: - raise RuntimeError("Unsupported operator: %s" % OP.tag) + raise RuntimeError(f"Unsupported operator: {OP.tag}") scheduled_ops.append(OP) diff --git a/python/tvm/topi/arm_cpu/bitserial_dense.py b/python/tvm/topi/arm_cpu/bitserial_dense.py index 8481b6c0a8ca..a9ce846cf163 100644 --- a/python/tvm/topi/arm_cpu/bitserial_dense.py +++ b/python/tvm/topi/arm_cpu/bitserial_dense.py @@ -205,7 +205,7 @@ def traverse(op): unipolar = output.op.tag == "bitserial_dense_unipolar" _schedule(cfg, s, data_vec, weight_vec, output, unipolar) else: - raise RuntimeError("Unsupported operator: %s" % op.tag) + raise RuntimeError(f"Unsupported operator: {op.tag}") traverse(outs[0].op) return s diff --git a/python/tvm/topi/arm_cpu/tensor_intrin.py b/python/tvm/topi/arm_cpu/tensor_intrin.py index 700639c10a1b..de38b944c27a 100644 --- a/python/tvm/topi/arm_cpu/tensor_intrin.py +++ b/python/tvm/topi/arm_cpu/tensor_intrin.py @@ -466,42 +466,36 @@ def dot_int8_int8_int32_neon_82(int32_lanes, dtype="uint"): """ num_int8_elements = 4 # 4 int8 elements in int32 - data = te.placeholder((num_int8_elements,), dtype="%s8" % dtype, name="data") - kernel = te.placeholder((int32_lanes, num_int8_elements), dtype="%s8" % dtype, name="kernel") + data = te.placeholder((num_int8_elements,), dtype=f"{dtype}8", name="data") + kernel = te.placeholder((int32_lanes, num_int8_elements), dtype=f"{dtype}8", name="kernel") k = te.reduce_axis((0, num_int8_elements), name="k") C = te.compute( (int32_lanes,), - lambda i: te.sum( - data[k].astype("%s32" % dtype) * kernel[i, k].astype("%s32" % dtype), axis=k - ), + lambda i: te.sum(data[k].astype(f"{dtype}32") * kernel[i, k].astype(f"{dtype}32"), axis=k), name="C", ) a_buffer = tvm.tir.decl_buffer( - data.shape, dtype="%s8" % dtype, name="a_buffer", offset_factor=1, strides=[1] + data.shape, dtype=f"{dtype}8", name="a_buffer", offset_factor=1, strides=[1] ) b_buffer = tvm.tir.decl_buffer( - kernel.shape, - dtype="%s8" % dtype, - name="b_buffer", - offset_factor=1, - strides=[te.var("s"), 1], + kernel.shape, dtype=f"{dtype}8", name="b_buffer", offset_factor=1, strides=[te.var("s"), 1] 
) def _intrin_func(ins, outs): def _instr(index): ib = tvm.tir.ir_builder.create() if index == 1: - ib.emit(outs[0].vstore(0, tvm.tir.const(0, "%s32x%d" % (dtype, int32_lanes)))) + ib.emit(outs[0].vstore(0, tvm.tir.const(0, f"{dtype}32x{int32_lanes}"))) return ib.get() - dtype_a = "%s8x%d" % (dtype, num_int8_elements) - dtype_b = "%s8x%d" % (dtype, int32_lanes * num_int8_elements) - dtype_c = "%s32x%d" % (dtype, int32_lanes) + dtype_a = f"{dtype}8x{num_int8_elements}" + dtype_b = f"{dtype}8x{int32_lanes * num_int8_elements}" + dtype_c = f"{dtype}32x{int32_lanes}" a_int8 = ins[0].vload([0], dtype_a) - re_int32 = tvm.tir.call_intrin("%s32" % dtype, "tir.reinterpret", a_int8) + re_int32 = tvm.tir.call_intrin(f"{dtype}32", "tir.reinterpret", a_int8) # broadcast a vec_ai32 = re_int32.astype(dtype_c) @@ -805,12 +799,7 @@ def _instr(index): # a*2+b*6+c*10+d*14, # a*3+b*7+c*11+d*15] vdot = tvm.tir.call_llvm_intrin( - "int32x4", - llvm_intrin, - tvm.tir.const(3, "uint32"), - vec_c, - vec_b, - vec_aa[i], + "int32x4", llvm_intrin, tvm.tir.const(3, "uint32"), vec_c, vec_b, vec_aa[i] ) # Store the result @@ -885,11 +874,7 @@ def gemm_acc_nx16_int8_int8_int32(dtype, rows): A.shape, dtype, name="aa_buffer", offset_factor=1, strides=[te.var("sa"), 1] ) bb_buffer = tvm.tir.decl_buffer( - B.shape, - dtype, - name="bb_buffer", - offset_factor=1, - strides=[te.var("sb0"), te.var("sb1"), 1], + B.shape, dtype, name="bb_buffer", offset_factor=1, strides=[te.var("sb0"), te.var("sb1"), 1] ) cc_buffer = tvm.tir.decl_buffer( C.shape, dtype="int32", name="cc_buffer", offset_factor=1, strides=[te.var("sc"), 1] @@ -936,12 +921,7 @@ def _instr(index): # a*2+b*18+c*34+d*50, # a*3+b*19+c*35+d*51] vdot = tvm.tir.call_llvm_intrin( - "int32x4", - llvm_intrin, - tvm.tir.const(3, "uint32"), - vec_c, - vec_b, - vec_aa, + "int32x4", llvm_intrin, tvm.tir.const(3, "uint32"), vec_c, vec_b, vec_aa ) ib.emit(outs[0].vstore([k, 4 * j], vdot)) return ib.get() @@ -977,27 +957,17 @@ def smlal_int16_int32(): A = te.placeholder((int16_lanes,), dtype="int16", name="A") B = te.placeholder((int16_lanes, 1), dtype="int16", name="B") C = te.compute( - (int16_lanes,), - lambda i: A[i].astype("int32") * B[i, 0].astype("int32"), - name="C", + (int16_lanes,), lambda i: A[i].astype("int32") * B[i, 0].astype("int32"), name="C" ) a_buffer = tvm.tir.decl_buffer( A.shape, dtype="int16", name="a_buffer", offset_factor=1, strides=[1] ) b_buffer = tvm.tir.decl_buffer( - B.shape, - dtype="int16", - name="b_buffer", - offset_factor=1, - strides=[te.var("sb"), 1], + B.shape, dtype="int16", name="b_buffer", offset_factor=1, strides=[te.var("sb"), 1] ) c_buffer = tvm.tir.decl_buffer( - C.shape, - dtype="int32", - name="c_buffer", - offset_factor=1, - strides=[1], + C.shape, dtype="int32", name="c_buffer", offset_factor=1, strides=[1] ) def _intrin_func(ins, outs): @@ -1122,12 +1092,7 @@ def _instr(index): # i*1 + j*3 + k*5 + l*7 +m*9 + n*11 + o*13 + p*15] vec_c = outs[0].vload([0, 0], "int32x4") vmmla = tvm.tir.call_llvm_intrin( - "int32x4", - llvm_intrin, - tvm.tir.const(3, "uint32"), - vec_c, - vec_a, - vec_b, + "int32x4", llvm_intrin, tvm.tir.const(3, "uint32"), vec_c, vec_a, vec_b ) # Store the result ib.emit(outs[0].vstore([0, 0], vmmla)) diff --git a/python/tvm/topi/bifrost/conv2d.py b/python/tvm/topi/bifrost/conv2d.py index 633f36c0e7ff..30d39b476946 100644 --- a/python/tvm/topi/bifrost/conv2d.py +++ b/python/tvm/topi/bifrost/conv2d.py @@ -509,7 +509,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): CO, _, KH, KW = 
get_const_tuple(kernel.shape) VC = cfg["tile_co"].size[-1] - new_attrs["kernel_layout"] = "OIHW%do" % VC + new_attrs["kernel_layout"] = f"OIHW{VC}o" new_data = data new_kernel = te.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype) diff --git a/python/tvm/topi/cuda/batch_matmul_tensorcore.py b/python/tvm/topi/cuda/batch_matmul_tensorcore.py index 8e4868b3895d..920f162b103a 100644 --- a/python/tvm/topi/cuda/batch_matmul_tensorcore.py +++ b/python/tvm/topi/cuda/batch_matmul_tensorcore.py @@ -118,7 +118,7 @@ def _schedule(cfg, s, C): wmma_m = wmma_n = 8 wmma_k = 32 else: - raise ValueError("data dtype %s is not yet supported" % data_dtype) + raise ValueError(f"data dtype {data_dtype} is not yet supported") warp_size = 32 block_row_warps = cfg["block_row_warps"].val diff --git a/python/tvm/topi/cuda/conv2d.py b/python/tvm/topi/cuda/conv2d.py index bce032040dcd..fc9d51b2dd40 100644 --- a/python/tvm/topi/cuda/conv2d.py +++ b/python/tvm/topi/cuda/conv2d.py @@ -59,7 +59,7 @@ def conv2d_cudnn( tensor_format = 1 # CUDNN_TENSOR_NHWC N, H, W, _ = get_const_tuple(data.shape) else: - raise ValueError("Unsupported layout %s in cudnn" % layout) + raise ValueError(f"Unsupported layout {layout} in cudnn") CO, CI, KH, KW = get_const_tuple(kernel.shape) # handle dilation diff --git a/python/tvm/topi/cuda/conv2d_hwcn.py b/python/tvm/topi/cuda/conv2d_hwcn.py index 46a618ee3ed5..8786fbcc1aa0 100644 --- a/python/tvm/topi/cuda/conv2d_hwcn.py +++ b/python/tvm/topi/cuda/conv2d_hwcn.py @@ -155,7 +155,7 @@ def traverse(operator): B = operator.output(0) schedule(Apad, W, B) else: - raise RuntimeError("Unsupported operator: %s" % operator.tag) + raise RuntimeError(f"Unsupported operator: {operator.tag}") scheduled_ops.append(operator) diff --git a/python/tvm/topi/cuda/conv3d.py b/python/tvm/topi/cuda/conv3d.py index 6b602384b4fe..7a5e8ce69cb4 100644 --- a/python/tvm/topi/cuda/conv3d.py +++ b/python/tvm/topi/cuda/conv3d.py @@ -197,7 +197,7 @@ def conv3d_cudnn( tensor_format = 1 # CUDNN_TENSOR_NHWC N, D, H, W, _ = get_const_tuple(data.shape) else: - raise ValueError("Unsupported layout %s in cudnn" % layout) + raise ValueError(f"Unsupported layout {layout} in cudnn") CO, CI, KD, KH, KW = get_const_tuple(kernel.shape) assert groups == 1, "conv3d_cudnn does not support groups" diff --git a/python/tvm/topi/cuda/dense_tensorcore.py b/python/tvm/topi/cuda/dense_tensorcore.py index 4f3c98dfd019..506d94e60ea0 100644 --- a/python/tvm/topi/cuda/dense_tensorcore.py +++ b/python/tvm/topi/cuda/dense_tensorcore.py @@ -152,7 +152,7 @@ def _schedule_dense_tensorcore(cfg, s, C): wmma_m = wmma_n = 8 wmma_k = 32 else: - raise ValueError("data dtype %s is not yet supported" % data_dtype) + raise ValueError(f"data dtype {data_dtype} is not yet supported") warp_size = 32 block_row_warps = cfg["block_row_warps"].val diff --git a/python/tvm/topi/cuda/pooling.py b/python/tvm/topi/cuda/pooling.py index ba2e7da8e11e..a443f222b63b 100644 --- a/python/tvm/topi/cuda/pooling.py +++ b/python/tvm/topi/cuda/pooling.py @@ -85,7 +85,7 @@ def traverse(OP): else: _schedule_non_global(Pool) else: - raise RuntimeError("Unsupported operator: %s" % OP.tag) + raise RuntimeError(f"Unsupported operator: {OP.tag}") scheduled_ops.append(OP) @@ -149,7 +149,7 @@ def traverse(OP): Pool = OP.output(0) _schedule(PaddedInput, Pool) else: - raise RuntimeError("Unsupported operator: %s" % OP.tag) + raise RuntimeError(f"Unsupported operator: {OP.tag}") scheduled_ops.append(OP) diff --git a/python/tvm/topi/cuda/reduction.py b/python/tvm/topi/cuda/reduction.py 
index e4234a9cce3f..c3ddb59605be 100644 --- a/python/tvm/topi/cuda/reduction.py +++ b/python/tvm/topi/cuda/reduction.py @@ -155,7 +155,7 @@ def traverse_before_reduce(operator): if tensor.op not in scheduled_ops: traverse_before_reduce(tensor.op) else: - raise RuntimeError("Unsupported operator: %s" % operator.tag) + raise RuntimeError(f"Unsupported operator: {operator.tag}") scheduled_ops.append(operator) @@ -186,7 +186,7 @@ def traverse_after_reduce(operator): elif isinstance(operator, tvm.te.PlaceholderOp): pass else: - raise RuntimeError("Unsupported operator: %s" % operator.tag) + raise RuntimeError(f"Unsupported operator: {operator.tag}") scheduled_ops.append(operator) diff --git a/python/tvm/topi/generic/default.py b/python/tvm/topi/generic/default.py index f03c4971c9a6..65f24019de15 100644 --- a/python/tvm/topi/generic/default.py +++ b/python/tvm/topi/generic/default.py @@ -25,7 +25,7 @@ def default_schedule(outs, auto_inline): target = tvm.target.Target.current(allow_none=False) outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs if target.kind.name not in ("llvm", "c"): - raise RuntimeError("schedule not registered for '%s'" % target) + raise RuntimeError(f"schedule not registered for '{target}'") s = te.create_schedule([x.op for x in outs]) if auto_inline: x = outs[0] diff --git a/python/tvm/topi/generic/injective.py b/python/tvm/topi/generic/injective.py index 6b8109897b99..00c35b22b6c8 100644 --- a/python/tvm/topi/generic/injective.py +++ b/python/tvm/topi/generic/injective.py @@ -57,7 +57,7 @@ def schedule_injective(outs): """ target = tvm.target.Target.current(allow_none=False) if target.kind.name != "llvm": - raise RuntimeError("schedule_injective not registered for '%s'" % target) + raise RuntimeError(f"schedule_injective not registered for '{target}'") outs = [outs] if isinstance(outs, te.tensor.Tensor) else outs x = outs[0] s = te.create_schedule([x.op for x in outs]) diff --git a/python/tvm/topi/intel_graphics/conv2d_alter_op.py b/python/tvm/topi/intel_graphics/conv2d_alter_op.py index 199d984af1e4..3dc587e8710e 100644 --- a/python/tvm/topi/intel_graphics/conv2d_alter_op.py +++ b/python/tvm/topi/intel_graphics/conv2d_alter_op.py @@ -69,10 +69,10 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): # update new attrs new_attrs["channels"] = out_channel - new_attrs["data_layout"] = "NCHW%dc" % ic_bn + new_attrs["data_layout"] = f"NCHW{ic_bn}c" # (oc, ic, h, w) -> (OC, IC, h, w, ic, oc) - new_attrs["kernel_layout"] = "OIHW%di%do" % (ic_bn, oc_bn) - new_attrs["out_layout"] = "NCHW%dc" % oc_bn + new_attrs["kernel_layout"] = f"OIHW{ic_bn}i{oc_bn}o" + new_attrs["out_layout"] = f"NCHW{oc_bn}c" # Store altered operator's config new_data = te.placeholder( @@ -109,7 +109,7 @@ def _conv2d_infer_layout(workload, cfg): out_width = (in_width + 2 * padding[1] - k_width) // strides[1] + 1 tile_ic, tile_oc = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] in_shape = (batch_size, in_channel // tile_ic, in_height, in_width, tile_ic) - in_layout = "NCHW%dc" % tile_ic + in_layout = f"NCHW{tile_ic}c" out_shape = (batch_size, out_channel // tile_oc, out_height, out_width, tile_oc) - out_layout = "NCHW%dc" % tile_oc + out_layout = f"NCHW{tile_oc}c" return ((in_shape, in_layout),), ((out_shape, out_layout),) diff --git a/python/tvm/topi/mali/conv2d.py b/python/tvm/topi/mali/conv2d.py index 051914113a5b..ccd3090a9838 100644 --- a/python/tvm/topi/mali/conv2d.py +++ b/python/tvm/topi/mali/conv2d.py @@ -214,13 +214,7 @@ def _schedule_spatial_pack(cfg, s, op, layout): axis_lens = 
[VH, VW, VC] cfg["ann_spatial"].apply( - s, - conv, - unroll_vec_axes, - axis_lens, - max_unroll=max_unroll, - vec_size=vec_size, - cfg=cfg, + s, conv, unroll_vec_axes, axis_lens, max_unroll=max_unroll, vec_size=vec_size, cfg=cfg ) # schedule output @@ -433,13 +427,7 @@ def _schedule_winograd(cfg, s, op): if isinstance(U.op, tvm.te.ComputeOp): kernel, G = s[U].op.input_tensors s[G].compute_inline() - ( - eps, - nu, - co, - ci, - vco, - ) = s[U].op.axis + (eps, nu, co, ci, vco) = s[U].op.axis if not autotvm.GLOBAL_SCOPE.in_tuning: r_kh, r_kw = s[U].op.reduce_axis s[U].reorder(co, ci, eps, nu, r_kh, r_kw, vco) @@ -577,7 +565,7 @@ def _alter_conv2d_layout(attrs, inputs, tinfos, out_type): CO, _, KH, KW = get_const_tuple(kernel.shape) VC = cfg["tile_co"].size[-1] - new_attrs["kernel_layout"] = "OIHW%do" % VC + new_attrs["kernel_layout"] = f"OIHW{VC}o" new_data = data new_kernel = te.placeholder((idxd(CO, VC), CI, KH, KW, VC), dtype=kernel.dtype) diff --git a/python/tvm/topi/nn/dilate.py b/python/tvm/topi/nn/dilate.py index 6b2222e4a779..354aea6d0e6f 100644 --- a/python/tvm/topi/nn/dilate.py +++ b/python/tvm/topi/nn/dilate.py @@ -47,7 +47,7 @@ def dilate(data, strides, dilation_value=0.0, name="DilatedInput"): """ n = len(data.shape) if len(strides) != n: - raise ValueError("data dimension and strides size dismatch : %d vs %d" % (n, len(strides))) + raise ValueError(f"data dimension and strides size dismatch : {n} vs {len(strides)}") ana = tvm.arith.Analyzer() out_shape = tuple(ana.simplify((data.shape[i] - 1) * strides[i] + 1) for i in range(n)) diff --git a/python/tvm/topi/nn/pad.py b/python/tvm/topi/nn/pad.py index 4e76104fb08b..7bd2b7632b9c 100644 --- a/python/tvm/topi/nn/pad.py +++ b/python/tvm/topi/nn/pad.py @@ -53,11 +53,9 @@ def pad(data, pad_before, pad_after=None, pad_value=0.0, name="PadInput", attrs= n = len(data.shape) pad_after = pad_after if pad_after else pad_before if len(pad_before) != n: - raise ValueError( - "Input dimension and pad_before dismatch : %d vs %d" % (n, len(pad_before)) - ) + raise ValueError(f"Input dimension and pad_before dismatch : {n} vs {len(pad_before)}") if len(pad_after) != n: - raise ValueError("Input dimension and pad_after dismatch : %d vs %d" % (n, len(pad_before))) + raise ValueError(f"Input dimension and pad_after dismatch : {n} vs {len(pad_after)}") ana = tvm.arith.Analyzer() dshape = [] for dim in data.shape: @@ -119,11 +117,9 @@ def mirror_pad(data, pad_before, pad_after=None, mode="SYMMETRIC", name="MirrorP n = len(data.shape) pad_after = pad_after if pad_after else pad_before if len(pad_before) != n: - raise ValueError( - "Input dimension and pad_before dismatch : %d vs %d" % (n, len(pad_before)) - ) + raise ValueError(f"Input dimension and pad_before dismatch : {n} vs {len(pad_before)}") if len(pad_after) != n: - raise ValueError("Input dimension and pad_after dismatch : %d vs %d" % (n, len(pad_before))) + raise ValueError(f"Input dimension and pad_after dismatch : {n} vs {len(pad_after)}") ana = tvm.arith.Analyzer() out_shape = tuple(ana.simplify(data.shape[i] + pad_before[i] + pad_after[i]) for i in range(n)) assert mode in ("SYMMETRIC", "REFLECT") diff --git a/python/tvm/topi/nn/sparse.py b/python/tvm/topi/nn/sparse.py index e577104c3ddc..d3475653715d 100644 --- a/python/tvm/topi/nn/sparse.py +++ b/python/tvm/topi/nn/sparse.py @@ -610,7 +610,7 @@ def sparse_conv2d( dense_data, sparse_data, sparse_indices, sparse_indptr ) else: - raise ValueError("Unsupport Layout %s" % layout) + raise ValueError(f"Unsupport Layout {layout}") 
@auto_scheduler.register_task_input_check_func diff --git a/python/tvm/topi/random/kernel.py b/python/tvm/topi/random/kernel.py index 651e4dc1c744..464ea9634ab5 100644 --- a/python/tvm/topi/random/kernel.py +++ b/python/tvm/topi/random/kernel.py @@ -517,9 +517,10 @@ def uniform(gen, low, high, out_shape, out_dtype): Tensor of random numbers with shape `out_shape` and type `out_dtype`. """ new_gen, random_bits = threefry_generate(gen, out_shape) - assert out_dtype in ("float32", "float64"), ( - "Only support float32 or float64 for now, got %s" % out_dtype - ) + assert out_dtype in ( + "float32", + "float64", + ), f"Only support float32 or float64 for now, got {out_dtype}" if out_dtype == "float32": random_dtype = "uint32" nbits = 32 @@ -581,11 +582,7 @@ def normal(gen, mean, scale, out_shape, out_dtype): # Box-Muller transform need two pieces of original uniform data out_shape.insert(0, 2) new_gen, uniform_values = uniform( - gen, - tvm.tir.const(0.0, out_dtype), - tvm.tir.const(1.0, out_dtype), - out_shape, - out_dtype, + gen, tvm.tir.const(0.0, out_dtype), tvm.tir.const(1.0, out_dtype), out_shape, out_dtype ) two_pi = tvm.tir.const(2.0 * math.pi, out_dtype) uniform_values_1 = tvm.topi.strided_slice(uniform_values, [0], [1], strides=[1], axes=[0]) diff --git a/python/tvm/topi/sparse/csrmm.py b/python/tvm/topi/sparse/csrmm.py index 4d659c801103..7af9d30bddde 100644 --- a/python/tvm/topi/sparse/csrmm.py +++ b/python/tvm/topi/sparse/csrmm.py @@ -57,13 +57,10 @@ def csrmm_default(data, indices, indptr, weight, bias=None): ), "only support 2-dim csrmm" assert isinstance( weight, te.tensor.Tensor - ), "weight matrix is assumed to be tvm.te.Tensor, but weight is `%s`" % (type(weight)) + ), f"weight matrix is assumed to be tvm.te.Tensor, but weight is `{type(weight)}`" assert ( data.dtype == weight.dtype - ), "Data and weight must have the same dtype, but they have %s and %s" % ( - data.dtype, - weight.dtype, - ) + ), f"Data and weight must have the same dtype, but they have {data.dtype} and {weight.dtype}" if bias is not None: assert len(bias.shape) == 1 M = simplify(indptr.shape[0] - 1) diff --git a/python/tvm/topi/sparse/csrmv.py b/python/tvm/topi/sparse/csrmv.py index 3c2016c6513a..d585b27ca7ab 100644 --- a/python/tvm/topi/sparse/csrmv.py +++ b/python/tvm/topi/sparse/csrmv.py @@ -50,13 +50,10 @@ def csrmv_default(data, indices, indptr, weight, bias=None): assert len(data.shape) == 1 and len(weight.shape) == 2, "only support 2-dim csrmv" assert isinstance( weight, te.tensor.Tensor - ), "weight matrix is assumed to be tvm.te.Tensor, but weight is `%s`" % (type(weight)) + ), f"weight matrix is assumed to be tvm.te.Tensor, but weight is `{type(weight)}`" assert ( data.dtype == weight.dtype - ), "Data and weight must have the same dtype, but they have %s and %s" % ( - data.dtype, - weight.dtype, - ) + ), f"Data and weight must have the same dtype, but they have {data.dtype} and {weight.dtype}" if bias is not None: assert len(bias.shape) == 1 batch = indptr.shape[0] - 1 diff --git a/python/tvm/topi/sparse/dense.py b/python/tvm/topi/sparse/dense.py index 5c63e44f691a..9c13c4bae918 100644 --- a/python/tvm/topi/sparse/dense.py +++ b/python/tvm/topi/sparse/dense.py @@ -56,7 +56,7 @@ def dense_si(data, indices, indptr, weight, bias=None): ), "only support 2-dim dense" assert isinstance( weight, te.tensor.Tensor - ), "weight matrix is assumed to be tvm.te.Tensor, but weight is `%s`" % (type(weight)) + ), f"weight matrix is assumed to be tvm.te.Tensor, but weight is `{type(weight)}`" if bias is not None: 
assert len(bias.shape) == 1 dtype = data.dtype @@ -135,7 +135,7 @@ def dense_sw(data, w_data, w_indices, w_indptr, bias=None): ), "only support 2-dim dense" assert isinstance( data, te.tensor.Tensor - ), "data matrix is assumed to be tvm.te.Tensor, but weight is `%s`" % (type(data)) + ), f"data matrix is assumed to be tvm.te.Tensor, but weight is `{type(data)}`" if bias is not None: assert len(bias.shape) == 1 dtype = data.dtype @@ -212,10 +212,6 @@ def dense(data, weight, bias=None): else: raise NotImplementedError( "implementation for %s as data and %s as weights, " - "is not supported yet." - % ( - type(data), - type(weight), - ) + "is not supported yet." % (type(data), type(weight)) ) return ret diff --git a/python/tvm/topi/testing/dilate_python.py b/python/tvm/topi/testing/dilate_python.py index 43559e3cee12..0d0af28e7fec 100644 --- a/python/tvm/topi/testing/dilate_python.py +++ b/python/tvm/topi/testing/dilate_python.py @@ -45,10 +45,7 @@ def dilate_python(input_np, strides, dilation_value=0.0, out_dtype=None): """ assert len(input_np.shape) == len( strides - ), "Input dimension and strides size dismatch : %d vs %d" % ( - len(input_np.shape), - len(strides), - ) + ), f"Input dimension and strides size dismatch : {len(input_np.shape)} vs {len(strides)}" if out_dtype is None: out_dtype = input_np.dtype diff --git a/python/tvm/topi/utils.py b/python/tvm/topi/utils.py index 7580eac0216d..71599ad74a62 100644 --- a/python/tvm/topi/utils.py +++ b/python/tvm/topi/utils.py @@ -226,9 +226,7 @@ def select_array(i): now = tvm.tir.const(0.0, dtype) for ii in range(row): now = tvm.tir.Select( - tvm.tir.all(idxm(i, row) == ii), - tvm.tir.const(vector[ii], dtype), - now, + tvm.tir.all(idxm(i, row) == ii), tvm.tir.const(vector[ii], dtype), now ) return now @@ -357,17 +355,9 @@ def select_array(i, j): return now if attrs is None: - attrs = { - "const_matrix": True, - "schedule_rule": "None", - } + attrs = {"const_matrix": True, "schedule_rule": "None"} - return te.compute( - matrix.shape, - select_array, - name=name, - attrs=attrs, - ) + return te.compute(matrix.shape, select_array, name=name, attrs=attrs) def get_max_power2_factor(n, max_value=None): @@ -424,10 +414,7 @@ def get_shape(src_shape, src_layout, dst_layout): if isinstance(dst_layout, str): dst_layout = layout(dst_layout) - assert len(src_layout) == len(dst_layout), "Incompatible layout %s vs %s" % ( - src_layout, - dst_layout, - ) + assert len(src_layout) == len(dst_layout), f"Incompatible layout {src_layout} vs {dst_layout}" layout_mapping = bijective_layout(src_layout, dst_layout) dst_indices = layout_mapping.forward_index(tvm.runtime.convert(list(range(len(src_layout))))) diff --git a/python/tvm/topi/x86/binarize_pack.py b/python/tvm/topi/x86/binarize_pack.py index 34fcbfbcfdce..53c346c37969 100644 --- a/python/tvm/topi/x86/binarize_pack.py +++ b/python/tvm/topi/x86/binarize_pack.py @@ -45,7 +45,7 @@ def traverse(OP): Out = OP.output(0) _schedule(Out) else: - raise RuntimeError("Unsupported operator: %s" % OP.tag) + raise RuntimeError(f"Unsupported operator: {OP.tag}") traverse(outs[0].op) return s diff --git a/python/tvm/topi/x86/binary_dense.py b/python/tvm/topi/x86/binary_dense.py index be02cb9bce9d..0940af4fb161 100644 --- a/python/tvm/topi/x86/binary_dense.py +++ b/python/tvm/topi/x86/binary_dense.py @@ -64,7 +64,7 @@ def traverse(OP): weight = OP.input_tensors[1] _schedule(data, weight, output) else: - raise RuntimeError("Unsupported operator: %s" % OP.tag) + raise RuntimeError(f"Unsupported operator: {OP.tag}") 
scheduled_ops.append(OP) diff --git a/python/tvm/topi/x86/bitserial_dense.py b/python/tvm/topi/x86/bitserial_dense.py index 5e5c5c7e4c02..86c58b60eaf2 100644 --- a/python/tvm/topi/x86/bitserial_dense.py +++ b/python/tvm/topi/x86/bitserial_dense.py @@ -192,7 +192,7 @@ def traverse(op): data = data.op.input_tensors[0] _schedule(cfg, s, data_vec, weight_vec, output) else: - raise RuntimeError("Unsupported operator: %s" % op.tag) + raise RuntimeError(f"Unsupported operator: {op.tag}") traverse(outs[0].op) return s diff --git a/python/tvm/topi/x86/conv2d.py b/python/tvm/topi/x86/conv2d.py index 25e8ffe94155..1b7f020d5014 100644 --- a/python/tvm/topi/x86/conv2d.py +++ b/python/tvm/topi/x86/conv2d.py @@ -78,9 +78,9 @@ def _conv2d_infer_layout(workload, cfg): out_width = idxdiv(in_width + pl + pr - dilated_kernel_w, strides[1]) + 1 tile_ic, tile_oc = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] in_shape = (batch_size, idxdiv(in_channel, tile_ic), in_height, in_width, tile_ic) - in_layout = "NCHW%dc" % tile_ic + in_layout = f"NCHW{tile_ic}c" out_shape = (batch_size, idxdiv(out_channel, tile_oc), out_height, out_width, tile_oc) - out_layout = "NCHW%dc" % tile_oc + out_layout = f"NCHW{tile_oc}c" return ((in_shape, in_layout),), ((out_shape, out_layout),) @@ -254,14 +254,7 @@ def _callback(op): data_vec = conv_out.op.input_tensors[0] args = [s, cfg, data_vec, kernel_vec, conv_out, outs[0]] - ( - _, - _, - kh, - kw, - _, - _, - ) = get_const_tuple(kernel_vec.shape) + (_, _, kh, kw, _, _) = get_const_tuple(kernel_vec.shape) if kh == 1 and kw == 1: conv2d_avx_1x1._schedule_conv_NCHWc(*args) else: diff --git a/python/tvm/topi/x86/conv2d_avx_common.py b/python/tvm/topi/x86/conv2d_avx_common.py index 115106c7c87d..73283e7888dd 100644 --- a/python/tvm/topi/x86/conv2d_avx_common.py +++ b/python/tvm/topi/x86/conv2d_avx_common.py @@ -161,7 +161,7 @@ def _schedule_conv_NCHWc(s, cfg, data_vec, kernel_vec, conv_out, last): s[O].vectorize(oc_block) s[O].parallel(parallel_axis) else: - raise ValueError("Unsupported output ndim: %s" % out_ndim) + raise ValueError(f"Unsupported output ndim: {out_ndim}") return s diff --git a/python/tvm/topi/x86/dense_alter_op.py b/python/tvm/topi/x86/dense_alter_op.py index 790a5a2e0812..973f94ecb9e5 100644 --- a/python/tvm/topi/x86/dense_alter_op.py +++ b/python/tvm/topi/x86/dense_alter_op.py @@ -64,20 +64,11 @@ def _alter_dense_layout(attrs, inputs, tinfos, out_type): if cfg.is_fallback: _default_dense_pack_config(cfg, M, N, K) packw_bn = cfg["tile_x"].size[-1] - weight_layout = "NC%dn" % packw_bn - new_weight = te.placeholder( - (N // packw_bn, K, packw_bn), - dtype=weight_tensor.dtype, - ) + weight_layout = f"NC{packw_bn}n" + new_weight = te.placeholder((N // packw_bn, K, packw_bn), dtype=weight_tensor.dtype) # Relay dense doesn't have bias. 
new_workload = autotvm.task.args_to_workload( - [ - data_tensor, - new_weight, - None, - out_dtype, - ], - topi_impl, + [data_tensor, new_weight, None, out_dtype], topi_impl ) dispatch_ctx.update(target, new_workload, cfg) return relay.nn.contrib_dense_pack(inputs[0], inputs[1], weight_layout, None, out_dtype) diff --git a/python/tvm/topi/x86/depthwise_conv2d.py b/python/tvm/topi/x86/depthwise_conv2d.py index 2a1f7810ce8e..59d7412befc0 100644 --- a/python/tvm/topi/x86/depthwise_conv2d.py +++ b/python/tvm/topi/x86/depthwise_conv2d.py @@ -299,7 +299,7 @@ def _schedule_depthwise_conv2d_NCHWc_impl(s, cfg, data_vec, kernel_vec, conv_out s[O].vectorize(oc_block) s[O].parallel(parallel_axis) else: - raise ValueError("Unsupported output ndim: %s" % out_ndim) + raise ValueError(f"Unsupported output ndim: {out_ndim}") return s @@ -314,7 +314,7 @@ def _depthwise_conv2d_infer_layout(workload, cfg): out_width = (in_width + padding[1] + padding[3] - k_width) // strides[1] + 1 tile_ic, tile_oc = cfg["tile_ic"].size[-1], cfg["tile_oc"].size[-1] in_shape = (batch_size, in_channel // tile_ic, in_height, in_width, tile_ic) - in_layout = "NCHW%dc" % tile_ic + in_layout = f"NCHW{tile_ic}c" out_shape = (batch_size, out_channel // tile_oc, out_height, out_width, tile_oc) - out_layout = "NCHW%dc" % tile_oc + out_layout = f"NCHW{tile_oc}c" return ((in_shape, in_layout),), ((out_shape, out_layout),) diff --git a/python/tvm/topi/x86/pooling.py b/python/tvm/topi/x86/pooling.py index b3f4eedec67c..c70046e771f8 100644 --- a/python/tvm/topi/x86/pooling.py +++ b/python/tvm/topi/x86/pooling.py @@ -108,7 +108,7 @@ def traverse(OP): Pool = OP.output(0) _schedule(PaddedInput, Pool) else: - raise RuntimeError("Unsupported operator: %s" % OP.tag) + raise RuntimeError(f"Unsupported operator: {OP.tag}") scheduled_ops.append(OP) @@ -153,7 +153,7 @@ def traverse(OP): Pool = OP.output(0) _parallel_sch(s[Pool], outs[0].shape) else: - raise RuntimeError("Unsupported operator: %s" % OP.tag) + raise RuntimeError(f"Unsupported operator: {OP.tag}") scheduled_ops.append(OP) diff --git a/python/tvm/topi/x86/reduction.py b/python/tvm/topi/x86/reduction.py index db3ea81b7358..349d4561497f 100644 --- a/python/tvm/topi/x86/reduction.py +++ b/python/tvm/topi/x86/reduction.py @@ -87,7 +87,7 @@ def traverse_before_reduce(operator): if tensor.op not in scheduled_ops: traverse_before_reduce(tensor.op) else: - raise RuntimeError("Unsupported operator: %s" % operator.tag) + raise RuntimeError(f"Unsupported operator: {operator.tag}") scheduled_ops.append(operator) @@ -112,7 +112,7 @@ def traverse_after_reduce(operator): elif isinstance(operator, tvm.te.PlaceholderOp): pass else: - raise RuntimeError("Unsupported operator: %s (tag: %s)" % (operator, operator.tag)) + raise RuntimeError(f"Unsupported operator: {operator} (tag: {operator.tag})") scheduled_ops.append(operator)
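
Note (not part of the patch): the change above is purely mechanical, replacing each %-style format expression with an equivalent f-string, with no functional change (NFC). The short sketch below illustrates the pattern; the variable values (ic_bn, oc_bn, int32_lanes, dtype, layout) are stand-ins chosen for this illustration and mirror names used in the hunks above.

# Illustrative sketch only -- demonstrates that the %-formatting removed by
# this patch and the f-strings that replace it produce identical strings.
ic_bn, oc_bn, int32_lanes, dtype = 16, 4, 4, "uint"

# Layout strings, e.g. in intel_graphics/conv2d_alter_op.py:
assert "NCHW%dc" % ic_bn == f"NCHW{ic_bn}c"                       # "NCHW16c"
assert "OIHW%di%do" % (ic_bn, oc_bn) == f"OIHW{ic_bn}i{oc_bn}o"   # "OIHW16i4o"

# Dtype strings, e.g. in arm_cpu/tensor_intrin.py:
assert "%s8" % dtype == f"{dtype}8"                               # "uint8"
assert "%s32x%d" % (dtype, int32_lanes) == f"{dtype}32x{int32_lanes}"  # "uint32x4"

# Error messages, e.g. in cuda/conv2d.py:
layout = "NHWC"
assert "Unsupported layout %s in cudnn" % layout == f"Unsupported layout {layout} in cudnn"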