Skip to content

Commit

Permalink
use irange for loops 6 (pytorch#66742)
Browse files Browse the repository at this point in the history
Summary:
Pull Request resolved: pytorch#66742

Modified loops in files under fbsource/fbcode/caffe2/ from the format

`for(TYPE var=x0;var<x_max;var++)`

to the format

`for(const auto var : irange(x_max))`

This was achieved by running r-barnes's loop upgrader script (D28874212), with some modifications to exclude all files under /torch/jit; a number of reversions and unused-variable suppression warnings were added by hand.

Test Plan: Sandcastle

Reviewed By: malfet

Differential Revision: D31705366

fbshipit-source-id: be58222426c192406a7f93c21582c3f6f2082401
  • Loading branch information
r-barnes authored and facebook-github-bot committed Dec 8, 2021
1 parent 9a7732e commit 1433160
Show file tree
Hide file tree
Showing 94 changed files with 558 additions and 480 deletions.
2 changes: 1 addition & 1 deletion caffe2/ideep/operators/conv_pool_base_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -53,7 +53,7 @@ class IDEEPConvPoolOpBase : public ConvPoolOpBase<IDEEPContext> {

bool RunOnDevice() override {
if (!global_pooling_) {
for (int dim = 0; dim < kernel_.size(); ++dim) {
for (const auto dim : c10::irange(kernel_.size())) {
CAFFE_ENFORCE_GT(kernel_[dim], 0);
}
}
Expand Down
4 changes: 2 additions & 2 deletions caffe2/ideep/operators/conv_transpose_unpool_base_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
CAFFE_ENFORCE_EQ(pads_.size(), 2 * kernel_.size());
}

for (int dim = 0; dim < kernel_.size(); ++dim) {
for (const auto dim : c10::irange(kernel_.size())) {
CAFFE_ENFORCE_GT(kernel_[dim], 0);
CAFFE_ENFORCE_GT(stride_[dim], 0);
CAFFE_ENFORCE_GE(adj_[dim], 0);
Expand Down Expand Up @@ -143,7 +143,7 @@ class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
auto input_dims = input.get_dims();
itensor::dims dims;
dims.assign(input_dims.begin() + 2, input_dims.end());
for (int dim = 0; dim < dims.size(); ++dim) {
for (const auto dim : c10::irange(dims.size())) {
int dim_size = 0;
ComputeSizeAndPad(
dims[dim],
Expand Down
6 changes: 3 additions & 3 deletions caffe2/ideep/operators/operator_fallback_ideep.h
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ class IDEEPFallbackOp final : public IDEEPOperator {
// Create output blobs in parent workspace,
// then forward output blobs to local workspace.
std::unordered_map<string, string> forwarded_output_blobs;
for (int i = 0; i < base_def_.output_size(); i++) {
for (const auto i : c10::irange(base_def_.output_size())) {
// For in-place case, the in/output tensor for local_ws must be
// re-created, instead of forwarding from current workspace.
string parent_name(base_def_.output(i));
Expand Down Expand Up @@ -81,7 +81,7 @@ class IDEEPFallbackOp final : public IDEEPOperator {
}

bool RunOnDevice() override {
for (int i = 0; i < InputSize(); ++i) {
for (const auto i : c10::irange(InputSize())) {
if (InputIsType<itensor>(i)
&& (Input(i).has_scale()
|| Input(i).get_data_type() == idtype::f32)) {
Expand Down Expand Up @@ -128,7 +128,7 @@ class IDEEPFallbackOp final : public IDEEPOperator {
return false;
}

for (int i = 0; i < OutputSize(); ++i) {
for (const auto i : c10::irange(OutputSize())) {
if (SkipOutputCopy::Contains(i)) {
VLOG(1) << "Copy output: index " << i << " skipped.";
continue;
Expand Down
2 changes: 1 addition & 1 deletion caffe2/ideep/utils/ideep_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ class IDEEPContext final : public BaseContext {
static_cast<const void*>(src),
static_cast<void*>(dst));
} else {
for (size_t i = 0; i < n; ++i) {
for (const auto i : c10::irange(n)) {
dst[i] = src[i];
}
}
Expand Down
Loading

0 comments on commit 1433160

Please sign in to comment.