Skip to content

Commit

Permalink
[BE] Enforce missing override keyword (pytorch#104032)
Browse files Browse the repository at this point in the history
This PR enables `-Winconsistent-missing-destructor-override` and `-Winconsistent-missing-override`
and fixes violations.

<!--
copilot:summary
-->
### <samp>🤖 Generated by Copilot at 47e904e</samp>

This pull request updates various classes and operators in the `caffe2` and `aten` subdirectories to use the `override` specifier instead of the `virtual` keyword on destructors and other virtual functions that override a base-class function. This improves code readability, quality, and consistency with C++ best practices. It also modifies the top-level `CMakeLists.txt` to enable warnings for these specifiers while keeping them non-fatal (the diagnostics are reported as warnings, not promoted to errors).

Pull Request resolved: pytorch#104032
Approved by: https://github.com/malfet
  • Loading branch information
cyyever authored and pytorchmergebot committed Jun 24, 2023
1 parent 202a910 commit 483f748
Show file tree
Hide file tree
Showing 96 changed files with 159 additions and 160 deletions.
4 changes: 4 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -817,6 +817,8 @@ if(NOT MSVC)
append_cxx_flag_if_supported("-Wno-strict-aliasing" CMAKE_CXX_FLAGS)
append_cxx_flag_if_supported("-Wvla-extension" CMAKE_CXX_FLAGS)
append_cxx_flag_if_supported("-Wnewline-eof" CMAKE_CXX_FLAGS)
append_cxx_flag_if_supported("-Winconsistent-missing-override" CMAKE_CXX_FLAGS)
append_cxx_flag_if_supported("-Winconsistent-missing-destructor-override" CMAKE_CXX_FLAGS)
if("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
string(APPEND CMAKE_CXX_FLAGS " -Wno-range-loop-analysis")
string(APPEND CMAKE_CXX_FLAGS " -Wno-pass-failed")
Expand Down Expand Up @@ -861,6 +863,8 @@ if(NOT MSVC)

append_cxx_flag_if_supported("-Wno-error=pedantic" CMAKE_CXX_FLAGS)
append_cxx_flag_if_supported("-Wno-error=old-style-cast" CMAKE_CXX_FLAGS)
append_cxx_flag_if_supported("-Wno-error=inconsistent-missing-override" CMAKE_CXX_FLAGS)
append_cxx_flag_if_supported("-Wno-error=inconsistent-missing-destructor-override" CMAKE_CXX_FLAGS)
append_cxx_flag_if_supported("-Wconstant-conversion" CMAKE_CXX_FLAGS)
append_cxx_flag_if_supported("-Wno-invalid-partial-specialization" CMAKE_CXX_FLAGS)
append_cxx_flag_if_supported("-Wno-unused-private-field" CMAKE_CXX_FLAGS)
Expand Down
3 changes: 1 addition & 2 deletions aten/src/ATen/test/cpu_rng_test.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,8 +19,7 @@ constexpr auto kCustomRNG = DispatchKey::CustomRNGKeyId;

struct TestCPUGenerator : public c10::GeneratorImpl {
TestCPUGenerator(uint64_t value) : GeneratorImpl{Device(DeviceType::CPU), DispatchKeySet(kCustomRNG)}, value_(value) { }
// NOLINTNEXTLINE(modernize-use-override)
~TestCPUGenerator() = default;
~TestCPUGenerator() override = default;
uint32_t random() { return value_; }
uint64_t random64() { return value_; }
c10::optional<float> next_float_normal_sample() { return next_float_normal_sample_; }
Expand Down
2 changes: 1 addition & 1 deletion caffe2/contrib/gloo/allgather_ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ class AllgatherOp final : public Operator<Context> {
}
}

virtual ~AllgatherOp() {}
~AllgatherOp() override {}

bool RunOnDevice() override {
std::call_once(once_, [&] { initialize(); });
Expand Down
2 changes: 1 addition & 1 deletion caffe2/contrib/gloo/allreduce_ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ class AllreduceOp final : public Operator<Context> {
}
}

virtual ~AllreduceOp() {}
~AllreduceOp() override {}

bool RunOnDevice() override {
std::call_once(once_, [&] { initialize(); });
Expand Down
2 changes: 1 addition & 1 deletion caffe2/contrib/gloo/barrier_ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ class BarrierOp final : public Operator<Context> {
}
}

virtual ~BarrierOp() {}
~BarrierOp() override {}

bool RunOnDevice() override {
auto context = OperatorBase::Input<std::shared_ptr<::gloo::Context>>(0);
Expand Down
2 changes: 1 addition & 1 deletion caffe2/contrib/gloo/broadcast_ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ class BroadcastOp final : public Operator<Context> {
}
}

virtual ~BroadcastOp() {}
~BroadcastOp() override {}

bool RunOnDevice() override {
std::call_once(once_, [&] { initialize(); });
Expand Down
4 changes: 2 additions & 2 deletions caffe2/contrib/gloo/common_world_ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ class CreateCommonWorld final : public Operator<Context> {
initialize();
}

virtual ~CreateCommonWorld() {
~CreateCommonWorld() override {
}

CommonWorld rendezvousWithMPI() {
Expand Down Expand Up @@ -176,7 +176,7 @@ class CloneCommonWorld final : public Operator<Context> {
}
}

virtual ~CloneCommonWorld() {}
~CloneCommonWorld() override {}

bool RunOnDevice() override {
try {
Expand Down
2 changes: 1 addition & 1 deletion caffe2/contrib/gloo/reduce_scatter_ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ class ReduceScatterOp final : public Operator<Context> {
}
}

virtual ~ReduceScatterOp() {}
~ReduceScatterOp() override {}

bool RunOnDevice() override {
std::call_once(once_, [&] { initialize(); });
Expand Down
2 changes: 1 addition & 1 deletion caffe2/contrib/gloo/store_handler.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ class TORCH_API StoreHandlerWrapper : public ::gloo::rendezvous::Store {
public:
explicit StoreHandlerWrapper(StoreHandler& handler) : handler_(handler) {}

virtual ~StoreHandlerWrapper() {}
virtual ~StoreHandlerWrapper() override {}

virtual void set(const std::string& key, const std::vector<char>& data)
override;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,7 @@ class TORCH_API Tensor : public NeuralNetData {
return D->getKind() == NNDataKind::Tensor;
}

NeuralNetData* clone() {
NeuralNetData* clone() override {
return new Tensor(name_);
}

Expand All @@ -188,7 +188,7 @@ class TORCH_API Tensor : public NeuralNetData {
name_ = name;
}

~Tensor() {}
~Tensor() override {}

private:
std::string name_;
Expand Down
2 changes: 1 addition & 1 deletion caffe2/core/operator.h
Original file line number Diff line number Diff line change
Expand Up @@ -1157,7 +1157,7 @@ class Operator : public OperatorBase {
template <class... Args> \
explicit name(Args&&... args) \
: Operator<Context>(std::forward<Args>(args)...) {} \
virtual ~name() noexcept {}
virtual ~name() noexcept override {}

// Helpers to implement runtime op polymorphism. Often it's convenient to make
// an op work on different input types (e.g. i32 vs i64 indices) or special-case
Expand Down
2 changes: 1 addition & 1 deletion caffe2/core/qtensor_serialization.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ template <class Context>
class QTensorSerializer : public BlobSerializerBase {
public:
QTensorSerializer() : context_() {}
~QTensorSerializer() {}
~QTensorSerializer() override {}
/**
* Serializes a Blob. Note that this blob has to contain QTensor<Context>.
*/
Expand Down
2 changes: 1 addition & 1 deletion caffe2/distributed/file_store_handler.h
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ namespace caffe2 {
class TORCH_API FileStoreHandler : public StoreHandler {
public:
explicit FileStoreHandler(const std::string& path, const std::string& prefix);
virtual ~FileStoreHandler();
~FileStoreHandler() override;

void set(const std::string& name, const std::string& data) override;

Expand Down
12 changes: 6 additions & 6 deletions caffe2/ideep/operators/conv_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,8 +30,8 @@ class IDEEPConvOp : public IDEEPConvPoolOpBase {
algo_ = ialgo::convolution_winograd;
}
}
// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
virtual ~IDEEPConvOp() {}
// NOLINTNEXTLINE(modernize-use-equals-default)
~IDEEPConvOp() override {}

bool RunOnDeviceWithOrderNCHW() override {
const auto& X = Input(INPUT_X);
Expand Down Expand Up @@ -199,8 +199,8 @@ class IDEEPConvFusionOp final : public IDEEPConvOp {
CAFFE_THROW("Unsupported conv fusion type!");
}
}
// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
virtual ~IDEEPConvFusionOp() {}
// NOLINTNEXTLINE(modernize-use-equals-default)
~IDEEPConvFusionOp() override {}
};

const char* kConvFusionDoc = R"DOC(
Expand Down Expand Up @@ -291,8 +291,8 @@ class IDEEPConvGradientOp final : public IDEEPConvPoolOpBase {
"In order to backward propagate weights correctly, "
"please set training_mode=1");
}
// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
virtual ~IDEEPConvGradientOp() {}
// NOLINTNEXTLINE(modernize-use-equals-default)
~IDEEPConvGradientOp() override {}

bool RunOnDeviceWithOrderNCHW() override {
const auto& X = Input(INPUT);
Expand Down
2 changes: 1 addition & 1 deletion caffe2/ideep/operators/conv_pool_base_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ class IDEEPConvPoolOpBase : public ConvPoolOpBase<IDEEPContext> {
public:
IDEEPConvPoolOpBase(const OperatorDef& operator_def, Workspace* ws)
: ConvPoolOpBase<IDEEPContext>(operator_def, ws) {}
virtual ~IDEEPConvPoolOpBase() {}
~IDEEPConvPoolOpBase() override {}

inline const ideep::tensor& Input(int index) {
return OperatorBase::template Input<ideep::tensor>(index);
Expand Down
2 changes: 1 addition & 1 deletion caffe2/ideep/operators/conv_transpose_unpool_base_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -116,7 +116,7 @@ class IDEEPConvTransposeUnpoolBase : public IDEEPOperator {
CAFFE_ENFORCE_LE(adj_[dim], stride_[dim]);
}
}
virtual ~IDEEPConvTransposeUnpoolBase() {}
~IDEEPConvTransposeUnpoolBase() override {}

const ideep::tensor& Input(int index) {
return OperatorBase::template Input<ideep::tensor>(index);
Expand Down
4 changes: 2 additions & 2 deletions caffe2/ideep/operators/quantization/int8_add_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,8 @@ class IDEEPInt8SumReluOp final : public IDEEPOperator {

Y_scales_ = ConvertScales({scale_});
}
// NOLINTNEXTLINE(modernize-use-equals-default,modernize-use-override)
virtual ~IDEEPInt8SumReluOp() {}
// NOLINTNEXTLINE(modernize-use-equals-default)
~IDEEPInt8SumReluOp() override {}

bool RunOnDevice() override {
itensor temp_ten;
Expand Down
16 changes: 8 additions & 8 deletions caffe2/ideep/operators/quantization/int8_conv_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -28,8 +28,8 @@ class IDEEPInt8ConvOp : public IDEEPConvPoolOpBase {
CAFFE_ENFORCE(zero_point_ == 128 || zero_point_ == 0);
Y_scales_ = ConvertScales({scale_});
}
// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
virtual ~IDEEPInt8ConvOp() {}
// NOLINTNEXTLINE(modernize-use-equals-default)
~IDEEPInt8ConvOp() override {}

bool RunOnDeviceWithOrderNCHW() override {
const auto &X = Input(INPUT_X);
Expand Down Expand Up @@ -198,8 +198,8 @@ class IDEEPInt8ConvReluOp final : public IDEEPInt8ConvOp {
attr_ = iattr::fuse_relu();
fusion_type_ = FUSION_CONV_RELU;
}
// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
virtual ~IDEEPInt8ConvReluOp() {}
// NOLINTNEXTLINE(modernize-use-equals-default)
~IDEEPInt8ConvReluOp() override {}
};

class IDEEPInt8ConvSumOp final : public IDEEPInt8ConvOp {
Expand All @@ -213,8 +213,8 @@ class IDEEPInt8ConvSumOp final : public IDEEPInt8ConvOp {
attr_ = iattr::fuse_sum();
fusion_type_ = FUSION_CONV_SUM;
}
// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
virtual ~IDEEPInt8ConvSumOp() {}
// NOLINTNEXTLINE(modernize-use-equals-default)
~IDEEPInt8ConvSumOp() override {}
};

class IDEEPInt8ConvSumReluOp final : public IDEEPInt8ConvOp {
Expand All @@ -228,8 +228,8 @@ class IDEEPInt8ConvSumReluOp final : public IDEEPInt8ConvOp {
attr_ = iattr::residual();
fusion_type_ = FUSION_CONV_SUM_RELU;
}
// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
virtual ~IDEEPInt8ConvSumReluOp() {}
// NOLINTNEXTLINE(modernize-use-equals-default)
~IDEEPInt8ConvSumReluOp() override {}
};

REGISTER_IDEEP_OPERATOR_WITH_ENGINE(Int8Conv, DNNLOWP, IDEEPInt8ConvOp);
Expand Down
4 changes: 2 additions & 2 deletions caffe2/ideep/operators/quantization/int8_dequantize_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,8 @@ class IDEEPInt8DequantizeOp final : public IDEEPOperator {
static_cast<int>(iformat::nchw)));
}
}
// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
virtual ~IDEEPInt8DequantizeOp() {}
// NOLINTNEXTLINE(modernize-use-equals-default)
~IDEEPInt8DequantizeOp() override {}

bool RunOnDevice() override {
const auto& X = Input(0);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ class IDEEPInt8FullyConnectedOp final : public IDEEPOperator {
}
Y_scales_ = ConvertScales({scale_});
}
// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
virtual ~IDEEPInt8FullyConnectedOp() {}
// NOLINTNEXTLINE(modernize-use-equals-default)
~IDEEPInt8FullyConnectedOp() override {}

bool RunOnDevice() override {
const auto& X = Input(INPUT);
Expand Down
4 changes: 2 additions & 2 deletions caffe2/ideep/operators/quantization/int8_quantize_op.cc
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ class IDEEPInt8QuantizeOp final : public IDEEPOperator {
Y_data_type_ = zero_point_ == 0 ? idtype::u8 : idtype::s8;
Y_scales_ = ConvertScales({scale_});
}
// NOLINTNEXTLINE(modernize-use-override,modernize-use-equals-default)
virtual ~IDEEPInt8QuantizeOp() {}
// NOLINTNEXTLINE(modernize-use-equals-default)
~IDEEPInt8QuantizeOp() override {}

bool RunOnDevice() override {
const auto& X = Input(0);
Expand Down
8 changes: 4 additions & 4 deletions caffe2/ideep/utils/ideep_context.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,20 +23,20 @@ class IDEEPContext final : public BaseContext {

~IDEEPContext() noexcept override {}

inline void SwitchToDevice(int64_t /*stream_id*/) {}
inline void SwitchToDevice(int64_t /*stream_id*/) override {}
using BaseContext::SwitchToDevice;

inline void WaitEvent(const Event& ev) {
inline void WaitEvent(const Event& ev) override {
ev.Wait(IDEEP, this);
}

inline void Record(Event* ev, const char* err_msg = nullptr) const {
inline void Record(Event* ev, const char* err_msg = nullptr) const override {
CAFFE_ENFORCE(ev, "Event must not be null.");
ev->Record(IDEEP, this, err_msg);
}


inline void FinishDeviceComputation() {}
inline void FinishDeviceComputation() override {}

inline rand_gen_type& RandGenerator() {
if (!random_generator_.get()) {
Expand Down
4 changes: 2 additions & 2 deletions caffe2/ideep/utils/ideep_operator.h
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,7 @@ class IDEEPOperator : public OperatorBase {
order_(StringToStorageOrder(
OperatorBase::GetSingleArgument<string>("order", "NCHW"))) {
}
virtual ~IDEEPOperator() {}
~IDEEPOperator() override {}

inline const ideep::tensor& Input(int index) {
return OperatorBase::template Input<ideep::tensor>(index);
Expand Down Expand Up @@ -114,7 +114,7 @@ class IDEEPOperator : public OperatorBase {
#define USE_SIMPLE_IDEEP_CTOR_DTOR(name) \
name(const OperatorDef& operator_def, Workspace* ws) \
: IDEEPOperator(operator_def, ws) {} \
virtual ~name() {}
~name() override {}

// Convert zero_point scales to min_max scales
// NOTE:
Expand Down
4 changes: 2 additions & 2 deletions caffe2/mpi/mpi_ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,7 @@ class MPIBroadcastOp final : public Operator<Context> {
MPIBroadcastOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
root_(OperatorBase::template GetSingleArgument<int>("root", 0)) {}
~MPIBroadcastOp() {}
~MPIBroadcastOp() override {}

bool RunOnDevice() override {
MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();
Expand Down Expand Up @@ -65,7 +65,7 @@ class MPIReduceOp final : public Operator<Context> {
MPIReduceOp(const OperatorDef& operator_def, Workspace* ws)
: Operator<Context>(operator_def, ws),
root_(OperatorBase::template GetSingleArgument<int>("root", 0)) {}
~MPIReduceOp() {}
~MPIReduceOp() override {}

bool RunOnDevice() override {
MPI_Comm comm = OperatorBase::Input<MPICommonWorldWrapper>(0).comm();
Expand Down
2 changes: 1 addition & 1 deletion caffe2/operators/batch_gather_ops.h
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ class BatchGatherGradientOp final : public Operator<Context> {
: Operator<Context>(std::forward<Args>(args)...),
OP_SINGLE_ARG(int, "axis", axis_, 1),
OP_SINGLE_ARG(bool, "match_outer", match_outer_, false) {}
virtual ~BatchGatherGradientOp() noexcept {}
~BatchGatherGradientOp() noexcept override {}

bool RunOnDevice() override {
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
Expand Down
4 changes: 2 additions & 2 deletions caffe2/operators/batch_permutation_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ class BatchPermutationOp final : public Operator<Context> {
: Operator<Context>(std::forward<Args>(args)...) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;

bool RunOnDevice();
bool RunOnDevice() override;
};

template <typename T, class Context>
Expand All @@ -29,7 +29,7 @@ class BatchPermutationGradientOp final : public Operator<Context> {
: Operator<Context>(def, ws) {}
USE_OPERATOR_CONTEXT_FUNCTIONS;

bool RunOnDevice();
bool RunOnDevice() override;
};

} // namespace caffe2
Expand Down
2 changes: 1 addition & 1 deletion caffe2/operators/batch_sparse_to_dense_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,7 @@ class BatchDenseToSparseOp : public Operator<Context> {
template <class... Args>
explicit BatchDenseToSparseOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
bool RunOnDevice() {
bool RunOnDevice() override{
return DispatchHelper<TensorTypes<int32_t, int64_t>>::call(
this, Input(LENGTHS));
}
Expand Down
2 changes: 1 addition & 1 deletion caffe2/operators/box_with_nms_limit_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ class BoxWithNMSLimitOp final : public Operator<Context> {
input_scores_fg_cls_starting_id_ = (int)input_boxes_include_bg_cls_;
}

~BoxWithNMSLimitOp() {}
~BoxWithNMSLimitOp() override {}

bool RunOnDevice() override {
if (InputSize() > 2) {
Expand Down
2 changes: 1 addition & 1 deletion caffe2/operators/channel_backprop_stats_op.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ class ChannelBackpropStatsOp : public Operator<Context> {
template <class... Args>
explicit ChannelBackpropStatsOp(Args&&... args)
: Operator<Context>(std::forward<Args>(args)...) {}
~ChannelBackpropStatsOp() {}
~ChannelBackpropStatsOp() override {}

bool RunOnDevice() override {
return true;
Expand Down
Loading

0 comments on commit 483f748

Please sign in to comment.