Skip to content

Commit

Permalink
yb/add clang-tidy and fix (DeepLink-org#76)
Browse files Browse the repository at this point in the history
* add clang-tidy for code static check and code style check
  • Loading branch information
yangbofun authored May 23, 2023
1 parent 6834e28 commit 52ae9cd
Show file tree
Hide file tree
Showing 87 changed files with 3,976 additions and 3,479 deletions.
68 changes: 68 additions & 0 deletions .clang-tidy
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
---
Checks: '
-*,
clang-diagnostic-*,
bugprone-*,
-bugprone-easily-swappable-parameters,
-bugprone-forward-declaration-namespace,
-bugprone-macro-parentheses,
-bugprone-lambda-function-name,
-bugprone-reserved-identifier,
-bugprone-swapped-arguments,
-bugprone-narrowing-conversions,
misc-*,-misc-const-correctness,
-misc-unused-parameters,
-misc-non-private-member-variables-in-classes,
-misc-no-recursion,
-misc-use-anonymous-namespace,
hicpp-avoid-goto,
modernize-*,
-modernize-concat-nested-namespaces,
-modernize-macro-to-enum,
-modernize-return-braced-init-list,
-modernize-use-auto,
-modernize-use-default-member-init,
-modernize-use-using,
-modernize-use-trailing-return-type,
-modernize-use-nodiscard,
-modernize-avoid-c-arrays,
performance-*,
-performance-noexcept-move-constructor,
-performance-unnecessary-value-param,
readability-identifier-naming,
readability-container-size-empty,
'


# NOTE there must be no spaces before the '-', so put the comma last.
CheckOptions:
- key: readability-identifier-naming.ClassCase
value: "CamelCase"
- key: readability-identifier-naming.ClassMethodCase
value: "camelBack"
- key: readability-identifier-naming.EnumCase
value: "CamelCase"
- key: readability-identifier-naming.FunctionCase
value: "camelBack"
- key: readability-identifier-naming.MemberCase
value: "camelBack"
- key: readability-identifier-naming.MemberSuffix
value: "_"
- key: readability-identifier-naming.ParameterCase
value: "camelBack"
- key: readability-identifier-naming.UnionCase
value: "camelBack"
- key: readability-identifier-naming.VariableCase
value: "camelBack"
- key: readability-identifier-naming.IgnoreMainLikeFunctions
value: 1
- key: readability-redundant-member-init.IgnoreBaseInCopyConstructors
value: 1
- key: modernize-use-default-member-init.UseAssignment
value: 1

HeaderFilterRegex: '.*'
AnalyzeTemporaryDtors: false
WarningsAsErrors: '*'
UseColor: true
...
8 changes: 8 additions & 0 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -113,6 +113,14 @@ jobs:
export DIOPI_BUILD_TESTRT=ON
srun --job-name=${GITHUB_JOB} --partition=${SLURM_PAR_CAMB} --time=10 bash -c 'cd DIOPI-IMPL && bash scripts/build_impl.sh camb' || ( cd ${NFS_PATH}/${GITHUB_RUN_NUMBER}/ && rm -rf ${BUILD_TEST1} && exit 1 )
"""
- name: clang-tidy
run: |
ssh ${CLUSTER_CAMB} """
set -e
source /mnt/cache/share/platform/env/camb_ci_diopi_impl
cd ${NFS_PATH}/${GITHUB_RUN_NUMBER} && cd ${BUILD_TEST1}
srun --job-name=${GITHUB_JOB} --partition=${SLURM_PAR_CAMB} --time=10 bash -c 'cd DIOPI-IMPL && bash scripts/ci_script.sh clang-tidy' || ( cd ${NFS_PATH}/${GITHUB_RUN_NUMBER}/ && rm -rf ${BUILD_TEST1} && exit 1 )
"""
Build-Mmcv-Ext-Nvidia:
name: Build-Mmcv-Ext-Nvidia
Expand Down
2 changes: 1 addition & 1 deletion DIOPI-IMPL/camb/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -49,4 +49,4 @@ set(THIRD_PARTY_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}/../third_party/half/inc
set_target_properties(${DEVICEIMPL} PROPERTIES SUFFIX ".so")

target_link_libraries(${DEVICEIMPL} cndev cnrt cnnl cnmlrt)
target_include_directories(${DEVICEIMPL} PUBLIC ${THIRD_PARTY_INCLUDE_DIRS})
target_include_directories(${DEVICEIMPL} SYSTEM PUBLIC ${THIRD_PARTY_INCLUDE_DIRS})
12 changes: 6 additions & 6 deletions DIOPI-IMPL/camb/cnnl_helper.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ diopiError_t CnnlDataType::convertToCnnlType(cnnlDataType_t* cnnlType, diopiDtyp
*cnnlType = CNNL_DTYPE_INT64;
break;
default:
set_last_error_string("unknown diopitype error %d at %s:%d", type, __FILE__, __LINE__);
setLastErrorString("unknown diopitype error %d at %s:%d", type, __FILE__, __LINE__);
return diopiDtypeNotSupported;
}
return diopiSuccess;
Expand Down Expand Up @@ -125,7 +125,7 @@ const std::unordered_map<std::vector<diopiDtype_t>, cnnlCastDataType_t, HashCnnl

CnnlHandlePool cnnlHandlePool;

diopiError_t cnnl_transpose(
diopiError_t cnnlTranspose(
diopiContextHandle_t& ctx, cnnlHandle_t& handle, DiopiTensor& in, DiopiTensor& out, cnnlTensorLayout_t layoutIn, cnnlTensorLayout_t layoutOut) {
/* DEPRECATED AND WILL BE REMOVED */
DIOPI_CHECK(in.dtype() == out.dtype(), "the data type of input and output tensor should be the same.");
Expand All @@ -151,11 +151,11 @@ diopiError_t cnnl_transpose(
CnnlTensorDesc inDesc(in, layoutIn);
CnnlTensorDesc outDesc(out, layoutOut);
CnnlTransposeDescriptor transDesc(order.size(), order.data());
size_t workspace_size = 0;
DIOPI_CALLCNNL(cnnlGetTransposeWorkspaceSize(handle, inDesc.get(), transDesc.get(), &workspace_size));
size_t workspaceSize = 0;
DIOPI_CALLCNNL(cnnlGetTransposeWorkspaceSize(handle, inDesc.get(), transDesc.get(), &workspaceSize));

void* workspace_ptr = workspace_size == 0 ? requiresBuffer(ctx, workspace_size).data() : nullptr;
DIOPI_CALLCNNL(cnnlTranspose_v2(handle, transDesc.get(), inDesc.get(), in.data(), outDesc.get(), out.data(), workspace_ptr, workspace_size));
void* workspacePtr = workspaceSize == 0 ? requiresBuffer(ctx, workspaceSize).data() : nullptr;
DIOPI_CALLCNNL(cnnlTranspose_v2(handle, transDesc.get(), inDesc.get(), in.data(), outDesc.get(), out.data(), workspacePtr, workspaceSize));
return diopiSuccess;
}

Expand Down
44 changes: 22 additions & 22 deletions DIOPI-IMPL/camb/cnnl_helper.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ namespace camb {
do { \
::cnnlStatus_t ret = Expr; \
if (ret != ::CNNL_STATUS_SUCCESS) { \
set_last_error_string("cnnl error %d : %s at %s:%d", ret, ::cnnlGetErrorString(ret), __FILE__, __LINE__); \
setLastErrorString("cnnl error %d : %s at %s:%d", ret, ::cnnlGetErrorString(ret), __FILE__, __LINE__); \
return diopiErrorOccurred; \
} \
} while (false);
Expand Down Expand Up @@ -99,8 +99,8 @@ class CnnlTensorDesc : public CnnlDescBase<cnnlTensorDescriptor_t, cnnlCreateTen
DIOPI_CALL(CnnlDataType::convertToCnnlType(&dtype, t.dtype()));

if (!dim) {
std::vector<int> dim_array(1, 1);
DIOPI_CALLCNNL(cnnlSetTensorDescriptorEx(get(), CNNL_LAYOUT_ARRAY, dtype, 1, dim_array.data(), dim_array.data()));
std::vector<int> dimArray(1, 1);
DIOPI_CALLCNNL(cnnlSetTensorDescriptorEx(get(), CNNL_LAYOUT_ARRAY, dtype, 1, dimArray.data(), dimArray.data()));
return diopiSuccess;
}

Expand All @@ -115,14 +115,14 @@ class CnnlTensorDesc : public CnnlDescBase<cnnlTensorDescriptor_t, cnnlCreateTen
} else if (layout == CNNL_LAYOUT_HWCN) {
// HWCN is only used by depthwise conv now, and the dim is 4
DIOPI_CHECK(dim == 4, "depthwise convolution input's dim must be 4!");
auto convert_shape_stride_hwcn = [](const std::vector<int64_t>& vec, std::vector<int>& target_vec) {
target_vec[0] = static_cast<int>(vec[2]);
target_vec[1] = static_cast<int>(vec[3]);
target_vec[2] = static_cast<int>(vec[1]);
target_vec[3] = static_cast<int>(vec[0]);
auto convertShapeStrideHwcn = [](const std::vector<int64_t>& vec, std::vector<int>& targetVec) {
targetVec[0] = static_cast<int>(vec[2]);
targetVec[1] = static_cast<int>(vec[3]);
targetVec[2] = static_cast<int>(vec[1]);
targetVec[3] = static_cast<int>(vec[0]);
};
convert_shape_stride_hwcn(dimSize, shape);
convert_shape_stride_hwcn(dimStride, stride);
convertShapeStrideHwcn(dimSize, shape);
convertShapeStrideHwcn(dimStride, stride);
} else {
for (size_t i = 0; i < dim; ++i) {
shape[i] = dimSize[i];
Expand Down Expand Up @@ -151,7 +151,7 @@ class CnnlHandlePool final {
cnnlHandle_t cnnlHandle;
cnnlCreate(&cnnlHandle);
cnnlSetQueue(cnnlHandle, queue);
cnnlHandlePool_.emplace(std::make_pair(queue, cnnlHandle));
cnnlHandlePool_.emplace(queue, cnnlHandle);
return cnnlHandle;
}

Expand All @@ -177,7 +177,7 @@ class CnnlHandlePool final {

class CnnlTransposeDescriptor final : public CnnlDescBase<cnnlTransposeDescriptor_t, cnnlCreateTransposeDescriptor, cnnlDestroyTransposeDescriptor> {
public:
CnnlTransposeDescriptor() {}
CnnlTransposeDescriptor() = default;

CnnlTransposeDescriptor(const int dim, const int* permute) { set(dim, permute); }

Expand All @@ -189,21 +189,21 @@ class CnnlTransposeDescriptor final : public CnnlDescBase<cnnlTransposeDescripto

class CnnlReduceDescriptor final : public CnnlDescBase<cnnlReduceDescriptor_t, cnnlCreateReduceDescriptor, cnnlDestroyReduceDescriptor> {
public:
CnnlReduceDescriptor() {}

diopiError_t set(DiopiTensor& t, std::vector<int64_t> axis, cnnlReduceOp_t reduce_op, cnnlReduceIndices_t is_indices, cnnlIndicesType_t indices_type,
cnnlDataType_t tensor_type) {
int axis_num = axis.size();
std::vector<int> axis_list(axis_num);
for (int i = 0; i < axis_num; i++) {
axis_list[i] = static_cast<int>(axis[i]);
CnnlReduceDescriptor() = default;

diopiError_t set(DiopiTensor& t, std::vector<int64_t> axis, cnnlReduceOp_t reduceOp, cnnlReduceIndices_t isIndices, cnnlIndicesType_t indicesType,
cnnlDataType_t tensorType) {
int axisNum = axis.size();
std::vector<int> axisList(axisNum);
for (int i = 0; i < axisNum; i++) {
axisList[i] = static_cast<int>(axis[i]);
}
DIOPI_CALLCNNL(cnnlSetReduceDescriptor(get(), axis_list.data(), axis_num, reduce_op, tensor_type, CNNL_NOT_PROPAGATE_NAN, is_indices, indices_type));
DIOPI_CALLCNNL(cnnlSetReduceDescriptor(get(), axisList.data(), axisNum, reduceOp, tensorType, CNNL_NOT_PROPAGATE_NAN, isIndices, indicesType));
return diopiSuccess;
}
};

diopiError_t cnnl_transpose(diopiContextHandle_t& ctx, cnnlHandle_t& handle, DiopiTensor& in, DiopiTensor& out, cnnlTensorLayout_t layoutIn,
diopiError_t cnnlTranspose(diopiContextHandle_t& ctx, cnnlHandle_t& handle, DiopiTensor& in, DiopiTensor& out, cnnlTensorLayout_t layoutIn,
cnnlTensorLayout_t layoutOut);

struct HashCnnlCastDType {
Expand Down
82 changes: 41 additions & 41 deletions DIOPI-IMPL/camb/common/basic_op.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,72 +4,72 @@ namespace impl {
namespace camb {

template <typename T1, typename T2, typename T3>
diopiError_t cnnl_op_tensor(diopiContextHandle_t ctx, DiopiTensor input, DiopiTensor other, DiopiTensor out, cnnlOpTensorDesc_t op_type, T1 alpha1, T2 alpha2,
diopiError_t cnnlOpTensor(diopiContextHandle_t ctx, DiopiTensor input, DiopiTensor other, DiopiTensor out, cnnlOpTensorDesc_t opType, T1 alpha1, T2 alpha2,
T3 beta) {
cnnlHandle_t handle = cnnlHandlePool.get(ctx);

DiopiTensor input_casted = input;
DiopiTensor other_casted = other;
DiopiTensor output_casted = out;
DiopiTensor inputCasted = input;
DiopiTensor otherCasted = other;
DiopiTensor outputCasted = out;

std::vector<DiopiTensor*> tensors{&input_casted, &other_casted, &output_casted};
std::vector<DiopiTensor*> tensors{&inputCasted, &otherCasted, &outputCasted};
DIOPI_CALL(autoCastTensorType(ctx, tensors, {diopi_dtype_float16, diopi_dtype_float32, diopi_dtype_int32}));

cnnlDataType_t comp_type;
DIOPI_CALL(CnnlDataType::convertToCnnlType(&comp_type, input_casted.dtype()));
cnnlDataType_t compType;
DIOPI_CALL(CnnlDataType::convertToCnnlType(&compType, inputCasted.dtype()));

CnnlResourceGuard<cnnlOpTensorDescriptor_t, cnnlCreateOpTensorDescriptor, cnnlDestroyOpTensorDescriptor> op_desc;
CnnlResourceGuard<cnnlOpTensorDescriptor_t, cnnlCreateOpTensorDescriptor, cnnlDestroyOpTensorDescriptor> opDesc;

DIOPI_CALLCNNL(cnnlSetOpTensorDescriptor(op_desc.get(), CNNL_OP_TENSOR_SUB, comp_type, CNNL_NOT_PROPAGATE_NAN));
DIOPI_CALLCNNL(cnnlSetOpTensorDescriptor(opDesc.get(), CNNL_OP_TENSOR_SUB, compType, CNNL_NOT_PROPAGATE_NAN));

std::shared_ptr<void> alpha1_value = nullptr;
std::shared_ptr<void> alpha2_value = nullptr;
std::shared_ptr<void> beta_value = nullptr;
std::shared_ptr<void> alpha1Value = nullptr;
std::shared_ptr<void> alpha2Value = nullptr;
std::shared_ptr<void> betaValue = nullptr;

if (DiopiDataType::isInteger(input_casted.dtype())) {
alpha1_value = std::make_shared<int32_t>(alpha1);
alpha2_value = std::make_shared<int32_t>(alpha2);
beta_value = std::make_shared<int32_t>(beta);
} else if (DiopiDataType::isFloatPoint(input_casted.dtype())) {
alpha1_value = std::make_shared<float>(alpha1);
alpha2_value = std::make_shared<float>(alpha2);
beta_value = std::make_shared<float>(beta);
if (DiopiDataType::isInteger(inputCasted.dtype())) {
alpha1Value = std::make_shared<int32_t>(alpha1);
alpha2Value = std::make_shared<int32_t>(alpha2);
betaValue = std::make_shared<int32_t>(beta);
} else if (DiopiDataType::isFloatPoint(inputCasted.dtype())) {
alpha1Value = std::make_shared<float>(alpha1);
alpha2Value = std::make_shared<float>(alpha2);
betaValue = std::make_shared<float>(beta);
} else {
set_last_error_string("%s", "cnnl op tensor only support int or float type.\n");
setLastErrorString("%s", "cnnl op tensor only support int or float type.\n");
return diopiDtypeNotSupported;
}
CnnlTensorDesc input_desc(input_casted, CNNL_LAYOUT_ARRAY);
CnnlTensorDesc other_desc(other_casted, CNNL_LAYOUT_ARRAY);
CnnlTensorDesc output_desc(output_casted, CNNL_LAYOUT_ARRAY);
CnnlTensorDesc inputDesc(inputCasted, CNNL_LAYOUT_ARRAY);
CnnlTensorDesc otherDesc(otherCasted, CNNL_LAYOUT_ARRAY);
CnnlTensorDesc outputDesc(outputCasted, CNNL_LAYOUT_ARRAY);

size_t workspace_size = 0;
DIOPI_CALLCNNL(cnnlGetOpTensorWorkspaceSize(handle, input_desc.get(), other_desc.get(), output_desc.get(), &workspace_size));
size_t workspaceSize = 0;
DIOPI_CALLCNNL(cnnlGetOpTensorWorkspaceSize(handle, inputDesc.get(), otherDesc.get(), outputDesc.get(), &workspaceSize));

void* workspace = nullptr;
if (workspace_size != 0) {
workspace = requiresBuffer(ctx, workspace_size).data();
if (workspaceSize != 0) {
workspace = requiresBuffer(ctx, workspaceSize).data();
}

DIOPI_CALLCNNL(cnnlOpTensor(handle,
op_desc.get(),
alpha1_value.get(),
input_desc.get(),
input_casted.data(),
alpha2_value.get(),
other_desc.get(),
other_casted.data(),
opDesc.get(),
alpha1Value.get(),
inputDesc.get(),
inputCasted.data(),
alpha2Value.get(),
otherDesc.get(),
otherCasted.data(),
workspace,
workspace_size,
beta_value.get(),
output_desc.get(),
output_casted.data()));
workspaceSize,
betaValue.get(),
outputDesc.get(),
outputCasted.data()));

DIOPI_CALL(dataTypeCast(ctx, out, output_casted));
DIOPI_CALL(dataTypeCast(ctx, out, outputCasted));
return diopiSuccess;
}

// Explicitly instantiate the template function for use in other .cpp files.
template diopiError_t cnnl_op_tensor<double, double, double>(diopiContextHandle_t ctx, DiopiTensor input, DiopiTensor other, DiopiTensor out,
template diopiError_t cnnlOpTensor<double, double, double>(diopiContextHandle_t ctx, DiopiTensor input, DiopiTensor other, DiopiTensor out,
cnnlOpTensorDesc_t op_type, double alpha1, double alpha2, double beta);

} // namespace camb
Expand Down
16 changes: 8 additions & 8 deletions DIOPI-IMPL/camb/common/broadcast.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -30,16 +30,16 @@ diopiError_t broadcast(diopiContextHandle_t ctx, DiopiTensor& out, const DiopiTe
return diopiSuccess;
}

diopiError_t broadcastHelper(diopiContextHandle_t ctx, DiopiTensor input_tensor, DiopiTensor target_tensor, DiopiTensor* out_tensor) {
diopiTensorHandle_t bcast_input = nullptr;
DiopiTensor bcast_input_tensor;
if (input_tensor.shape() != target_tensor.shape()) {
bcast_input_tensor = requiresTensor(ctx, vec2diopiSize_t(target_tensor.shape()), target_tensor.dtype());
DIOPI_CALL(broadcast(ctx, bcast_input_tensor, input_tensor));
diopiError_t broadcastHelper(diopiContextHandle_t ctx, DiopiTensor inputTensor, DiopiTensor targetTensor, DiopiTensor* outTensor) {
diopiTensorHandle_t bcastInput = nullptr;
DiopiTensor bcastInputTensor;
if (inputTensor.shape() != targetTensor.shape()) {
bcastInputTensor = requiresTensor(ctx, vec2diopiSizeT(targetTensor.shape()), targetTensor.dtype());
DIOPI_CALL(broadcast(ctx, bcastInputTensor, inputTensor));
} else {
bcast_input_tensor = input_tensor;
bcastInputTensor = inputTensor;
}
*out_tensor = bcast_input_tensor;
*outTensor = bcastInputTensor;
return diopiSuccess;
}

Expand Down
1 change: 1 addition & 0 deletions DIOPI-IMPL/camb/common/clone.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ diopiError_t clone(diopiContextHandle_t ctx, const DiopiTensor& inTensor, DiopiT
CnnlTensorDesc inTensorDesc(inTensor, CNNL_LAYOUT_ARRAY);
CnnlTensorDesc outTensorDesc(outTensor, CNNL_LAYOUT_ARRAY);
DIOPI_CALLCNNL(cnnlCopy(handle, inTensorDesc.get(), inTensor.data(), outTensorDesc.get(), outTensor.data()));
return diopiSuccess;
}

} // namespace camb
Expand Down
8 changes: 4 additions & 4 deletions DIOPI-IMPL/camb/common/common.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,14 +26,14 @@ diopiError_t autoCastTensorType(diopiContextHandle_t ctx, const std::vector<Diop

diopiError_t broadcast(diopiContextHandle_t ctx, DiopiTensor& out, const DiopiTensor& input);

diopiError_t broadcastHelper(diopiContextHandle_t ctx, DiopiTensor input_tensor, DiopiTensor target_tensor, DiopiTensor* out_tensor);
diopiError_t broadcastHelper(diopiContextHandle_t ctx, DiopiTensor inputTensor, DiopiTensor targetTensor, DiopiTensor* outTensor);

diopiError_t contiguous_(diopiContextHandle_t& ctx, DiopiTensor& src, MemoryFormat memory_format);
diopiError_t contiguous(diopiContextHandle_t& ctx, DiopiTensor& src, MemoryFormat memoryFormat);

diopiError_t contiguous_(diopiContextHandle_t& ctx, DiopiTensor& src, MemoryFormat memory_format, cnnlTensorLayout_t layout_in, cnnlTensorLayout_t layout_out);
diopiError_t contiguous(diopiContextHandle_t& ctx, DiopiTensor& src, MemoryFormat memoryFormat, cnnlTensorLayout_t layoutIn, cnnlTensorLayout_t layoutOut);

template<typename T1 = double, typename T2 = double, typename T3 = double>
diopiError_t cnnl_op_tensor(diopiContextHandle_t ctx, DiopiTensor input, DiopiTensor other, DiopiTensor out, cnnlOpTensorDesc_t op_type, T1 alpha1 = 1.0,
diopiError_t cnnlOpTensor(diopiContextHandle_t ctx, DiopiTensor input, DiopiTensor other, DiopiTensor out, cnnlOpTensorDesc_t opType, T1 alpha1 = 1.0,
T2 alpha2 = 1.0, T3 beta = 0.0);

diopiError_t clone(diopiContextHandle_t ctx, const DiopiTensor& inTensor, DiopiTensor& outTensor);
Expand Down
Loading

0 comments on commit 52ae9cd

Please sign in to comment.