From 78256184878c1ed721a2090ee5df0724dc5d1353 Mon Sep 17 00:00:00 2001 From: zhangzefeng92 Date: Wed, 31 Jul 2024 13:14:34 +0800 Subject: [PATCH] fix ci bug --- .../python/conformance/diopi_functions.py | 30 +++++++++++-------- impl/ascend/device_configs.py | 12 ++++++++ impl/camb/device_configs.py | 12 ++++++++ impl/torch/functions/functions.cpp | 15 ++-------- 4 files changed, 45 insertions(+), 24 deletions(-) diff --git a/diopi_test/python/conformance/diopi_functions.py b/diopi_test/python/conformance/diopi_functions.py index a023fb5fc3..6ea4c53690 100644 --- a/diopi_test/python/conformance/diopi_functions.py +++ b/diopi_test/python/conformance/diopi_functions.py @@ -1649,19 +1649,22 @@ def _foreach_add(self, scalar): ctx = self[0].context() num_tensors = len(self) func = check_function("diopiForeachaddScalar") - out = [] + tensors_in = [] + tensors_out = [] for i in range(num_tensors): - out.append(Tensor(self[i].size(),self[i].get_dtype())) + item_out = Tensor(self[i].size(),self[i].get_dtype()) + tensors_in.append(TensorP(self[i])) + tensors_out.append(TensorP(item_out)) ret = func( ctx, - get_capsule(out), - self, + list(tensors_out), + list(tensors_in), num_tensors, - scalar + Scalar(scalar) ) check_returncode(ret) - return out.value + return tensors_out def _foreach_mul(self, scalar): assert isinstance(scalar, (int, float)), "norm_type must be a int or float" @@ -1669,19 +1672,22 @@ def _foreach_mul(self, scalar): ctx = self[0].context() num_tensors = len(self) func = check_function("diopiForeachmulScalar") - out = [] + tensors_in = [] + tensors_out = [] for i in range(num_tensors): - out.append(Tensor(self[i].size(),self[i].get_dtype())) + item_out = Tensor(self[i].size(),self[i].get_dtype()) + tensors_in.append(TensorP(self[i])) + tensors_out.append(TensorP(item_out)) ret = func( ctx, - get_capsule(out), - self, + list(tensors_out), + list(tensors_in), num_tensors, - scalar + Scalar(scalar) ) check_returncode(ret) - return out.value + return tensors_out 
def batch_norm( input, diff --git a/impl/ascend/device_configs.py b/impl/ascend/device_configs.py index 70a49bb9b0..e8cea6ccde 100755 --- a/impl/ascend/device_configs.py +++ b/impl/ascend/device_configs.py @@ -428,6 +428,18 @@ ), ), + 'foreach_op': dict( + name=["_foreach_mul","_foreach_add"], + tensor_para=dict( + args=[ + { + "ins": ['self'], + "shape": [Skip(()),Skip((10,)),Skip((10, 2, 5)),Skip((20,)),Skip((10, 5, 1)),Skip((20, 3, 4, 5)),Skip((20, 2, 3, 4, 5)),Skip((0,)), Skip((0, 10)), Skip((5, 0, 9))], + }, + ] + ) + ), + 'sigmoid_focal_loss': dict( name=['sigmoid_focal_loss'], tensor_para=dict( diff --git a/impl/camb/device_configs.py b/impl/camb/device_configs.py index 8799aec1f7..2ec6e7da7f 100644 --- a/impl/camb/device_configs.py +++ b/impl/camb/device_configs.py @@ -903,6 +903,18 @@ ), ), + 'foreach_op': dict( + name=["_foreach_mul","_foreach_add"], + tensor_para=dict( + args=[ + { + "ins": ['self'], + "shape": [Skip(()),Skip((10,)),Skip((10, 2, 5)),Skip((20,)),Skip((10, 5, 1)),Skip((20, 3, 4, 5)),Skip((20, 2, 3, 4, 5)),Skip((0,)), Skip((0, 10)), Skip((5, 0, 9))], + }, + ] + ) + ), + 'transpose': dict( name=['transpose'], tensor_para=dict( diff --git a/impl/torch/functions/functions.cpp b/impl/torch/functions/functions.cpp index cda0ed467d..5b6125689c 100644 --- a/impl/torch/functions/functions.cpp +++ b/impl/torch/functions/functions.cpp @@ -1168,13 +1168,10 @@ diopiError_t diopiAddInpScalar(diopiContextHandle_t ctx, diopiTensorHandle_t inp diopiError_t diopiForeachaddScalar(diopiContextHandle_t ctx, diopiTensorHandle_t* outs, diopiConstTensorHandle_t* inputs, int64_t inputSize, const diopiScalar_t* other) { impl::aten::setCurStream(ctx); - DIOPI_IMPL_BUILD_ATEN_LIST(atOuts, outs, inputSize) DIOPI_IMPL_BUILD_ATEN_LIST(atInputs, inputs, inputSize) auto atOther = impl::aten::buildAtScalar(other); auto tempOut = CALL_ATEN_CUDA_FUNC(_foreach_add, atInputs, atOther); - for (int i = 0; i < inputSize; i++) { - *(reinterpret_cast<at::Tensor*>(outs[i])) = tempOut[i]; - } + 
impl::aten::buildDiopiTensor(ctx, tempOut, outs); return diopiSuccess; } @@ -1284,12 +1281,9 @@ diopiError_t diopiForeachmulScalar(diopiContextHandle_t ctx, diopiTensorHandle_t DIOPI_CHECK_PTR(outs); impl::aten::setCurStream(ctx); DIOPI_IMPL_BUILD_ATEN_LIST(atInputs, inputs, inputSize) - DIOPI_IMPL_BUILD_ATEN_LIST(atOuts, outs, inputSize) auto atOther = impl::aten::buildAtScalar(other); auto tempOut = CALL_ATEN_CUDA_FUNC(_foreach_mul, atInputs, atOther); - for (int i = 0; i < inputSize; i++) { - *(reinterpret_cast<at::Tensor*>(outs[i])) = tempOut[i]; - } + impl::aten::buildDiopiTensor(ctx, tempOut, outs); return diopiSuccess; } @@ -1308,12 +1302,9 @@ diopiError_t diopiForeachmulTensor(diopiContextHandle_t ctx, diopiTensorHandle_t DIOPI_CHECK_PTR(outs); impl::aten::setCurStream(ctx); DIOPI_IMPL_BUILD_ATEN_LIST(atInputs, inputs, inputSize) - DIOPI_IMPL_BUILD_ATEN_LIST(atOuts, outs, inputSize) auto atOther = impl::aten::buildATen(other); auto tempOut = CALL_ATEN_CUDA_FUNC(_foreach_mul, atInputs, atOther); - for (int i = 0; i < inputSize; i++) { - *(reinterpret_cast<at::Tensor*>(outs[i])) = tempOut[i]; - } + impl::aten::buildDiopiTensor(ctx, tempOut, outs); return diopiSuccess; }