|
12 | 12 |
|
13 | 13 | #include "common/DataUtils.hpp" |
14 | 14 |
|
15 | | -namespace rajaperf |
16 | | -{ |
17 | | -namespace basic |
18 | | -{ |
| 15 | +namespace rajaperf { |
| 16 | +namespace basic { |
19 | 17 |
|
20 | | - |
21 | | -DAXPY_ATOMIC::DAXPY_ATOMIC(const RunParams& params) |
22 | | - : KernelBase(rajaperf::Basic_DAXPY_ATOMIC, params) |
23 | | -{ |
| 18 | +DAXPY_ATOMIC::DAXPY_ATOMIC(const RunParams &params) |
| 19 | + : KernelBase(rajaperf::Basic_DAXPY_ATOMIC, params) { |
24 | 20 | setDefaultProblemSize(1000000); |
25 | 21 | setDefaultReps(500); |
26 | 22 |
|
27 | | - setActualProblemSize( getTargetProblemSize() ); |
| 23 | + setActualProblemSize(getTargetProblemSize()); |
28 | 24 |
|
29 | | - setItsPerRep( getActualProblemSize() ); |
| 25 | + setItsPerRep(getActualProblemSize()); |
30 | 26 | setKernelsPerRep(1); |
31 | | - setBytesPerRep( (1*sizeof(Real_type) + 2*sizeof(Real_type)) * getActualProblemSize() ); |
| 27 | + setBytesPerRep((1 * sizeof(Real_type) + 2 * sizeof(Real_type)) * |
| 28 | + getActualProblemSize()); |
32 | 29 | setFLOPsPerRep(2 * getActualProblemSize()); |
33 | 30 |
|
34 | 31 | setUsesFeature(Forall); |
35 | 32 |
|
36 | | - setVariantDefined( Base_Seq ); |
37 | | - setVariantDefined( Lambda_Seq ); |
38 | | - setVariantDefined( RAJA_Seq ); |
| 33 | + setVariantDefined(Base_Seq); |
| 34 | + setVariantDefined(Lambda_Seq); |
| 35 | + setVariantDefined(RAJA_Seq); |
39 | 36 |
|
40 | | - setVariantDefined( Base_OpenMP ); |
41 | | - setVariantDefined( Lambda_OpenMP ); |
42 | | - setVariantDefined( RAJA_OpenMP ); |
| 37 | + setVariantDefined(Base_OpenMP); |
| 38 | + setVariantDefined(Lambda_OpenMP); |
| 39 | + setVariantDefined(RAJA_OpenMP); |
43 | 40 |
|
44 | | - setVariantDefined( Base_OpenMPTarget ); |
45 | | - setVariantDefined( RAJA_OpenMPTarget ); |
| 41 | + setVariantDefined(Base_OpenMPTarget); |
| 42 | + setVariantDefined(RAJA_OpenMPTarget); |
46 | 43 |
|
47 | | - setVariantDefined( Base_CUDA ); |
48 | | - setVariantDefined( Lambda_CUDA ); |
49 | | - setVariantDefined( RAJA_CUDA ); |
| 44 | + setVariantDefined(Base_CUDA); |
| 45 | + setVariantDefined(Lambda_CUDA); |
| 46 | + setVariantDefined(RAJA_CUDA); |
50 | 47 |
|
51 | | - setVariantDefined( Base_HIP ); |
52 | | - setVariantDefined( Lambda_HIP ); |
53 | | - setVariantDefined( RAJA_HIP ); |
54 | | -} |
| 48 | + setVariantDefined(Base_HIP); |
| 49 | + setVariantDefined(Lambda_HIP); |
| 50 | + setVariantDefined(RAJA_HIP); |
55 | 51 |
|
56 | | -DAXPY_ATOMIC::~DAXPY_ATOMIC() |
57 | | -{ |
| 52 | + setVariantDefined(Kokkos_Lambda); |
58 | 53 | } |
59 | 54 |
|
60 | | -void DAXPY_ATOMIC::setUp(VariantID vid) |
61 | | -{ |
| 55 | +DAXPY_ATOMIC::~DAXPY_ATOMIC() {} |
| 56 | + |
| 57 | +void DAXPY_ATOMIC::setUp(VariantID vid) { |
62 | 58 | allocAndInitDataConst(m_y, getActualProblemSize(), 0.0, vid); |
63 | 59 | allocAndInitData(m_x, getActualProblemSize(), vid); |
64 | 60 | initData(m_a); |
65 | 61 | } |
66 | 62 |
|
67 | | -void DAXPY_ATOMIC::updateChecksum(VariantID vid) |
68 | | -{ |
| 63 | +void DAXPY_ATOMIC::updateChecksum(VariantID vid) { |
69 | 64 | checksum[vid] += calcChecksum(m_y, getActualProblemSize()); |
70 | 65 | } |
71 | 66 |
|
72 | | -void DAXPY_ATOMIC::tearDown(VariantID vid) |
73 | | -{ |
74 | | - (void) vid; |
| 67 | +void DAXPY_ATOMIC::tearDown(VariantID vid) { |
| 68 | + (void)vid; |
75 | 69 | deallocData(m_x); |
76 | 70 | deallocData(m_y); |
77 | 71 | } |
|
0 commit comments