Skip to content

Commit 5e61a32

Browse files
committed
* review
1 parent dfe2ddd commit 5e61a32

File tree

3 files changed

+48
-47
lines changed

3 files changed

+48
-47
lines changed

apps/cpp_clml/clml_runner.cc

Lines changed: 33 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -59,12 +59,12 @@ CLMLRunner::CLMLRunner(std::string name, ToolArgs& args, cl_platform_id arg_plat
5959
cl_int majorVersions[MAX_VERSIONS];
6060
cl_int minorVersions[MAX_VERSIONS];
6161
cl_uint numVersions = 0;
62-
result = clQueryMLInterfaceVersionsQCOM(NULL, NULL, 0, &numVersions);
62+
result = clQueryMLInterfaceVersionsQCOM(nullptr, nullptr, 0, &numVersions);
6363
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
6464
CLML_SDK_TEST_AND_EXIT(numVersions > 0u);
6565
CLML_SDK_TEST_AND_EXIT(numVersions <= MAX_VERSIONS);
6666

67-
result = clQueryMLInterfaceVersionsQCOM(majorVersions, minorVersions, numVersions, NULL);
67+
result = clQueryMLInterfaceVersionsQCOM(majorVersions, minorVersions, numVersions, nullptr);
6868
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
6969

7070
for (cl_uint i = 0; i < numVersions; ++i) {
@@ -74,7 +74,7 @@ CLMLRunner::CLMLRunner(std::string name, ToolArgs& args, cl_platform_id arg_plat
7474
break;
7575
}
7676
}
77-
CLML_SDK_TEST_AND_EXIT(this->h_ClmlIntf != NULL);
77+
CLML_SDK_TEST_AND_EXIT(this->h_ClmlIntf != nullptr);
7878

7979
result = h_ClmlIntf->clCreateMLTuningCacheQCOM(&tuning_cache);
8080
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
@@ -103,8 +103,8 @@ int CLMLRunner::Run(void) {
103103
cl_int result;
104104

105105
for (size_t i = 0; i < this->function.size(); ++i) {
106-
result =
107-
h_ClmlIntf->clEnqueueMLOpQCOM(queue, this->function[i], this->descriptorSet, 0, NULL, NULL);
106+
result = h_ClmlIntf->clEnqueueMLOpQCOM(queue, this->function[i], this->descriptorSet, 0,
107+
nullptr, nullptr);
108108
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
109109
}
110110
if (!r_args.output.empty()) {
@@ -155,13 +155,13 @@ void CLMLRunner::PrintMetaInfo(void) { LOG(INFO) << "\n" << this->meta_info; }
155155
void CLMLRunner::CopyDataToCLMLTensor(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> tensor,
156156
void* data, cl_ml_tensor_layout_qcom layout) {
157157
cl_int result = 0;
158-
cl_event evt = NULL;
158+
cl_event evt = nullptr;
159159
result = h_ClmlIntf->clEnqueueWriteMLTensorDataQCOM(this->queue, data, layout, tensor->tensor,
160160
tensor->memory,
161-
0, // n waitlist
162-
NULL, // waitlist
163-
&evt); // event
164-
CLML_SDK_TEST_AND_EXIT((evt != NULL) && result == CL_SUCCESS);
161+
0, // n waitlist
162+
nullptr, // waitlist
163+
&evt); // event
164+
CLML_SDK_TEST_AND_EXIT((evt != nullptr) && result == CL_SUCCESS);
165165
}
166166

167167
/*!
@@ -173,12 +173,12 @@ void CLMLRunner::CopyDataToCLMLTensor(std::shared_ptr<cl_ml_tensor_memory_desc_q
173173
void CLMLRunner::CopyDataFromCLMLTensor(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> tensor,
174174
void* data, cl_ml_tensor_layout_qcom layout) {
175175
cl_int result = 0;
176-
cl_event readEvent = NULL;
176+
cl_event readEvent = nullptr;
177177
// Read the output tensor
178178
result = h_ClmlIntf->clEnqueueReadMLTensorDataQCOM(this->queue, tensor->tensor, tensor->memory,
179179
data, layout,
180180
0, // n waitlist
181-
NULL, // waitlist
181+
nullptr, // waitlist
182182
&readEvent); // event
183183
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
184184
result = clWaitForEvents(1, &readEvent);
@@ -194,12 +194,12 @@ cl_int CLMLRunner::AllocateTensorMemory(
194194
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> pTensorMemDesc) {
195195
uint32_t size = 0;
196196
cl_int result = CL_OUT_OF_HOST_MEMORY;
197-
cl_mem buffer = NULL;
197+
cl_mem buffer = nullptr;
198198

199199
result = h_ClmlIntf->clGetMLTensorMemorySizeQCOM(context, pTensorMemDesc->tensor, &size);
200200
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
201201

202-
buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, NULL, &result);
202+
buffer = clCreateBuffer(context, CL_MEM_READ_WRITE, size, nullptr, &result);
203203
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
204204

205205
pTensorMemDesc->memory = buffer;
@@ -257,7 +257,7 @@ void CLMLRunner::MakeUnusedTensor(void) {
257257
cl_ml_tensor_desc_qcom desc = {};
258258
desc.num_dimensions = CL_TENSOR_UNUSED_QCOM;
259259
this->unusedTensor = std::make_shared<cl_ml_tensor_memory_desc_qcom>();
260-
result = this->h_ClmlIntf->clCreateMLTensorQCOM(this->context, NULL, &desc,
260+
result = this->h_ClmlIntf->clCreateMLTensorQCOM(this->context, nullptr, &desc,
261261
&(this->unusedTensor->tensor));
262262
CLML_SDK_TEST_AND_EXIT(this->unusedTensor && result == CL_SUCCESS);
263263
}
@@ -321,7 +321,8 @@ std::shared_ptr<cl_ml_tensor_memory_desc_qcom> CLMLRunner::MakeCLMLTensor(
321321
auto tensor_dsc = std::make_shared<cl_ml_tensor_memory_desc_qcom>();
322322
cl_ml_tensor_desc_qcom desc = {
323323
cl_dtype, layout, dims.n, dims.c, dims.h, dims.w, 0, CL_TENSOR_DIMENSIONS_4D_QCOM, {0}};
324-
result = this->h_ClmlIntf->clCreateMLTensorQCOM(this->context, NULL, &desc, &tensor_dsc->tensor);
324+
result =
325+
this->h_ClmlIntf->clCreateMLTensorQCOM(this->context, nullptr, &desc, &tensor_dsc->tensor);
325326
CLML_SDK_TEST_AND_EXIT(tensor_dsc->tensor && result == CL_SUCCESS);
326327
return tensor_dsc;
327328
}
@@ -372,7 +373,7 @@ void CLMLRunner::MakeConv2D(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input
372373
{clml_dilation[0], clml_dilation[1]},
373374
0,
374375
cl_arithmetic_mode};
375-
cl_ml_op_qcom op = NULL;
376+
cl_ml_op_qcom op = nullptr;
376377
if (!has_act) {
377378
result = h_ClmlIntf->clCreateMLOpConvolutionForwardQCOM(
378379
this->context, 0, &conv_desc, input_desc->tensor, weight_desc->tensor, bias_desc->tensor,
@@ -381,7 +382,7 @@ void CLMLRunner::MakeConv2D(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input
381382
} else {
382383
result = h_ClmlIntf->clCreateMLOpFusedConvolutionActivationForwardQCOM(
383384
this->context, 0, &conv_desc, &act_desc, input_desc->tensor, weight_desc->tensor,
384-
bias_desc->tensor, NULL, output_desc->tensor, &op, tuning_cache);
385+
bias_desc->tensor, nullptr, output_desc->tensor, &op, tuning_cache);
385386
CLML_SDK_TEST_AND_EXIT(op && result == CL_SUCCESS);
386387
}
387388
this->function.push_back(op);
@@ -443,7 +444,7 @@ void CLMLRunner::MakeConv2DWithBN(std::shared_ptr<cl_ml_tensor_memory_desc_qcom>
443444
{clml_dilation[0], clml_dilation[1]},
444445
0,
445446
cl_arithmetic_mode};
446-
cl_ml_op_qcom op = NULL;
447+
cl_ml_op_qcom op = nullptr;
447448
cl_ml_op_batchnorm_desc_qcom bn_desc = {CL_BATCHNORM_MODE_SPATIAL_QCOM, cl_arithmetic_mode};
448449
if (!has_act) {
449450
result = h_ClmlIntf->clCreateMLOpFusedConvolutionBatchNormForwardQCOM(
@@ -454,7 +455,7 @@ void CLMLRunner::MakeConv2DWithBN(std::shared_ptr<cl_ml_tensor_memory_desc_qcom>
454455
} else {
455456
result = h_ClmlIntf->clCreateMLOpFusedConvolutionBatchNormActivationForwardQCOM(
456457
this->context, 0, &conv_desc, &bn_desc, &act_desc, input_desc->tensor, weight_desc->tensor,
457-
bias_desc->tensor, output_desc->tensor, NULL, bn_mean->tensor, bn_var->tensor,
458+
bias_desc->tensor, output_desc->tensor, nullptr, bn_mean->tensor, bn_var->tensor,
458459
bn_scale->tensor, bn_bias->tensor, &op, tuning_cache);
459460
CLML_SDK_TEST_AND_EXIT(op && result == CL_SUCCESS);
460461
}
@@ -472,7 +473,7 @@ void CLMLRunner::MakeRelu(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input_d
472473
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc,
473474
cl_activation_function_qcom relu_type, std::string dtype) {
474475
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
475-
cl_ml_op_qcom op = NULL;
476+
cl_ml_op_qcom op = nullptr;
476477
cl_int result;
477478
cl_ml_op_activation_desc_qcom act_desc = {relu_type, CL_PROPAGATE_NAN_QCOM, cl_arithmetic_mode};
478479

@@ -502,7 +503,7 @@ void CLMLRunner::MakeBatchNorm(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> in
502503
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> bn_var,
503504
std::vector<float> bn_attrs, std::string dtype) {
504505
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
505-
cl_ml_op_qcom op = NULL;
506+
cl_ml_op_qcom op = nullptr;
506507
cl_int result;
507508

508509
cl_ml_op_batchnorm_desc_qcom bn_desc = {CL_BATCHNORM_MODE_SPATIAL_QCOM, cl_arithmetic_mode};
@@ -531,7 +532,7 @@ void CLMLRunner::MakePool2D(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input
531532
std::vector<cl_uint> padding, std::string pool_type,
532533
std::string dtype) {
533534
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
534-
cl_ml_op_qcom op = NULL;
535+
cl_ml_op_qcom op = nullptr;
535536
cl_int result;
536537

537538
cl_ml_op_pooling_desc_qcom pool_desc = {
@@ -567,7 +568,7 @@ void CLMLRunner::MakeGlobalPool2D(std::shared_ptr<cl_ml_tensor_memory_desc_qcom>
567568
std::vector<cl_uint> in_shape, std::string pool_type,
568569
std::string dtype) {
569570
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
570-
cl_ml_op_qcom op = NULL;
571+
cl_ml_op_qcom op = nullptr;
571572
cl_int result;
572573
cl_ml_op_pooling_desc_qcom pool_desc = {
573574
pool_type == "nn.global_max_pool2d" ? CL_POOLING_MODE_MAX_QCOM
@@ -599,7 +600,7 @@ void CLMLRunner::MakeReshape(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> inpu
599600
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc,
600601
std::string dtype) {
601602
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
602-
cl_ml_op_qcom op = NULL;
603+
cl_ml_op_qcom op = nullptr;
603604
cl_int result;
604605

605606
result = h_ClmlIntf->clCreateMLOpReshapeQCOM(this->context, 0, input_desc->tensor,
@@ -620,7 +621,7 @@ void CLMLRunner::MakeConcatenate(
620621
std::vector<std::shared_ptr<cl_ml_tensor_memory_desc_qcom>> in_list,
621622
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc, int axis, std::string dtype) {
622623
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
623-
cl_ml_op_qcom op = NULL;
624+
cl_ml_op_qcom op = nullptr;
624625
cl_int result;
625626

626627
cl_ml_tensor_qcom* concatInputs = new cl_ml_tensor_qcom[in_list.size()];
@@ -650,7 +651,7 @@ void CLMLRunner::MakeDense(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input_
650651
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> bias_desc,
651652
std::string dtype) {
652653
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
653-
cl_ml_op_qcom op = NULL;
654+
cl_ml_op_qcom op = nullptr;
654655
cl_int result;
655656

656657
cl_ml_op_convolution_desc_qcom conv_desc = {CL_CONVOLUTION_MODE_CONVOLUTION_QCOM,
@@ -681,7 +682,7 @@ void CLMLRunner::MakeSoftMax(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> inpu
681682
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc,
682683
std::string dtype) {
683684
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
684-
cl_ml_op_qcom op = NULL;
685+
cl_ml_op_qcom op = nullptr;
685686
cl_int result;
686687

687688
cl_ml_op_softmax_desc_qcom softmax_desc = {CL_SOFTMAX_ALGORITHM_ACCURATE_QCOM,
@@ -706,7 +707,7 @@ void CLMLRunner::MakePad(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input_de
706707
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc,
707708
std::string pad_mode, std::vector<cl_uint> padding, std::string dtype) {
708709
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
709-
cl_ml_op_qcom op = NULL;
710+
cl_ml_op_qcom op = nullptr;
710711
cl_int result;
711712

712713
cl_pad_mode_qcom clml_pad_mode = CL_PAD_MODE_CONSTANT_QCOM;
@@ -741,7 +742,7 @@ void CLMLRunner::MakeBatchFlatten(std::shared_ptr<cl_ml_tensor_memory_desc_qcom>
741742
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc,
742743
std::string dtype) {
743744
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
744-
cl_ml_op_qcom op = NULL;
745+
cl_ml_op_qcom op = nullptr;
745746
cl_int result;
746747

747748
result = h_ClmlIntf->clCreateMLOpReshapeQCOM(this->context, 0, input_desc->tensor,
@@ -763,7 +764,7 @@ void CLMLRunner::MakeClip(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> input_d
763764
float a_min, std::string dtype) {
764765
LOG(INFO) << "MakeClip called";
765766
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
766-
cl_ml_op_qcom op = NULL;
767+
cl_ml_op_qcom op = nullptr;
767768
cl_int result;
768769

769770
cl_ml_op_clip_desc_qcom clip_desc = {
@@ -788,7 +789,7 @@ void CLMLRunner::MakeBinaryOp(std::shared_ptr<cl_ml_tensor_memory_desc_qcom> inp
788789
std::shared_ptr<cl_ml_tensor_memory_desc_qcom> output_desc,
789790
std::string op_name, std::string dtype) {
790791
cl_arithmetic_mode_qcom cl_arithmetic_mode = MakeCLArithMode(MakeCLDataType(dtype));
791-
cl_ml_op_qcom op = NULL;
792+
cl_ml_op_qcom op = nullptr;
792793
cl_int result;
793794

794795
cl_binary_op_qcom binary_op = CL_TENSOR_OP_ADD_QCOM;

apps/cpp_clml/clml_runner.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -229,20 +229,20 @@ class CLMLRunner {
229229
/*! \brief ML API interface */
230230
GET_ML_API_INTERFACE* h_ClmlIntf = nullptr;
231231
/*! \brief Tuning cache object */
232-
cl_ml_tuningcache_qcom tuning_cache = NULL;
232+
cl_ml_tuningcache_qcom tuning_cache = nullptr;
233233
/*! \brief Flag to indicate a tuning run */
234234
bool is_tuning_run;
235235
/*! \brief The tuning file for loading or storing cache */
236236
char* tuning_file;
237237

238238
/*! \brief OpenCL platform */
239-
cl_platform_id platform{NULL};
239+
cl_platform_id platform{nullptr};
240240
/*! \brief OpenCL context */
241-
cl_context context{NULL};
241+
cl_context context{nullptr};
242242
/*! \brief OpenCL device */
243-
cl_device_id device_id{NULL};
243+
cl_device_id device_id{nullptr};
244244
/*! \brief OpenCL Queue */
245-
cl_command_queue queue{NULL};
245+
cl_command_queue queue{nullptr};
246246
/*! \brief Numpy object for params */
247247
cnpy::npz_t npz_params;
248248
/*! \brief Numpy object for inputs */

apps/cpp_clml/main.cc

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -148,11 +148,11 @@ void ParseCmdArgs(int argc, char* argv[], struct ToolArgs& args) {
148148
bool ExtensionStringPresent(cl_platform_id platform_id, cl_device_id device_id) {
149149
cl_int result = 0;
150150
size_t reqd_size = 0;
151-
result = clGetDeviceInfo(device_id, CL_DEVICE_EXTENSIONS, 0, NULL, &reqd_size);
151+
result = clGetDeviceInfo(device_id, CL_DEVICE_EXTENSIONS, 0, nullptr, &reqd_size);
152152
CLML_SDK_TEST_AND_EXIT(reqd_size > 0u && result == CL_SUCCESS);
153153

154154
std::vector<char> buf(reqd_size);
155-
result = clGetDeviceInfo(device_id, CL_DEVICE_EXTENSIONS, reqd_size, buf.data(), NULL);
155+
result = clGetDeviceInfo(device_id, CL_DEVICE_EXTENSIONS, reqd_size, buf.data(), nullptr);
156156
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
157157

158158
std::string extensions(buf.data());
@@ -174,25 +174,25 @@ int ExecuteModel(ToolArgs& args) {
174174
// Init OpenCL Environment
175175
cl_int result;
176176
cl_event readEvent = nullptr;
177-
cl_platform_id platform = NULL;
178-
cl_context context = NULL;
179-
cl_device_id device_id = NULL;
180-
cl_command_queue queue = NULL;
177+
cl_platform_id platform = nullptr;
178+
cl_context context = nullptr;
179+
cl_device_id device_id = nullptr;
180+
cl_command_queue queue = nullptr;
181181

182182
// Initialize Context and Command Queue
183-
result = clGetPlatformIDs(1, &platform, NULL);
183+
result = clGetPlatformIDs(1, &platform, nullptr);
184184
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
185185

186186
uint32_t num_devices = 0;
187-
result = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, NULL, &num_devices);
187+
result = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 0, nullptr, &num_devices);
188188
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS && num_devices == 1);
189189

190-
result = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device_id, NULL);
190+
result = clGetDeviceIDs(platform, CL_DEVICE_TYPE_GPU, 1, &device_id, nullptr);
191191
CLML_SDK_TEST_AND_EXIT(device_id && result == CL_SUCCESS);
192192

193193
CLML_SDK_TEST_AND_EXIT(ExtensionStringPresent(platform, device_id) == true);
194194

195-
context = clCreateContext(0, 1, &device_id, NULL, NULL, &result);
195+
context = clCreateContext(0, 1, &device_id, nullptr, nullptr, &result);
196196
CLML_SDK_TEST_AND_EXIT(result == CL_SUCCESS);
197197

198198
cl_command_queue_properties queue_props = 0;

0 commit comments

Comments
 (0)