
Commit 1825cbc

Jieyang Chen (JieyangChen7) authored and committed
Replace NULL with nullptr in DeviceAdapters
1 parent 3745ee7 commit 1825cbc
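
For context, a minimal stand-alone illustration (not MGARD-X code) of why nullptr is preferred over NULL in C++: NULL is an integer constant such as 0 or 0L, so it takes part in integer overload resolution, while nullptr has its own type, std::nullptr_t, and converts only to pointer types.

#include <cstddef>
#include <iostream>

void f(int) { std::cout << "f(int)\n"; }
void f(char *) { std::cout << "f(char *)\n"; }

int main() {
  f(0);       // an integer constant like NULL selects the int overload
  f(nullptr); // std::nullptr_t can only become a pointer: picks f(char *)
  return 0;
}

The replacements in this commit are behavior-preserving: ptr == nullptr and streams = nullptr mean the same thing as the NULL forms, but they cannot be silently treated as integers.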

File tree

6 files changed: +40 −41 lines changed


build_scripts/build_mgard_adios2_cuda_summit.sh

−1

@@ -80,7 +80,6 @@ mgard_x_install_dir=${install_dir}
 mkdir -p ${mgard_x_build_dir}
 cmake -S ${mgard_x_src_dir} -B ${mgard_x_build_dir} \
     -DCMAKE_PREFIX_PATH="${nvcomp_install_dir};${zstd_install_dir}/lib/cmake/zstd;${protobuf_install_dir}"\
-    -DMGARD_ENABLE_SERIAL=ON\
     -DMGARD_ENABLE_CUDA=ON\
     -DCMAKE_CUDA_ARCHITECTURES="70"\
     -DMGARD_ENABLE_DOCS=OFF\

include/mgard-x/RuntimeX/DeviceAdapters/DeviceAdapterCuda.h

+12 −12

@@ -582,11 +582,11 @@ template <> class DeviceQueues<CUDA> {
       delete[] streams[d];
     }
     delete[] streams;
-    streams = NULL;
+    streams = nullptr;
   }

   int NumDevices;
-  cudaStream_t **streams = NULL;
+  cudaStream_t **streams = nullptr;
 };

 extern int cuda_dev_id;
@@ -793,7 +793,7 @@ template <> class MemoryManager<CUDA> {
     if (queue_idx == MGARDX_SYNCHRONIZED_QUEUE) {
       DeviceRuntime<CUDA>::SyncQueue(queue_idx);
     }
-    if (ptr == NULL)
+    if (ptr == nullptr)
       return;
     gpuErrchk(cudaFree(ptr));
     if (queue_idx == MGARDX_SYNCHRONIZED_QUEUE) {
@@ -865,7 +865,7 @@ template <> class MemoryManager<CUDA> {
     if (queue_idx == MGARDX_SYNCHRONIZED_QUEUE) {
       DeviceRuntime<CUDA>::SyncQueue(queue_idx);
     }
-    if (ptr == NULL)
+    if (ptr == nullptr)
       return;
     gpuErrchk(cudaFreeHost(ptr));
     if (queue_idx == MGARDX_SYNCHRONIZED_QUEUE) {
@@ -2399,7 +2399,7 @@ template <> class DeviceCollective<CUDA> {
              SubArray<1, T, CUDA> result,
              Array<1, Byte, CUDA> &workspace, int queue_idx) {
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     cudaStream_t stream = DeviceRuntime<CUDA>::GetQueue(queue_idx);
@@ -2416,7 +2416,7 @@ template <> class DeviceCollective<CUDA> {
   AbsMax(SIZE n, SubArray<1, T, CUDA> v, SubArray<1, T, CUDA> result,
          Array<1, Byte, CUDA> &workspace, int queue_idx) {
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     AbsMaxOp absMaxOp;
@@ -2437,7 +2437,7 @@ template <> class DeviceCollective<CUDA> {
     cub::TransformInputIterator<T, SquareOp, T *> transformed_input_iter(
         v.data(), squareOp);
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     cudaStream_t stream = DeviceRuntime<CUDA>::GetQueue(queue_idx);
@@ -2455,7 +2455,7 @@ template <> class DeviceCollective<CUDA> {
   ScanSumInclusive(SIZE n, SubArray<1, T, CUDA> v, SubArray<1, T, CUDA> result,
                    Array<1, Byte, CUDA> &workspace, int queue_idx) {
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     cudaStream_t stream = DeviceRuntime<CUDA>::GetQueue(queue_idx);
@@ -2472,7 +2472,7 @@ template <> class DeviceCollective<CUDA> {
   ScanSumExclusive(SIZE n, SubArray<1, T, CUDA> v, SubArray<1, T, CUDA> result,
                    Array<1, Byte, CUDA> &workspace, int queue_idx) {
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     cudaStream_t stream = DeviceRuntime<CUDA>::GetQueue(queue_idx);
@@ -2489,7 +2489,7 @@ template <> class DeviceCollective<CUDA> {
   ScanSumExtended(SIZE n, SubArray<1, T, CUDA> v, SubArray<1, T, CUDA> result,
                   Array<1, Byte, CUDA> &workspace, int queue_idx) {
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     cudaStream_t stream = DeviceRuntime<CUDA>::GetQueue(queue_idx);
@@ -2513,7 +2513,7 @@ template <> class DeviceCollective<CUDA> {
                  Array<1, Byte, CUDA> &workspace,
                  int queue_idx) {
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     cudaStream_t stream = DeviceRuntime<CUDA>::GetQueue(queue_idx);
@@ -2556,4 +2556,4 @@ template <> class DeviceCollective<CUDA> {

 } // namespace mgard_x

-#endif
+#endif
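
In the DeviceCollective<CUDA> hunks above, d_temp_storage is either the preallocated workspace buffer or a null pointer. That follows the CUB convention: calling a device-wide algorithm with a null temp-storage pointer only writes the required workspace size into temp_storage_bytes, and a second call with real storage does the work. A minimal stand-alone sketch of that two-phase pattern (sum_on_device is an illustrative name, not an MGARD-X function; error checking omitted):

#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstddef>

void sum_on_device(const float *d_in, float *d_out, int n, cudaStream_t stream) {
  void *d_temp_storage = nullptr; // null pointer => size query only
  size_t temp_storage_bytes = 0;
  cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, n, stream);
  cudaMalloc(&d_temp_storage, temp_storage_bytes); // allocate the workspace
  cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, d_in, d_out, n, stream); // actual reduction
  cudaFree(d_temp_storage);
}

MGARD-X instead passes in a reusable workspace Array, so the null pointer only appears in the "no allocation yet" case; the commit merely spells it nullptr.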

include/mgard-x/RuntimeX/DeviceAdapters/DeviceAdapterHip.h

+12 −12

@@ -494,11 +494,11 @@ template <> class DeviceQueues<HIP> {
       delete[] streams[d];
     }
     delete[] streams;
-    streams = NULL;
+    streams = nullptr;
   }

   int NumDevices;
-  hipStream_t **streams = NULL;
+  hipStream_t **streams = nullptr;
 };

 extern int hip_dev_id;
@@ -718,7 +718,7 @@ template <> class MemoryManager<HIP> {
     if (queue_idx == MGARDX_SYNCHRONIZED_QUEUE) {
       DeviceRuntime<HIP>::SyncQueue(queue_idx);
     }
-    if (ptr == NULL)
+    if (ptr == nullptr)
       return;
     gpuErrchk(hipFree(ptr));
     if (queue_idx == MGARDX_SYNCHRONIZED_QUEUE) {
@@ -803,7 +803,7 @@ template <> class MemoryManager<HIP> {
     if (queue_idx == MGARDX_SYNCHRONIZED_QUEUE) {
       DeviceRuntime<HIP>::SyncQueue(queue_idx);
     }
-    if (ptr == NULL)
+    if (ptr == nullptr)
       return;
     gpuErrchk(hipFreeHost(ptr));
     if (queue_idx == MGARDX_SYNCHRONIZED_QUEUE) {
@@ -2235,7 +2235,7 @@ template <> class DeviceCollective<HIP> {
              SubArray<1, T, HIP> result,
              Array<1, Byte, HIP> &workspace, int queue_idx) {
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     hipStream_t stream = DeviceRuntime<HIP>::GetQueue(queue_idx);
@@ -2252,7 +2252,7 @@ template <> class DeviceCollective<HIP> {
   AbsMax(SIZE n, SubArray<1, T, HIP> v, SubArray<1, T, HIP> result,
          Array<1, Byte, HIP> &workspace, int queue_idx) {
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     AbsMaxOp absMaxOp;
@@ -2273,7 +2273,7 @@ template <> class DeviceCollective<HIP> {
     hipcub::TransformInputIterator<T, SquareOp, T *> transformed_input_iter(
         v.data(), squareOp);
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     hipStream_t stream = DeviceRuntime<HIP>::GetQueue(queue_idx);
@@ -2291,7 +2291,7 @@ template <> class DeviceCollective<HIP> {
   ScanSumInclusive(SIZE n, SubArray<1, T, HIP> v, SubArray<1, T, HIP> result,
                    Array<1, Byte, HIP> &workspace, int queue_idx) {
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     hipStream_t stream = DeviceRuntime<HIP>::GetQueue(queue_idx);
@@ -2308,7 +2308,7 @@ template <> class DeviceCollective<HIP> {
   ScanSumExclusive(SIZE n, SubArray<1, T, HIP> v, SubArray<1, T, HIP> result,
                    Array<1, Byte, HIP> &workspace, int queue_idx) {
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     hipStream_t stream = DeviceRuntime<HIP>::GetQueue(queue_idx);
@@ -2325,7 +2325,7 @@ template <> class DeviceCollective<HIP> {
   ScanSumExtended(SIZE n, SubArray<1, T, HIP> v, SubArray<1, T, HIP> result,
                   Array<1, Byte, HIP> &workspace, int queue_idx) {
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     hipStream_t stream = DeviceRuntime<HIP>::GetQueue(queue_idx);
@@ -2349,7 +2349,7 @@ template <> class DeviceCollective<HIP> {
              SubArray<1, ValueT, HIP> out_values, Array<1, Byte, HIP> &workspace,
              int queue_idx) {
     Byte *d_temp_storage =
-        workspace.hasDeviceAllocation() ? workspace.data() : NULL;
+        workspace.hasDeviceAllocation() ? workspace.data() : nullptr;
     size_t temp_storage_bytes =
         workspace.hasDeviceAllocation() ? workspace.shape(0) : 0;
     hipStream_t stream = DeviceRuntime<HIP>::GetQueue(queue_idx);
@@ -2366,4 +2366,4 @@ template <> class DeviceCollective<HIP> {

 } // namespace mgard_x

-#endif
+#endif

include/mgard-x/RuntimeX/DeviceAdapters/DeviceAdapterOpenmp.h

+6 −6

@@ -317,7 +317,7 @@ template <> class MemoryManager<OPENMP> {
     using converted_T =
         typename std::conditional<std::is_same<T, void>::value, Byte, T>::type;
     ptr = (T *)std::malloc(n * sizeof(converted_T));
-    if (ptr == NULL) {
+    if (ptr == nullptr) {
       log::err("MemoryManager<OPENMP>::Malloc1D error.");
     }
   }
@@ -330,7 +330,7 @@ template <> class MemoryManager<OPENMP> {
         typename std::conditional<std::is_same<T, void>::value, Byte, T>::type;
     ptr = (T *)std::malloc(n1 * n2 * sizeof(converted_T));
     ld = n1;
-    if (ptr == NULL) {
+    if (ptr == nullptr) {
       log::err("MemoryManager<OPENMP>::MallocND error.");
     }
   }
@@ -342,7 +342,7 @@ template <> class MemoryManager<OPENMP> {
     using converted_T =
         typename std::conditional<std::is_same<T, void>::value, Byte, T>::type;
     ptr = (T *)std::malloc(n * sizeof(converted_T));
-    if (ptr == NULL) {
+    if (ptr == nullptr) {
       log::err("MemoryManager<OPENMP>::MallocManaged1D error.");
     }
   }
@@ -351,7 +351,7 @@ template <> class MemoryManager<OPENMP> {
   MGARDX_CONT static void Free(T *ptr,
                                int queue_idx = MGARDX_SYNCHRONIZED_QUEUE) {
     log::dbg("Calling MemoryManager<OPENMP>::Free");
-    if (ptr == NULL)
+    if (ptr == nullptr)
      return;
     std::free(ptr);
   }
@@ -392,7 +392,7 @@ template <> class MemoryManager<OPENMP> {
     using converted_T =
         typename std::conditional<std::is_same<T, void>::value, Byte, T>::type;
     ptr = (T *)std::malloc(n * sizeof(converted_T));
-    if (ptr == NULL) {
+    if (ptr == nullptr) {
       log::err("MemoryManager<OPENMP>::MallocHost error.");
     }
   }
@@ -401,7 +401,7 @@ template <> class MemoryManager<OPENMP> {
   MGARDX_CONT static void FreeHost(T *ptr,
                                    int queue_idx = MGARDX_SYNCHRONIZED_QUEUE) {
     log::dbg("Calling MemoryManager<OPENMP>::FreeHost");
-    if (ptr == NULL)
+    if (ptr == nullptr)
      return;
     std::free(ptr);
   }
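
The Malloc*/Free specializations above all rely on the same host behavior: std::malloc reports failure by returning a null pointer, and the early return skips the free when there is nothing to release. A minimal stand-alone sketch of that pattern (checked_alloc and checked_free are hypothetical names, not part of MGARD-X):

#include <cstdio>
#include <cstdlib>

template <typename T> T *checked_alloc(std::size_t n) {
  T *ptr = static_cast<T *>(std::malloc(n * sizeof(T)));
  if (ptr == nullptr) { // std::malloc signals failure with a null pointer
    std::fprintf(stderr, "allocation of %zu elements failed\n", n);
  }
  return ptr;
}

template <typename T> void checked_free(T *ptr) {
  if (ptr == nullptr) // nothing to release
    return;
  std::free(ptr);
}

int main() {
  double *buf = checked_alloc<double>(1 << 20);
  checked_free(buf);
  return 0;
}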

include/mgard-x/RuntimeX/DeviceAdapters/DeviceAdapterSerial.h

+6 −6

@@ -812,7 +812,7 @@ template <> class MemoryManager<SERIAL> {
     using converted_T =
         typename std::conditional<std::is_same<T, void>::value, Byte, T>::type;
     ptr = (T *)std::malloc(n * sizeof(converted_T));
-    if (ptr == NULL) {
+    if (ptr == nullptr) {
       log::err("MemoryManager<SERIAL>::Malloc1D error.");
     }
   }
@@ -825,7 +825,7 @@ template <> class MemoryManager<SERIAL> {
         typename std::conditional<std::is_same<T, void>::value, Byte, T>::type;
     ptr = (T *)std::malloc(n1 * n2 * sizeof(converted_T));
     ld = n1;
-    if (ptr == NULL) {
+    if (ptr == nullptr) {
       log::err("MemoryManager<SERIAL>::MallocND error.");
     }
   }
@@ -837,7 +837,7 @@ template <> class MemoryManager<SERIAL> {
     using converted_T =
         typename std::conditional<std::is_same<T, void>::value, Byte, T>::type;
     ptr = (T *)std::malloc(n * sizeof(converted_T));
-    if (ptr == NULL) {
+    if (ptr == nullptr) {
       log::err("MemoryManager<SERIAL>::MallocManaged1D error.");
     }
   }
@@ -846,7 +846,7 @@ template <> class MemoryManager<SERIAL> {
   MGARDX_CONT static void Free(T *ptr,
                                int queue_idx = MGARDX_SYNCHRONIZED_QUEUE) {
     log::dbg("Calling MemoryManager<SERIAL>::Free");
-    if (ptr == NULL)
+    if (ptr == nullptr)
      return;
     std::free(ptr);
   }
@@ -887,7 +887,7 @@ template <> class MemoryManager<SERIAL> {
     using converted_T =
         typename std::conditional<std::is_same<T, void>::value, Byte, T>::type;
     ptr = (T *)std::malloc(n * sizeof(converted_T));
-    if (ptr == NULL) {
+    if (ptr == nullptr) {
       log::err("MemoryManager<SERIAL>::MallocHost error.");
     }
   }
@@ -896,7 +896,7 @@ template <> class MemoryManager<SERIAL> {
   MGARDX_CONT static void FreeHost(T *ptr,
                                    int queue_idx = MGARDX_SYNCHRONIZED_QUEUE) {
     log::dbg("Calling MemoryManager<SERIAL>::FreeHost");
-    if (ptr == NULL)
+    if (ptr == nullptr)
      return;
     std::free(ptr);
   }

include/mgard-x/RuntimeX/DeviceAdapters/DeviceAdapterSycl.h

+4 −4

@@ -342,11 +342,11 @@ template <> class DeviceQueues<SYCL> {
       delete[] queues[d];
     }
     delete[] queues;
-    queues = NULL;
+    queues = nullptr;
   }

   int NumDevices;
-  sycl::queue **queues = NULL;
+  sycl::queue **queues = nullptr;
 };

 extern int sycl_dev_id;
@@ -505,7 +505,7 @@ template <> class MemoryManager<SYCL> {
       DeviceRuntime<SYCL>::SyncQueue(queue_idx);
     }
     sycl::queue q = DeviceRuntime<SYCL>::GetQueue(queue_idx);
-    if (ptr == NULL)
+    if (ptr == nullptr)
      return;
     sycl::free(ptr, q);
     if (queue_idx == MGARDX_SYNCHRONIZED_QUEUE) {
@@ -584,7 +584,7 @@ template <> class MemoryManager<SYCL> {
       DeviceRuntime<SYCL>::SyncQueue(queue_idx);
     }
     sycl::queue q = DeviceRuntime<SYCL>::GetQueue(queue_idx);
-    if (ptr == NULL)
+    if (ptr == nullptr)
      return;
     sycl::free(ptr, q);
     if (queue_idx == MGARDX_SYNCHRONIZED_QUEUE) {
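
The SYCL hunks release unified-shared-memory (USM) pointers with sycl::free(ptr, q), which must be given a queue from the same context as the allocation. A minimal stand-alone sketch of that allocate/check/free cycle (illustrative only; assumes a SYCL 2020 implementation providing the <sycl/sycl.hpp> header):

#include <sycl/sycl.hpp>

int main() {
  sycl::queue q;                                    // default device
  float *ptr = sycl::malloc_device<float>(1024, q); // device USM allocation
  if (ptr == nullptr)                               // USM allocation reports failure with nullptr
    return 1;
  sycl::free(ptr, q);                               // free with a queue from the same context
  return 0;
}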
