diff --git a/src/grid/common/grid_basis_set.c b/src/grid/common/grid_basis_set.c index a8494e67ba..749548e669 100644 --- a/src/grid/common/grid_basis_set.c +++ b/src/grid/common/grid_basis_set.c @@ -5,6 +5,7 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /*----------------------------------------------------------------------------*/ +#include <assert.h> #include <stdlib.h> #include <string.h> @@ -24,6 +25,7 @@ void grid_create_basis_set(const int nset, const int nsgf, const int maxco, grid_basis_set **basis_set_out) { grid_basis_set *basis_set = malloc(sizeof(grid_basis_set)); + assert(basis_set != NULL); basis_set->nset = nset; basis_set->nsgf = nsgf; @@ -32,20 +34,27 @@ void grid_create_basis_set(const int nset, const int nsgf, const int maxco, size_t size = nset * sizeof(int); basis_set->lmin = malloc(size); + assert(basis_set->lmin != NULL); memcpy(basis_set->lmin, lmin, size); basis_set->lmax = malloc(size); + assert(basis_set->lmax != NULL); memcpy(basis_set->lmax, lmax, size); basis_set->npgf = malloc(size); + assert(basis_set->npgf != NULL); memcpy(basis_set->npgf, npgf, size); basis_set->nsgf_set = malloc(size); + assert(basis_set->nsgf_set != NULL); memcpy(basis_set->nsgf_set, nsgf_set, size); basis_set->first_sgf = malloc(size); + assert(basis_set->first_sgf != NULL); memcpy(basis_set->first_sgf, first_sgf, size); size = nsgf * maxco * sizeof(double); basis_set->sphi = malloc(size); + assert(basis_set->sphi != NULL); memcpy(basis_set->sphi, sphi, size); size = nset * maxpgf * sizeof(double); basis_set->zet = malloc(size); + assert(basis_set->zet != NULL); memcpy(basis_set->zet, zet, size); *basis_set_out = basis_set; diff --git a/src/grid/common/grid_library.c b/src/grid/common/grid_library.c index 8cfef47ee6..781770f5a5 100644 --- a/src/grid/common/grid_library.c +++ b/src/grid/common/grid_library.c @@ -63,6 +63,7 @@ void grid_library_init(void) { max_threads = omp_get_max_threads(); per_thread_globals = malloc(max_threads * sizeof(grid_library_globals *)); + assert(per_thread_globals 
!= NULL); // Using parallel regions to ensure memory is allocated near a thread's core. #pragma omp parallel default(none) shared(per_thread_globals) \ @@ -70,6 +71,7 @@ void grid_library_init(void) { { const int ithread = omp_get_thread_num(); per_thread_globals[ithread] = malloc(sizeof(grid_library_globals)); + assert(per_thread_globals[ithread] != NULL); memset(per_thread_globals[ithread], 0, sizeof(grid_library_globals)); } diff --git a/src/grid/common/grid_sphere_cache.c b/src/grid/common/grid_sphere_cache.c index ffe1eba9ea..2667746204 100644 --- a/src/grid/common/grid_sphere_cache.c +++ b/src/grid/common/grid_sphere_cache.c @@ -71,6 +71,7 @@ static void rebuild_cache_entry(const int max_imr, const double drmin, // Compute required storage size. entry->offsets = malloc(max_imr * sizeof(int)); + assert(entry->offsets != NULL); int nbounds_total = 0; for (int imr = 1; imr <= max_imr; imr++) { const double radius = imr * drmin; @@ -81,6 +82,7 @@ static void rebuild_cache_entry(const int max_imr, const double drmin, // Allocate and fill storage. entry->storage = malloc(nbounds_total * sizeof(int)); + assert(entry->storage != NULL); for (int imr = 1; imr <= max_imr; imr++) { const double radius = imr * drmin; const int offset = entry->offsets[imr - 1]; @@ -102,7 +104,7 @@ void grid_sphere_cache_lookup(const double radius, const double dh[3][3], // Find or create cache entry for given grid. const double dr0 = dh[0][0], dr1 = dh[1][1], dr2 = dh[2][2]; - grid_sphere_cache_entry *entry = 0; + grid_sphere_cache_entry *entry = NULL; bool found = false; // Fast path: check prev match. 
@@ -131,6 +133,7 @@ void grid_sphere_cache_lookup(const double radius, const double dh[3][3], grid_sphere_cache_entry *old_entries = cache->entries; const size_t entry_size = sizeof(grid_sphere_cache_entry); cache->entries = malloc(cache->size * entry_size); + assert(cache->entries != NULL); memcpy(cache->entries, old_entries, (cache->size - 1) * entry_size); free(old_entries); cache->prev_match = cache->size - 1; diff --git a/src/grid/cpu/grid_cpu_collocate.c b/src/grid/cpu/grid_cpu_collocate.c index 7bca3e495c..ecb6890df0 100644 --- a/src/grid/cpu/grid_cpu_collocate.c +++ b/src/grid/cpu/grid_cpu_collocate.c @@ -201,6 +201,7 @@ void grid_cpu_collocate_pgf_product( if (DUMP_TASKS) { const size_t sizeof_grid = sizeof(double) * npts_local_total; grid_before = malloc(sizeof_grid); + assert(grid_before != NULL); memcpy(grid_before, grid, sizeof_grid); memset(grid, 0, sizeof_grid); } diff --git a/src/grid/cpu/grid_cpu_task_list.c b/src/grid/cpu/grid_cpu_task_list.c index cf85749064..9a8eb8284c 100644 --- a/src/grid/cpu/grid_cpu_task_list.c +++ b/src/grid/cpu/grid_cpu_task_list.c @@ -60,6 +60,7 @@ void grid_cpu_create_task_list( } grid_cpu_task_list *task_list = malloc(sizeof(grid_cpu_task_list)); + assert(task_list != NULL); task_list->orthorhombic = orthorhombic; task_list->ntasks = ntasks; @@ -70,22 +71,27 @@ void grid_cpu_create_task_list( size_t size = nblocks * sizeof(int); task_list->block_offsets = malloc(size); + assert(task_list->block_offsets != NULL); memcpy(task_list->block_offsets, block_offsets, size); size = 3 * natoms * sizeof(double); task_list->atom_positions = malloc(size); + assert(task_list->atom_positions != NULL); memcpy(task_list->atom_positions, atom_positions, size); size = natoms * sizeof(int); task_list->atom_kinds = malloc(size); + assert(task_list->atom_kinds != NULL); memcpy(task_list->atom_kinds, atom_kinds, size); size = nkinds * sizeof(grid_basis_set *); task_list->basis_sets = malloc(size); + assert(task_list->basis_sets != NULL); 
memcpy(task_list->basis_sets, basis_sets, size); size = ntasks * sizeof(grid_cpu_task); task_list->tasks = malloc(size); + assert(task_list->tasks != NULL); for (int i = 0; i < ntasks; i++) { task_list->tasks[i].level = level_list[i]; task_list->tasks[i].iatom = iatom_list[i]; @@ -105,6 +111,7 @@ void grid_cpu_create_task_list( // Store grid layouts. size = nlevels * sizeof(grid_cpu_layout); task_list->layouts = malloc(size); + assert(task_list->layouts != NULL); for (int level = 0; level < nlevels; level++) { for (int i = 0; i < 3; i++) { task_list->layouts[level].npts_global[i] = npts_global[level][i]; @@ -124,7 +131,9 @@ void grid_cpu_create_task_list( // Find first and last task for each level and block. size = nlevels * nblocks * sizeof(int); task_list->first_level_block_task = malloc(size); + assert(task_list->first_level_block_task != NULL); task_list->last_level_block_task = malloc(size); + assert(task_list->last_level_block_task != NULL); for (int i = 0; i < nlevels * nblocks; i++) { task_list->first_level_block_task[i] = 0; task_list->last_level_block_task[i] = -1; // last < first means no tasks @@ -148,9 +157,11 @@ void grid_cpu_create_task_list( // Initialize thread-local storage. 
size = omp_get_max_threads() * sizeof(double *); task_list->threadlocals = malloc(size); + assert(task_list->threadlocals != NULL); memset(task_list->threadlocals, 0, size); size = omp_get_max_threads() * sizeof(size_t); task_list->threadlocal_sizes = malloc(size); + assert(task_list->threadlocal_sizes != NULL); memset(task_list->threadlocal_sizes, 0, size); *task_list_out = task_list; @@ -272,6 +283,7 @@ static void collocate_one_grid_level( free(task_list->threadlocals[ithread]); } task_list->threadlocals[ithread] = malloc(grid_size); + assert(task_list->threadlocals[ithread] != NULL); task_list->threadlocal_sizes[ithread] = grid_size; } diff --git a/src/grid/dgemm/grid_dgemm_collocation_integration.c b/src/grid/dgemm/grid_dgemm_collocation_integration.c index a116b0ff8e..c025bb2e8b 100644 --- a/src/grid/dgemm/grid_dgemm_collocation_integration.c +++ b/src/grid/dgemm/grid_dgemm_collocation_integration.c @@ -5,6 +5,7 @@ /* SPDX-License-Identifier: BSD-3-Clause */ /*----------------------------------------------------------------------------*/ +#include <assert.h> #include <stdio.h> #include <stdlib.h> #include <string.h> @@ -21,10 +22,7 @@ struct collocation_integration_ *collocate_create_handle(void) { struct collocation_integration_ *handle = NULL; handle = (struct collocation_integration_ *)malloc( sizeof(struct collocation_integration_)); - - if (handle == NULL) { - abort(); - } + assert(handle != NULL); memset(handle, 0, sizeof(struct collocation_integration_)); handle->alpha.alloc_size_ = 8192; @@ -39,6 +37,7 @@ struct collocation_integration_ *collocate_create_handle(void) { handle->pol_alloc_size = realloc_tensor(&handle->pol); handle->scratch = malloc(32768 * sizeof(double)); + assert(handle->scratch != NULL); handle->scratch_alloc_size = 32768; handle->T_alloc_size = 8192; handle->W_alloc_size = 2048; @@ -46,11 +45,14 @@ handle->blockDim[1] = 5; handle->blockDim[2] = 5; handle->device_id = (int *)malloc(sizeof(double) * 12); + 
assert(handle->device_id != NULL); handle->number_of_devices = 1; /* to suppress when we remove the spherical cutoff */ handle->map = (int **)malloc(3 * sizeof(int *)); + assert(handle->map != NULL); handle->map[0] = (int *)malloc(sizeof(int) * 512 * 3); + assert(handle->map[0] != NULL); handle->map[1] = handle->map[0] + 512; handle->map[2] = handle->map[1] + 512; handle->cmax = 512 * 3; @@ -105,8 +107,7 @@ void initialize_W_and_T(collocation_integration *const handler, if (handler->scratch) free(handler->scratch); handler->scratch = malloc(sizeof(double) * handler->scratch_alloc_size); - if (handler->scratch == NULL) - abort(); + assert(handler->scratch != NULL); } } @@ -135,8 +136,7 @@ void initialize_W_and_T_integrate(collocation_integration *const handler, if (handler->scratch) free(handler->scratch); handler->scratch = malloc(sizeof(double) * handler->scratch_alloc_size); - if (handler->scratch == NULL) - abort(); + assert(handler->scratch != NULL); } } diff --git a/src/grid/dgemm/grid_dgemm_context.c b/src/grid/dgemm/grid_dgemm_context.c index c9f2486d8e..643e0e439f 100644 --- a/src/grid/dgemm/grid_dgemm_context.c +++ b/src/grid/dgemm/grid_dgemm_context.c @@ -92,6 +92,7 @@ void update_atoms_position(const int natoms, realloc(data->atom_positions, 3 * natoms * sizeof(double)); } } + assert(data->atom_positions != NULL); data->natoms = natoms; @@ -119,6 +120,7 @@ void update_atoms_kinds(const int natoms, const int *atoms_kinds, data->atom_kinds = realloc(data->atom_kinds, natoms * sizeof(int)); } } + assert(data->atom_kinds != NULL); // data->natoms is initialized before calling this function if (data->natoms) memcpy(data->atom_kinds, atoms_kinds, sizeof(int) * natoms); @@ -142,6 +144,7 @@ void update_block_offsets(const int nblocks, const int *const block_offsets, data->block_offsets = realloc(data->block_offsets, sizeof(int) * nblocks); } } + assert(data->block_offsets != NULL); data->nblocks = nblocks; data->nblocks_total = imax(data->nblocks_total, 
nblocks); @@ -159,6 +162,7 @@ void update_basis_set(const int nkinds, const grid_basis_set **const basis_sets, realloc(data->basis_sets, nkinds * sizeof(grid_basis_set *)); } } + assert(data->basis_sets != NULL); data->nkinds = nkinds; data->nkinds_total = imax(data->nkinds_total, nkinds); memcpy(data->basis_sets, basis_sets, nkinds * sizeof(grid_basis_set *)); @@ -193,9 +197,11 @@ void update_task_lists(const int nlevels, const int ntasks, if (ctx->nlevels_total < nlevels) { /* save the address of the full task list. NULL when completly empty */ ctx->tasks = realloc(ctx->tasks, nlevels * sizeof(_task *)); + assert(ctx->tasks != NULL); } if (ctx->ntasks_total < ntasks) { ctx->tasks[0] = realloc(ctx->tasks[0], ntasks * sizeof(_task)); + assert(ctx->tasks[0] != NULL); } } @@ -344,6 +350,7 @@ void update_grid(const int nlevels, grid_context *ctx) { ctx->grid = realloc(ctx->grid, sizeof(tensor) * nlevels); } } + assert(ctx->grid != NULL); ctx->nlevels_total = imax(ctx->nlevels_total, nlevels); ctx->nlevels = nlevels; @@ -447,10 +454,12 @@ void initialize_grid_context_on_gpu(void *ptr, const int number_of_devices, ctx->number_of_devices = number_of_devices; ctx->queue_length = 8192; - if (ctx->device_id == NULL) + if (ctx->device_id == NULL) { ctx->device_id = malloc(sizeof(int) * number_of_devices); - else + } else { ctx->device_id = realloc(ctx->device_id, sizeof(int) * number_of_devices); + } + assert(ctx->device_id != NULL); memcpy(ctx->device_id, device_id, sizeof(int) * number_of_devices); } diff --git a/src/grid/dgemm/grid_dgemm_integrate.c b/src/grid/dgemm/grid_dgemm_integrate.c index d194576987..2ec318f6f1 100644 --- a/src/grid/dgemm/grid_dgemm_integrate.c +++ b/src/grid/dgemm/grid_dgemm_integrate.c @@ -1074,6 +1074,7 @@ void grid_dgemm_integrate_task_list( if (ctx->scratch == NULL) ctx->scratch = malloc(hab_blocks->size * max_threads); + assert(ctx->scratch != NULL); // #pragma omp parallel for for (int level = 0; level < ctx->nlevels; level++) { diff --git 
a/src/grid/dgemm/grid_dgemm_tensor_local.c b/src/grid/dgemm/grid_dgemm_tensor_local.c index 32eab3b52f..e864ff548d 100644 --- a/src/grid/dgemm/grid_dgemm_tensor_local.c +++ b/src/grid/dgemm/grid_dgemm_tensor_local.c @@ -10,7 +10,7 @@ #include "grid_dgemm_utils.h" size_t realloc_tensor(tensor *t) { - assert(t); + assert(t != NULL); if (t->alloc_size_ == 0) { /* there is a mistake somewhere. We can not have t->old_alloc_size_ != 0 and @@ -29,8 +29,7 @@ size_t realloc_tensor(tensor *t) { if (t->data == NULL) { t->data = malloc(sizeof(double) * t->alloc_size_); - if (!t->data) - abort(); + assert(t->data != NULL); t->old_alloc_size_ = t->alloc_size_; } @@ -38,13 +37,10 @@ size_t realloc_tensor(tensor *t) { } void alloc_tensor(tensor *t) { - if (t == NULL) { - abort(); - } + assert(t != NULL); t->data = malloc(sizeof(double) * t->alloc_size_); - if (!t->data) - abort(); + assert(t->data != NULL); t->old_alloc_size_ = t->alloc_size_; } diff --git a/src/grid/grid_task_list.c b/src/grid/grid_task_list.c index 62cd127f1c..154852f53f 100644 --- a/src/grid/grid_task_list.c +++ b/src/grid/grid_task_list.c @@ -43,6 +43,7 @@ void grid_create_task_list( if (*task_list_out == NULL) { task_list = malloc(sizeof(grid_task_list)); + assert(task_list != NULL); memset(task_list, 0, sizeof(grid_task_list)); // Resolve AUTO to a concrete backend. @@ -68,6 +69,7 @@ void grid_create_task_list( task_list->nlevels = nlevels; size_t size = nlevels * 3 * sizeof(int); task_list->npts_local = malloc(size); + assert(task_list->npts_local != NULL); memcpy(task_list->npts_local, npts_local, size); // Always create reference backend because it might be needed for validation. 
diff --git a/src/grid/ref/grid_ref_task_list.c b/src/grid/ref/grid_ref_task_list.c index 0e462d4e65..2e535a829d 100644 --- a/src/grid/ref/grid_ref_task_list.c +++ b/src/grid/ref/grid_ref_task_list.c @@ -60,6 +60,7 @@ void grid_ref_create_task_list( } grid_ref_task_list *task_list = malloc(sizeof(grid_ref_task_list)); + assert(task_list != NULL); task_list->orthorhombic = orthorhombic; task_list->ntasks = ntasks; @@ -70,22 +71,27 @@ void grid_ref_create_task_list( size_t size = nblocks * sizeof(int); task_list->block_offsets = malloc(size); + assert(task_list->block_offsets != NULL); memcpy(task_list->block_offsets, block_offsets, size); size = 3 * natoms * sizeof(double); task_list->atom_positions = malloc(size); + assert(task_list->atom_positions != NULL); memcpy(task_list->atom_positions, atom_positions, size); size = natoms * sizeof(int); task_list->atom_kinds = malloc(size); + assert(task_list->atom_kinds != NULL); memcpy(task_list->atom_kinds, atom_kinds, size); size = nkinds * sizeof(grid_basis_set *); task_list->basis_sets = malloc(size); + assert(task_list->basis_sets != NULL); memcpy(task_list->basis_sets, basis_sets, size); size = ntasks * sizeof(grid_ref_task); task_list->tasks = malloc(size); + assert(task_list->tasks != NULL); for (int i = 0; i < ntasks; i++) { task_list->tasks[i].level = level_list[i]; task_list->tasks[i].iatom = iatom_list[i]; @@ -105,6 +111,7 @@ void grid_ref_create_task_list( // Store grid layouts. size = nlevels * sizeof(grid_ref_layout); task_list->layouts = malloc(size); + assert(task_list->layouts != NULL); for (int level = 0; level < nlevels; level++) { for (int i = 0; i < 3; i++) { task_list->layouts[level].npts_global[i] = npts_global[level][i]; @@ -124,7 +131,9 @@ void grid_ref_create_task_list( // Find first and last task for each level and block. 
size = nlevels * nblocks * sizeof(int); task_list->first_level_block_task = malloc(size); + assert(task_list->first_level_block_task != NULL); task_list->last_level_block_task = malloc(size); + assert(task_list->last_level_block_task != NULL); for (int i = 0; i < nlevels * nblocks; i++) { task_list->first_level_block_task[i] = 0; task_list->last_level_block_task[i] = -1; // last < first means no tasks @@ -148,9 +157,11 @@ void grid_ref_create_task_list( // Initialize thread-local storage. size = omp_get_max_threads() * sizeof(double *); task_list->threadlocals = malloc(size); + assert(task_list->threadlocals != NULL); memset(task_list->threadlocals, 0, size); size = omp_get_max_threads() * sizeof(size_t); task_list->threadlocal_sizes = malloc(size); + assert(task_list->threadlocal_sizes != NULL); memset(task_list->threadlocal_sizes, 0, size); *task_list_out = task_list; @@ -272,6 +283,7 @@ static void collocate_one_grid_level( free(task_list->threadlocals[ithread]); } task_list->threadlocals[ithread] = malloc(grid_size); + assert(task_list->threadlocals[ithread] != NULL); task_list->threadlocal_sizes[ithread] = grid_size; }