unify return values for destroy functions #1356

Status: Open. Wants to merge 1 commit into base branch main.

3 changes: 2 additions & 1 deletion examples/custom_file_provider/custom_file_provider.c
@@ -104,11 +104,12 @@ static umf_result_t file_init(const void *params, void **provider) {
 }
 
 // Function to deinitialize the file provider
-static void file_deinit(void *provider) {
+static umf_result_t file_deinit(void *provider) {
     file_provider_t *file_provider = (file_provider_t *)provider;
     munmap(file_provider->ptr, ADDRESS_RESERVATION);
     close(file_provider->fd);
     free(file_provider);
+    return UMF_RESULT_SUCCESS;
 }
 
 // Function to allocate memory from the file provider
3 changes: 2 additions & 1 deletion include/umf/memory_pool.h
@@ -58,8 +58,9 @@ umf_result_t umfPoolCreate(const umf_memory_pool_ops_t *ops,
 ///
 /// @brief Destroys memory pool.
 /// @param hPool handle to the pool
+/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
 ///
-void umfPoolDestroy(umf_memory_pool_handle_t hPool);
+umf_result_t umfPoolDestroy(umf_memory_pool_handle_t hPool);
 
 ///
 /// @brief Allocates \p size bytes of uninitialized storage from \p hPool
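
With this change, callers can detect when pool teardown fails instead of losing the error silently. A minimal usage sketch (not part of this diff; the release_pool wrapper and its error handling are illustrative):

    #include <stdio.h>
    #include <umf/memory_pool.h>

    // Sketch: release a pool and surface a failed teardown to the caller.
    static umf_result_t release_pool(umf_memory_pool_handle_t pool) {
        umf_result_t ret = umfPoolDestroy(pool);
        if (ret != UMF_RESULT_SUCCESS) {
            fprintf(stderr, "umfPoolDestroy() failed with code %d\n", (int)ret);
        }
        return ret;
    }
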
2 changes: 1 addition & 1 deletion include/umf/memory_pool_ops.h
@@ -48,7 +48,7 @@ typedef struct umf_memory_pool_ops_t {
     /// @brief Finalizes memory pool
     /// @param pool pool to finalize
     ///
-    void (*finalize)(void *pool);
+    umf_result_t (*finalize)(void *pool);
 
     ///
     /// @brief Allocates \p size bytes of uninitialized storage from \p pool
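
Out-of-tree pool implementations have to adapt their finalize callback to the new signature. A hypothetical sketch (my_pool_t, its field, and my_pool_finalize are illustrative names, not from this repository):

    #include <stdlib.h>
    #include <umf/memory_pool_ops.h>

    typedef struct my_pool_t {
        void *metadata; // whatever bookkeeping the pool keeps
    } my_pool_t;

    // The finalize op now reports whether teardown succeeded instead of
    // returning void.
    static umf_result_t my_pool_finalize(void *pool) {
        my_pool_t *my_pool = (my_pool_t *)pool;
        free(my_pool->metadata);
        free(my_pool);
        return UMF_RESULT_SUCCESS;
    }
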
3 changes: 2 additions & 1 deletion include/umf/memory_provider.h
@@ -51,8 +51,9 @@ umf_result_t umfMemoryProviderCreate(const umf_memory_provider_ops_t *ops,
 ///
 /// @brief Destroys memory provider.
 /// @param hProvider handle to the memory provider
+/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
 ///
-void umfMemoryProviderDestroy(umf_memory_provider_handle_t hProvider);
+umf_result_t umfMemoryProviderDestroy(umf_memory_provider_handle_t hProvider);
 
 ///
 /// @brief Allocates \p size bytes of uninitialized storage from memory \p hProvider
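
umfMemoryProviderDestroy() now forwards the status reported by the provider's finalize op (see src/memory_provider.c below), so a caller that owns a standalone provider can propagate it. A small illustrative wrapper (release_provider is not part of this PR):

    #include <umf/memory_provider.h>

    // Sketch: tear down a provider and hand the finalize status back.
    static umf_result_t release_provider(umf_memory_provider_handle_t provider) {
        return umfMemoryProviderDestroy(provider);
    }
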
2 changes: 1 addition & 1 deletion include/umf/memory_provider_ops.h
@@ -43,7 +43,7 @@ typedef struct umf_memory_provider_ops_t {
     /// @brief Finalizes memory provider.
     /// @param provider provider to finalize
     ///
-    void (*finalize)(void *provider);
+    umf_result_t (*finalize)(void *provider);
 
     ///
     /// @brief Allocates \p size bytes of uninitialized storage from memory \p provider
5 changes: 3 additions & 2 deletions include/umf/memspace.h
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright (C) 2023-2024 Intel Corporation
+ * Copyright (C) 2023-2025 Intel Corporation
  *
  * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT.
  * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
@@ -58,8 +58,9 @@ umf_result_t umfMemspaceCreateFromNumaArray(unsigned *nodeIds, size_t numIds,
 ///
 /// \brief Destroys memspace
 /// \param hMemspace handle to memspace
+/// \return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
 ///
-void umfMemspaceDestroy(umf_memspace_handle_t hMemspace);
+umf_result_t umfMemspaceDestroy(umf_memspace_handle_t hMemspace);
 
 ///
 /// \brief Retrieves predefined host all memspace.
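
The memspace API follows the same pattern. A tiny usage sketch (release_memspace is illustrative; the handle is assumed to come from an existing constructor such as umfMemspaceCreateFromNumaArray()):

    #include <assert.h>
    #include <umf/memspace.h>

    static void release_memspace(umf_memspace_handle_t memspace) {
        umf_result_t ret = umfMemspaceDestroy(memspace);
        // In this diff the implementation always reports success, but the
        // return value lets future failure modes be surfaced.
        assert(ret == UMF_RESULT_SUCCESS);
        (void)ret; // keep release builds (NDEBUG) warning-free
    }
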
3 changes: 2 additions & 1 deletion include/umf/pools/pool_disjoint.h
@@ -35,7 +35,8 @@ umfDisjointPoolSharedLimitsCreate(size_t MaxSize);
 
 /// @brief Destroy previously created pool limits struct.
 /// @param hSharedLimits handle to the shared limits struct.
-void umfDisjointPoolSharedLimitsDestroy(
+/// @return UMF_RESULT_SUCCESS on success or appropriate error code on failure.
+umf_result_t umfDisjointPoolSharedLimitsDestroy(
     umf_disjoint_pool_shared_limits_handle_t hSharedLimits);
 
 /// @brief Create a struct to store parameters of disjoint pool.
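
The shared-limits object gets the same treatment; the implementation in pool_disjoint.c below currently always returns UMF_RESULT_SUCCESS, so checking it is defensive. A usage sketch (shared_limits_roundtrip and the cap value are arbitrary examples; the NULL check on creation is a defensive assumption, not something shown in this diff):

    #include <umf/pools/pool_disjoint.h>

    // Sketch: create and destroy a shared-limits object, checking both steps.
    static umf_result_t shared_limits_roundtrip(void) {
        umf_disjoint_pool_shared_limits_handle_t limits =
            umfDisjointPoolSharedLimitsCreate(1024 * 1024); // example cap
        if (limits == NULL) {
            return UMF_RESULT_ERROR_UNKNOWN;
        }
        // ... attach 'limits' to one or more disjoint pool parameter structs ...
        return umfDisjointPoolSharedLimitsDestroy(limits);
    }
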
18 changes: 13 additions & 5 deletions src/memory_pool.c
@@ -201,24 +201,31 @@ static umf_result_t umfPoolCreateInternal(const umf_memory_pool_ops_t *ops,
     return ret;
 }
 
-void umfPoolDestroy(umf_memory_pool_handle_t hPool) {
+umf_result_t umfPoolDestroy(umf_memory_pool_handle_t hPool) {
     if (umf_ba_global_is_destroyed()) {
-        return;
+        return UMF_RESULT_ERROR_UNKNOWN;
     }
 
-    hPool->ops.finalize(hPool->pool_priv);
+    umf_result_t ret = UMF_RESULT_SUCCESS;
+    if (hPool->ops.finalize(hPool->pool_priv) != UMF_RESULT_SUCCESS) {
+        ret = UMF_RESULT_ERROR_UNKNOWN;
+    }
 
     umf_memory_provider_handle_t hUpstreamProvider = NULL;
     umfPoolGetMemoryProvider(hPool, &hUpstreamProvider);
 
     if (!(hPool->flags & UMF_POOL_CREATE_FLAG_DISABLE_TRACKING)) {
         // Destroy tracking provider.
-        umfMemoryProviderDestroy(hPool->provider);
+        if (umfMemoryProviderDestroy(hPool->provider) != UMF_RESULT_SUCCESS) {
+            ret = UMF_RESULT_ERROR_UNKNOWN;
+        }
     }
 
     if (hPool->flags & UMF_POOL_CREATE_FLAG_OWN_PROVIDER) {
         // Destroy associated memory provider.
-        umfMemoryProviderDestroy(hUpstreamProvider);
+        if (umfMemoryProviderDestroy(hUpstreamProvider) != UMF_RESULT_SUCCESS) {
+            ret = UMF_RESULT_ERROR_UNKNOWN;
+        }
     }
 
     utils_mutex_destroy_not_free(&hPool->lock);
@@ -227,6 +234,7 @@ void umfPoolDestroy(umf_memory_pool_handle_t hPool) {
 
     // TODO: this free keeps memory in base allocator, so it can lead to OOM in some scenarios (it should be optimized)
     umf_ba_global_free(hPool);
+    return ret;
 }
 
 umf_result_t umfFree(void *ptr) {

Review thread on the new "ret = UMF_RESULT_ERROR_UNKNOWN;" branch after ops.finalize():
Reviewer: why not just pass the result from finalize()?
Author: I decided to return an unknown error when the error is not recoverable and the pool is corrupted.
Reviewer: I would return the error code received from ops.finalize() here, like you have done in umfMemoryProviderDestroy().
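
For reference, the reviewer's suggestion would amount to something like this inside umfPoolDestroy() (a sketch of the proposed alternative, not what this diff currently does):

    // Proposed alternative (sketch): keep the pool-specific code reported by
    // ops.finalize() instead of collapsing every failure to UMF_RESULT_ERROR_UNKNOWN.
    umf_result_t ret = hPool->ops.finalize(hPool->pool_priv);
    // ... provider teardown and cleanup continue as in the patch above ...
    return ret;
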
15 changes: 11 additions & 4 deletions src/memory_provider.c
@@ -235,11 +235,18 @@ umf_result_t umfMemoryProviderCreate(const umf_memory_provider_ops_t *ops,
     return UMF_RESULT_SUCCESS;
 }
 
-void umfMemoryProviderDestroy(umf_memory_provider_handle_t hProvider) {
-    if (hProvider && !umf_ba_global_is_destroyed()) {
-        hProvider->ops.finalize(hProvider->provider_priv);
-        umf_ba_global_free(hProvider);
+umf_result_t umfMemoryProviderDestroy(umf_memory_provider_handle_t hProvider) {
+    if (umf_ba_global_is_destroyed()) {
+        return UMF_RESULT_ERROR_UNKNOWN;
     }
+
+    if (!hProvider) {
+        return UMF_RESULT_SUCCESS;
+    }
+
+    umf_result_t ret = hProvider->ops.finalize(hProvider->provider_priv);
+    umf_ba_global_free(hProvider);
+    return ret;
 }
 
 static void
5 changes: 3 additions & 2 deletions src/memspace.c
@@ -1,6 +1,6 @@
 /*
  *
- * Copyright (C) 2023-2024 Intel Corporation
+ * Copyright (C) 2023-2025 Intel Corporation
 *
 * Under the Apache License v2.0 with LLVM Exceptions. See LICENSE.TXT.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
@@ -130,14 +130,15 @@ umf_result_t umfMemspaceNew(umf_memspace_handle_t *hMemspace) {
     return UMF_RESULT_SUCCESS;
 }
 
-void umfMemspaceDestroy(umf_memspace_handle_t memspace) {
+umf_result_t umfMemspaceDestroy(umf_memspace_handle_t memspace) {
     assert(memspace);
     for (size_t i = 0; i < memspace->size; i++) {
         umfMemtargetDestroy(memspace->nodes[i]);
     }
 
     umf_ba_global_free(memspace->nodes);
     umf_ba_global_free(memspace);
+    return UMF_RESULT_SUCCESS;
 }
 
 umf_result_t umfMemspaceClone(umf_const_memspace_handle_t hMemspace,

Review thread on the umfMemtargetDestroy() call:
Reviewer: check the return value from umfMemtargetDestroy and return it if it is not SUCCESS
Author: umfMemtargetDestroy returns void
Reviewer: according to this PR, umfMemtargetDestroy should at least always return SUCCESS, right?
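
If umfMemtargetDestroy() were also converted to return umf_result_t, as the thread above suggests, the loop could propagate the first failure while still destroying the remaining targets. A hypothetical follow-up sketch (that change is not part of this diff, and the signature shown is assumed):

    // Assumes a hypothetical umf_result_t umfMemtargetDestroy(umf_memtarget_handle_t).
    umf_result_t ret = UMF_RESULT_SUCCESS;
    for (size_t i = 0; i < memspace->size; i++) {
        umf_result_t tret = umfMemtargetDestroy(memspace->nodes[i]);
        if (tret != UMF_RESULT_SUCCESS && ret == UMF_RESULT_SUCCESS) {
            ret = tret; // remember the first error, keep destroying the rest
        }
    }
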
16 changes: 11 additions & 5 deletions src/pool/pool_disjoint.c
@@ -995,9 +995,9 @@ umf_result_t disjoint_pool_get_last_allocation_error(void *pool) {
 }
 
 // Define destructor for use with unique_ptr
-void disjoint_pool_finalize(void *pool) {
+umf_result_t disjoint_pool_finalize(void *pool) {
     disjoint_pool_t *hPool = (disjoint_pool_t *)pool;
-
+    umf_result_t ret = UMF_RESULT_SUCCESS;
     if (hPool->params.pool_trace > 1) {
         disjoint_pool_print_stats(hPool);
     }
@@ -1008,10 +1008,15 @@ void disjoint_pool_finalize(void *pool) {
 
     VALGRIND_DO_DESTROY_MEMPOOL(hPool);
 
-    umfDisjointPoolSharedLimitsDestroy(hPool->default_shared_limits);
+    if (umfDisjointPoolSharedLimitsDestroy(hPool->default_shared_limits) !=
+        UMF_RESULT_SUCCESS) {
+        ret = UMF_RESULT_ERROR_UNKNOWN;
+        LOG_ERR("umfDisjointPoolSharedLimitsDestroy failed");
+    }
     critnib_delete(hPool->known_slabs);
 
     umf_ba_global_free(hPool);
+    return ret;
 }
 
 const char *disjoint_pool_get_name(void *pool) {
@@ -1053,9 +1058,10 @@ umfDisjointPoolSharedLimitsCreate(size_t max_size) {
     return ptr;
 }
 
-void umfDisjointPoolSharedLimitsDestroy(
-    umf_disjoint_pool_shared_limits_t *limits) {
+umf_result_t
+umfDisjointPoolSharedLimitsDestroy(umf_disjoint_pool_shared_limits_t *limits) {
     umf_ba_global_free(limits);
+    return UMF_RESULT_SUCCESS;
 }
 
 umf_result_t
9 changes: 7 additions & 2 deletions src/pool/pool_jemalloc.c
@@ -521,18 +521,23 @@ static umf_result_t op_initialize(umf_memory_provider_handle_t provider,
     return UMF_RESULT_ERROR_MEMORY_PROVIDER_SPECIFIC;
 }
 
-static void op_finalize(void *pool) {
+static umf_result_t op_finalize(void *pool) {
     assert(pool);
+    umf_result_t ret = UMF_RESULT_SUCCESS;
     jemalloc_memory_pool_t *je_pool = (jemalloc_memory_pool_t *)pool;
     for (size_t i = 0; i < je_pool->n_arenas; i++) {
         char cmd[64];
         unsigned arena = je_pool->arena_index[i];
         snprintf(cmd, sizeof(cmd), "arena.%u.destroy", arena);
-        (void)je_mallctl(cmd, NULL, 0, NULL, 0);
+        if (je_mallctl(cmd, NULL, 0, NULL, 0)) {
+            LOG_ERR("Could not destroy jemalloc arena %u", arena);
+            ret = UMF_RESULT_ERROR_UNKNOWN;
+        }
     }
     umf_ba_global_free(je_pool);
 
     VALGRIND_DO_DESTROY_MEMPOOL(pool);
+    return ret;
 }
 
 static size_t op_malloc_usable_size(void *pool, const void *ptr) {
5 changes: 4 additions & 1 deletion src/pool/pool_proxy.c
@@ -39,7 +39,10 @@ proxy_pool_initialize(umf_memory_provider_handle_t hProvider,
     return UMF_RESULT_SUCCESS;
 }
 
-static void proxy_pool_finalize(void *pool) { umf_ba_global_free(pool); }
+static umf_result_t proxy_pool_finalize(void *pool) {
+    umf_ba_global_free(pool);
+    return UMF_RESULT_SUCCESS;
+}
 
 static void *proxy_aligned_malloc(void *pool, size_t size, size_t alignment) {
     assert(pool);
9 changes: 7 additions & 2 deletions src/pool/pool_scalable.c
@@ -313,10 +313,15 @@ static umf_result_t tbb_pool_initialize(umf_memory_provider_handle_t provider,
     return res;
 }
 
-static void tbb_pool_finalize(void *pool) {
+static umf_result_t tbb_pool_finalize(void *pool) {
     tbb_memory_pool_t *pool_data = (tbb_memory_pool_t *)pool;
-    tbb_callbacks.pool_destroy(pool_data->tbb_pool);
+    umf_result_t ret = UMF_RESULT_SUCCESS;
+    if (!tbb_callbacks.pool_destroy(pool_data->tbb_pool)) {
+        LOG_ERR("TBB pool destroy failed");
+        ret = UMF_RESULT_ERROR_UNKNOWN;
+    }
     umf_ba_global_free(pool_data);
+    return ret;
 }
 
 static void *tbb_malloc(void *pool, size_t size) {
3 changes: 2 additions & 1 deletion src/provider/provider_cuda.c
@@ -369,8 +369,9 @@ static umf_result_t cu_memory_provider_initialize(const void *params,
     return UMF_RESULT_SUCCESS;
 }
 
-static void cu_memory_provider_finalize(void *provider) {
+static umf_result_t cu_memory_provider_finalize(void *provider) {
     umf_ba_global_free(provider);
+    return UMF_RESULT_SUCCESS;
 }
 
 /*
11 changes: 9 additions & 2 deletions src/provider/provider_devdax_memory.c
@@ -265,12 +265,19 @@ static umf_result_t devdax_initialize(const void *params, void **provider) {
     return ret;
 }
 
-static void devdax_finalize(void *provider) {
+static umf_result_t devdax_finalize(void *provider) {
     devdax_memory_provider_t *devdax_provider = provider;
+    umf_result_t ret = UMF_RESULT_SUCCESS;
     utils_mutex_destroy_not_free(&devdax_provider->lock);
-    utils_munmap(devdax_provider->base, devdax_provider->size);
+    if (utils_munmap(devdax_provider->base, devdax_provider->size)) {
+        LOG_PERR("unmapping the devdax memory failed (path: %s, size: %zu)",
+                 devdax_provider->path, devdax_provider->size);
+        ret = UMF_RESULT_ERROR_UNKNOWN;
+    }
+
     coarse_delete(devdax_provider->coarse);
     umf_ba_global_free(devdax_provider);
+    return ret;
 }
 
 static umf_result_t devdax_alloc(void *provider, size_t size, size_t alignment,
11 changes: 9 additions & 2 deletions src/provider/provider_file_memory.c
@@ -305,12 +305,13 @@ static umf_result_t file_initialize(const void *params, void **provider) {
     return ret;
 }
 
-static void file_finalize(void *provider) {
+static umf_result_t file_finalize(void *provider) {
     file_memory_provider_t *file_provider = provider;
 
     uintptr_t key = 0;
     uintptr_t rkey = 0;
     void *rvalue = NULL;
+    umf_result_t ret = UMF_RESULT_SUCCESS;
     while (1 == critnib_find(file_provider->mmaps, key, FIND_G, &rkey, &rvalue,
                              NULL)) {
         utils_munmap((void *)rkey, (size_t)rvalue);
@@ -319,11 +320,17 @@ static void file_finalize(void *provider) {
     }
 
     utils_mutex_destroy_not_free(&file_provider->lock);
-    utils_close_fd(file_provider->fd);
+
+    if (utils_close_fd(file_provider->fd)) {
+        LOG_PERR("closing file descriptor %d failed", file_provider->fd);
+        ret = UMF_RESULT_ERROR_UNKNOWN;
+    }
     critnib_delete(file_provider->fd_offset_map);
     critnib_delete(file_provider->mmaps);
     coarse_delete(file_provider->coarse);
     umf_ba_global_free(file_provider);
+
+    return ret;
 }
 
 static umf_result_t file_mmap_aligned(file_memory_provider_t *file_provider,
3 changes: 2 additions & 1 deletion src/provider/provider_fixed_memory.c
@@ -153,10 +153,11 @@ static umf_result_t fixed_initialize(const void *params, void **provider) {
     return ret;
 }
 
-static void fixed_finalize(void *provider) {
+static umf_result_t fixed_finalize(void *provider) {
     fixed_memory_provider_t *fixed_provider = provider;
     coarse_delete(fixed_provider->coarse);
     umf_ba_global_free(fixed_provider);
+    return UMF_RESULT_SUCCESS;
 }
 
 static umf_result_t fixed_alloc(void *provider, size_t size, size_t alignment,
3 changes: 2 additions & 1 deletion src/provider/provider_level_zero.c
@@ -479,11 +479,12 @@ static umf_result_t query_min_page_size(ze_memory_provider_t *ze_provider,
     return ze2umf_result(ze_result);
 }
 
-static void ze_memory_provider_finalize(void *provider) {
+static umf_result_t ze_memory_provider_finalize(void *provider) {
     ze_memory_provider_t *ze_provider = (ze_memory_provider_t *)provider;
     umf_ba_global_free(ze_provider->resident_device_handles);
 
     umf_ba_global_free(provider);
+    return UMF_RESULT_SUCCESS;
 }
 
 static umf_result_t ze_memory_provider_initialize(const void *params,
3 changes: 2 additions & 1 deletion src/provider/provider_os_memory.c
@@ -701,7 +701,7 @@ static umf_result_t os_initialize(const void *params, void **provider) {
     return ret;
 }
 
-static void os_finalize(void *provider) {
+static umf_result_t os_finalize(void *provider) {
     os_memory_provider_t *os_provider = provider;
 
     if (os_provider->fd > 0) {
@@ -721,6 +721,7 @@ static void os_finalize(void *provider) {
     }
     hwloc_topology_destroy(os_provider->topo);
     umf_ba_global_free(os_provider);
+    return UMF_RESULT_SUCCESS;
 }
 
 // TODO: this function should be re-enabled when CTL is implemented