Skip to content

[disjoint] Add CTL memory used/reserved metrics #1430

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
80 changes: 78 additions & 2 deletions src/pool/pool_disjoint.c
Original file line number Diff line number Diff line change
Expand Up @@ -72,8 +72,84 @@ static umf_result_t CTL_WRITE_HANDLER(name)(void *ctx,
return UMF_RESULT_SUCCESS;
}

/// CTL read handler for "disjoint.used_memory".
///
/// Writes the total number of bytes currently handed out to callers into
/// *(size_t *)arg. The value is computed by walking every bucket and summing
/// num_chunks_allocated * bucket->size over both the available and the
/// unavailable slab lists, taking each bucket's lock while its lists are
/// traversed.
///
/// @param ctx   pool handle (disjoint_pool_t *), supplied by the CTL framework
/// @param arg   out-parameter; must point to at least sizeof(size_t) bytes
/// @param size  caller-declared size of *arg
/// @return UMF_RESULT_ERROR_INVALID_ARGUMENT if arg is NULL or size is too
///         small, UMF_RESULT_SUCCESS otherwise.
static umf_result_t CTL_READ_HANDLER(used_memory)(
    void *ctx, umf_ctl_query_source_t source, void *arg, size_t size,
    umf_ctl_index_utlist_t *indexes, const char *extra_name,
    umf_ctl_query_type_t queryType) {
    (void)source, (void)indexes, (void)queryType, (void)extra_name;
    disjoint_pool_t *pool = (disjoint_pool_t *)ctx;

    // Accept any buffer large enough to hold a size_t; an exact-size match
    // is deliberately not required.
    if (arg == NULL || size < sizeof(size_t)) {
        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
    }

    size_t used_memory = 0;

    // Calculate used memory across all buckets.
    for (size_t i = 0; i < pool->buckets_num; i++) {
        bucket_t *bucket = pool->buckets[i];
        utils_mutex_lock(&bucket->bucket_lock);

        // Count allocated chunks in available slabs (partially used).
        slab_list_item_t *it;
        for (it = bucket->available_slabs; it != NULL; it = it->next) {
            slab_t *slab = it->val;
            used_memory += slab->num_chunks_allocated * bucket->size;
        }

        // Count allocated chunks in unavailable slabs (fully used).
        for (it = bucket->unavailable_slabs; it != NULL; it = it->next) {
            slab_t *slab = it->val;
            used_memory += slab->num_chunks_allocated * bucket->size;
        }

        utils_mutex_unlock(&bucket->bucket_lock);
    }

    *(size_t *)arg = used_memory;
    return UMF_RESULT_SUCCESS;
}

/// CTL read handler for "disjoint.reserved_memory".
///
/// Writes the total number of bytes the pool has reserved from its upstream
/// provider into *(size_t *)arg. The value is the sum of slab_size over every
/// slab in every bucket (both available and unavailable lists), taking each
/// bucket's lock while its lists are traversed. Reserved memory does not
/// shrink on free while slabs remain pooled.
///
/// @param ctx   pool handle (disjoint_pool_t *), supplied by the CTL framework
/// @param arg   out-parameter; must point to at least sizeof(size_t) bytes
/// @param size  caller-declared size of *arg
/// @return UMF_RESULT_ERROR_INVALID_ARGUMENT if arg is NULL or size is too
///         small, UMF_RESULT_SUCCESS otherwise.
static umf_result_t CTL_READ_HANDLER(reserved_memory)(
    void *ctx, umf_ctl_query_source_t source, void *arg, size_t size,
    umf_ctl_index_utlist_t *indexes, const char *extra_name,
    umf_ctl_query_type_t queryType) {
    (void)source, (void)indexes, (void)queryType, (void)extra_name;
    disjoint_pool_t *pool = (disjoint_pool_t *)ctx;

    // Same argument contract as the used_memory handler: any buffer that can
    // hold a size_t is accepted.
    if (arg == NULL || size < sizeof(size_t)) {
        return UMF_RESULT_ERROR_INVALID_ARGUMENT;
    }

    size_t reserved_memory = 0;

    // Calculate reserved memory across all buckets.
    for (size_t i = 0; i < pool->buckets_num; i++) {
        bucket_t *bucket = pool->buckets[i];
        utils_mutex_lock(&bucket->bucket_lock);

        // Count all slabs, both available and unavailable.
        slab_list_item_t *it;
        for (it = bucket->available_slabs; it != NULL; it = it->next) {
            slab_t *slab = it->val;
            reserved_memory += slab->slab_size;
        }

        for (it = bucket->unavailable_slabs; it != NULL; it = it->next) {
            slab_t *slab = it->val;
            reserved_memory += slab->slab_size;
        }

        utils_mutex_unlock(&bucket->bucket_lock);
    }

    *(size_t *)arg = reserved_memory;
    return UMF_RESULT_SUCCESS;
}

static const umf_ctl_node_t CTL_NODE(disjoint)[] = {
CTL_LEAF_RW(name), CTL_LEAF_RO(used_memory), CTL_LEAF_RO(reserved_memory),
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Used memory should be in the stats subtree

CTL_NODE_END};

static void initialize_disjoint_ctl(void) {
CTL_REGISTER_MODULE(&disjoint_ctl_root, disjoint);
Expand Down
269 changes: 269 additions & 0 deletions test/pools/disjoint_pool_ctl.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,10 @@
#include <umf/pools/pool_disjoint.h>
#include <umf/providers/provider_os_memory.h>

#include <vector>

#include "base.hpp"
#include "utils_assert.h"
#include "utils_log.h"

using umf_test::test;
Expand Down Expand Up @@ -152,3 +155,269 @@ TEST_F(test, disjointCtlChangeNameTwice) {
ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
}

// Verifies the "disjoint.used_memory" CTL metric: starts at zero, grows with
// allocations, returns to zero after all frees, and matches the bucket size
// exactly when the allocation size equals a bucket size.
TEST_F(test, disjointCtlUsedMemory) {
    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
        GTEST_SKIP() << "OS memory provider is not supported!";
    }

    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
                                    os_memory_provider_params);
    if (providerWrapper.get() == NULL) {
        GTEST_SKIP() << "OS memory provider is not supported!";
    }

    umf_disjoint_pool_params_handle_t params = nullptr;
    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));

    // This value configures the MINIMUM BUCKET size (the original code named
    // it slab_min_size, which was misleading) and the setter's result was
    // previously unchecked.
    const size_t min_bucket_size = 64 * 1024;
    ASSERT_SUCCESS(
        umfDisjointPoolParamsSetMinBucketSize(params, min_bucket_size));

    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
                            params);

    // Initially, used memory should be 0.
    size_t used_memory = 0;
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
                             poolWrapper.get(), &used_memory,
                             sizeof(used_memory)));
    ASSERT_EQ(used_memory, 0ull);

    // Allocate some memory.
    void *ptr1 = umfPoolMalloc(poolWrapper.get(), 1024ull);
    ASSERT_NE(ptr1, nullptr);

    // Used memory is accounted per bucket chunk, so it is at least the
    // requested size.
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
                             poolWrapper.get(), &used_memory,
                             sizeof(used_memory)));
    ASSERT_GE(used_memory, 1024ull);

    // Allocate more memory.
    void *ptr2 = umfPoolMalloc(poolWrapper.get(), 2048ull);
    ASSERT_NE(ptr2, nullptr);

    size_t used_memory2 = 0;
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
                             poolWrapper.get(), &used_memory2,
                             sizeof(used_memory2)));
    ASSERT_GE(used_memory2, used_memory + 2048ull);

    // Free everything; used memory must drop back to 0.
    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr1));
    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr2));

    size_t used_memory3 = 0;
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
                             poolWrapper.get(), &used_memory3,
                             sizeof(used_memory3)));
    ASSERT_EQ(used_memory3, 0ull);

    // Allocate exactly the minimum bucket size: the allocation should land in
    // a bucket of that size, so used memory equals it exactly.
    void *ptr3 = umfPoolMalloc(poolWrapper.get(), min_bucket_size);
    ASSERT_NE(ptr3, nullptr);

    size_t used_memory4 = 0;
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
                             poolWrapper.get(), &used_memory4,
                             sizeof(used_memory4)));
    ASSERT_EQ(used_memory4, min_bucket_size);

    // Clean up.
    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
}

// Verifies the "disjoint.reserved_memory" CTL metric: zero before any
// allocation, at least one slab after the first allocation, unchanged by
// frees (slabs stay pooled), and never smaller than used memory.
TEST_F(test, disjointCtlReservedMemory) {
    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
    const size_t slab_min_size = 64 * 1024;

    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
        GTEST_SKIP() << "OS memory provider is not supported!";
    }

    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
                                    os_memory_provider_params);
    if (providerWrapper.get() == NULL) {
        GTEST_SKIP() << "OS memory provider is not supported!";
    }

    umf_disjoint_pool_params_handle_t params = nullptr;
    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));

    // Set minimum slab size so every slab reserves at least slab_min_size.
    ASSERT_SUCCESS(umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size));

    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
                            params);

    // Initially, reserved memory should be 0.
    size_t reserved_memory = 0;
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
                             poolWrapper.get(), &reserved_memory,
                             sizeof(reserved_memory)));
    ASSERT_EQ(reserved_memory, 0ull);

    // Allocate some memory.
    void *ptr1 = umfPoolMalloc(poolWrapper.get(), 1024ull);
    ASSERT_NE(ptr1, nullptr);

    // Reserved memory increased: at least one slab was created.
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
                             poolWrapper.get(), &reserved_memory,
                             sizeof(reserved_memory)));
    ASSERT_GE(reserved_memory, slab_min_size);

    void *ptr2 = umfPoolMalloc(poolWrapper.get(), 1024ull);
    ASSERT_NE(ptr2, nullptr);

    size_t reserved_memory2 = 0;
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
                             poolWrapper.get(), &reserved_memory2,
                             sizeof(reserved_memory2)));
    size_t used_memory = 0;
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
                             poolWrapper.get(), &used_memory,
                             sizeof(used_memory)));

    ASSERT_GE(reserved_memory2, slab_min_size);
    // Invariant: the pool can never hand out more than it has reserved.
    // (The original test fetched used_memory but never asserted on it.)
    ASSERT_LE(used_memory, reserved_memory2);

    // Free memory - reserved memory should stay the same (slabs are pooled).
    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr1));
    ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr2));

    size_t reserved_memory3 = 0;
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
                             poolWrapper.get(), &reserved_memory3,
                             sizeof(reserved_memory3)));
    ASSERT_EQ(reserved_memory3, slab_min_size);

    // Clean up.
    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
}

// Checks that used_memory and reserved_memory stay mutually consistent across
// a batch of slab-sized allocations and frees, with the pool capacity capped
// at 4 retained slabs.
TEST_F(test, disjointCtlMemoryMetricsConsistency) {
    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
        GTEST_SKIP() << "OS memory provider is not supported!";
    }

    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
                                    os_memory_provider_params);
    if (providerWrapper.get() == NULL) {
        GTEST_SKIP() << "OS memory provider is not supported!";
    }

    umf_disjoint_pool_params_handle_t params = nullptr;
    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));

    // Set minimum slab size and cap the pool at 4 retained slabs per bucket.
    size_t slab_min_size = 64 * 1024;
    ASSERT_SUCCESS(umfDisjointPoolParamsSetSlabMinSize(params, slab_min_size));
    ASSERT_SUCCESS(umfDisjointPoolParamsSetCapacity(params, 4));

    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
                            params);

    const size_t n_allocations = 10; // Number of allocations

    // Allocate n_allocations blocks of exactly one slab each.
    std::vector<void *> ptrs;
    for (size_t i = 0; i < n_allocations; i++) {
        void *ptr = umfPoolMalloc(poolWrapper.get(), slab_min_size);
        ASSERT_NE(ptr, nullptr);
        ptrs.push_back(ptr);
    }

    // Get memory metrics while everything is still allocated.
    size_t used_memory = 0;
    size_t reserved_memory = 0;
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
                             poolWrapper.get(), &used_memory,
                             sizeof(used_memory)));
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
                             poolWrapper.get(), &reserved_memory,
                             sizeof(reserved_memory)));

    // Used memory should be at least the total allocated
    ASSERT_GE(used_memory, n_allocations * slab_min_size);

    // Reserved memory should be at least capacity (4) slabs' worth; with 10
    // live slab-sized allocations it is in fact >= 10 slabs, so this weaker
    // bound must hold.
    ASSERT_GE(reserved_memory, 4 * slab_min_size);

    // Free all memory
    for (void *ptr : ptrs) {
        ASSERT_SUCCESS(umfPoolFree(poolWrapper.get(), ptr));
    }

    // Check metrics after free
    size_t used_memory_after = 0;
    size_t reserved_memory_after = 0;
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.used_memory",
                             poolWrapper.get(), &used_memory_after,
                             sizeof(used_memory_after)));
    ASSERT_SUCCESS(umfCtlGet("umf.pool.by_handle.disjoint.reserved_memory",
                             poolWrapper.get(), &reserved_memory_after,
                             sizeof(reserved_memory_after)));

    // Used memory should be 0 after freeing
    ASSERT_EQ(used_memory_after, 0ull);
    // Exactly capacity (4) slabs remain pooled after the frees; the other 6
    // are returned to the provider.
    ASSERT_EQ(reserved_memory_after, 4 * slab_min_size);

    // Clean up
    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
}

// Both memory-metric CTL entries must reject a NULL output pointer and an
// undersized output buffer with UMF_RESULT_ERROR_INVALID_ARGUMENT.
TEST_F(test, disjointCtlMemoryMetricsInvalidArgs) {
    umf_os_memory_provider_params_handle_t os_memory_provider_params = nullptr;
    if (UMF_RESULT_ERROR_NOT_SUPPORTED ==
        umfOsMemoryProviderParamsCreate(&os_memory_provider_params)) {
        GTEST_SKIP() << "OS memory provider is not supported!";
    }

    ProviderWrapper providerWrapper(umfOsMemoryProviderOps(),
                                    os_memory_provider_params);
    if (providerWrapper.get() == NULL) {
        GTEST_SKIP() << "OS memory provider is not supported!";
    }

    umf_disjoint_pool_params_handle_t params = nullptr;
    ASSERT_SUCCESS(umfDisjointPoolParamsCreate(&params));
    PoolWrapper poolWrapper(providerWrapper.get(), umfDisjointPoolOps(),
                            params);

    size_t value = 0;

    // Exercise both metric paths with the same pair of invalid argument sets.
    const char *paths[] = {"umf.pool.by_handle.disjoint.used_memory",
                           "umf.pool.by_handle.disjoint.reserved_memory"};
    for (const char *path : paths) {
        // NULL output pointer must be rejected.
        ASSERT_EQ(umfCtlGet(path, poolWrapper.get(), NULL, sizeof(value)),
                  UMF_RESULT_ERROR_INVALID_ARGUMENT);

        // Buffer smaller than sizeof(size_t) must be rejected.
        ASSERT_EQ(umfCtlGet(path, poolWrapper.get(), &value,
                            sizeof(size_t) / 2),
                  UMF_RESULT_ERROR_INVALID_ARGUMENT);
    }

    // Clean up.
    ASSERT_SUCCESS(umfDisjointPoolParamsDestroy(params));
    ASSERT_SUCCESS(umfOsMemoryProviderParamsDestroy(os_memory_provider_params));
}
Loading