@@ -33,6 +33,8 @@ static char *DEFAULT_NAME = "disjoint";
33
33
struct ctl disjoint_ctl_root ;
34
34
static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT ;
35
35
36
+ // Disable name ctl for 1.0 release
37
+ #if 0
36
38
static umf_result_t CTL_READ_HANDLER (name )(void * ctx ,
37
39
umf_ctl_query_source_t source ,
38
40
void * arg , size_t size ,
@@ -70,12 +72,88 @@ static umf_result_t CTL_WRITE_HANDLER(name)(void *ctx,
70
72
71
73
return UMF_RESULT_SUCCESS ;
72
74
}
75
+ #endif
76
+ static umf_result_t
77
+ CTL_READ_HANDLER (used_memory )(void * ctx , umf_ctl_query_source_t source ,
78
+ void * arg , size_t size ,
79
+ umf_ctl_index_utlist_t * indexes ) {
80
+ (void )source , (void )indexes ;
81
+ disjoint_pool_t * pool = (disjoint_pool_t * )ctx ;
82
+
83
+ if (arg == NULL || size != sizeof (size_t )) {
84
+ return UMF_RESULT_ERROR_INVALID_ARGUMENT ;
85
+ }
86
+
87
+ size_t used_memory = 0 ;
88
+
89
+ // Calculate used memory across all buckets
90
+ for (size_t i = 0 ; i < pool -> buckets_num ; i ++ ) {
91
+ bucket_t * bucket = pool -> buckets [i ];
92
+ utils_mutex_lock (& bucket -> bucket_lock );
93
+
94
+ // Count allocated chunks in available slabs
95
+ slab_list_item_t * it ;
96
+ for (it = bucket -> available_slabs ; it != NULL ; it = it -> next ) {
97
+ slab_t * slab = it -> val ;
98
+ used_memory += slab -> num_chunks_allocated * bucket -> size ;
99
+ }
100
+
101
+ // Count allocated chunks in unavailable slabs (all chunks allocated)
102
+ for (it = bucket -> unavailable_slabs ; it != NULL ; it = it -> next ) {
103
+ slab_t * slab = it -> val ;
104
+ used_memory += slab -> num_chunks_allocated * bucket -> size ;
105
+ }
106
+
107
+ utils_mutex_unlock (& bucket -> bucket_lock );
108
+ }
109
+
110
+ * (size_t * )arg = used_memory ;
111
+ return UMF_RESULT_SUCCESS ;
112
+ }
113
+
114
+ static umf_result_t
115
+ CTL_READ_HANDLER (reserved_memory )(void * ctx , umf_ctl_query_source_t source ,
116
+ void * arg , size_t size ,
117
+ umf_ctl_index_utlist_t * indexes ) {
118
+ (void )source , (void )indexes ;
119
+ disjoint_pool_t * pool = (disjoint_pool_t * )ctx ;
120
+
121
+ if (arg == NULL || size != sizeof (size_t )) {
122
+ return UMF_RESULT_ERROR_INVALID_ARGUMENT ;
123
+ }
124
+
125
+ size_t reserved_memory = 0 ;
126
+
127
+ // Calculate reserved memory across all buckets
128
+ for (size_t i = 0 ; i < pool -> buckets_num ; i ++ ) {
129
+ bucket_t * bucket = pool -> buckets [i ];
130
+ utils_mutex_lock (& bucket -> bucket_lock );
131
+
132
+ // Count all slabs (both available and unavailable)
133
+ slab_list_item_t * it ;
134
+ for (it = bucket -> available_slabs ; it != NULL ; it = it -> next ) {
135
+ slab_t * slab = it -> val ;
136
+ reserved_memory += slab -> slab_size ;
137
+ }
138
+
139
+ for (it = bucket -> unavailable_slabs ; it != NULL ; it = it -> next ) {
140
+ slab_t * slab = it -> val ;
141
+ reserved_memory += slab -> slab_size ;
142
+ }
143
+
144
+ utils_mutex_unlock (& bucket -> bucket_lock );
145
+ }
146
+
147
+ * (size_t * )arg = reserved_memory ;
148
+ return UMF_RESULT_SUCCESS ;
149
+ }
73
150
74
- static const umf_ctl_node_t CTL_NODE (disjoint )[] = {CTL_LEAF_RW ( name ),
75
- CTL_NODE_END };
151
+ static const umf_ctl_node_t CTL_NODE (stats )[] = {CTL_LEAF_RO ( used_memory ),
152
+ CTL_LEAF_RO ( reserved_memory ) };
76
153
77
154
// One-time initializer hooking the "stats" CTL node into this pool type's
// CTL root (presumably invoked through the ctl_initialized UTIL_ONCE_FLAG
// declared above — confirm at the call site).
static void initialize_disjoint_ctl(void) {
    CTL_REGISTER_MODULE(&disjoint_ctl_root, stats);
    // The "name" ctl is intentionally disabled for the 1.0 release (its
    // handlers are compiled out under #if 0 above); re-enable both
    // together when it ships.
    // CTL_REGISTER_MODULE(&disjoint_ctl_root, name);
}
80
158
81
159
umf_result_t disjoint_pool_ctl (void * hPool ,
0 commit comments