@@ -33,6 +33,7 @@ static char *DEFAULT_NAME = "disjoint";
33
33
struct ctl disjoint_ctl_root ;
34
34
static UTIL_ONCE_FLAG ctl_initialized = UTIL_ONCE_FLAG_INIT ;
35
35
36
+ // CTL: name attribute
36
37
static int CTL_READ_HANDLER (name )(void * ctx , umf_ctl_query_source_t source ,
37
38
void * arg , size_t size ,
38
39
umf_ctl_index_utlist_t * indexes ,
@@ -66,8 +67,28 @@ static int CTL_WRITE_HANDLER(name)(void *ctx, umf_ctl_query_source_t source,
66
67
return 0 ;
67
68
}
68
69
69
- static const umf_ctl_node_t CTL_NODE (disjoint )[] = {CTL_LEAF_RW (name ),
70
- CTL_NODE_END };
70
// CTL: allocation counters
// File-global net balance of allocations made through this pool module:
// incremented in disjoint_pool_malloc and decremented on the successful
// free paths. Shared across threads, so it is accessed only through the
// utils_atomic_* helpers, never read or written directly.
static uint64_t allocation_balance = 0;
72
+
73
+ static int CTL_READ_HANDLER (allocation_balance )(
74
+ void * ctx , umf_ctl_query_source_t source , void * arg , size_t size ,
75
+ umf_ctl_index_utlist_t * indexes , const char * extra_name ,
76
+ umf_ctl_query_type_t queryType ) {
77
+ (void )ctx , (void )source , (void )size , (void )indexes , (void )extra_name ,
78
+ (void )queryType ;
79
+ disjoint_pool_t * pool = (disjoint_pool_t * )ctx ;
80
+ if (arg == NULL ) {
81
+ return -1 ;
82
+ }
83
+ uint64_t * balance = (uint64_t * )arg ;
84
+ * balance = 0 ;
85
+ utils_atomic_load_acquire_u64 (& allocation_balance , balance );
86
+
87
+ return 0 ;
88
+ }
89
+
90
+ static const umf_ctl_node_t CTL_NODE (disjoint )[] = {
91
+ CTL_LEAF_RW (name ), CTL_LEAF_RO (allocation_balance ), CTL_NODE_END };
71
92
72
93
static void initialize_disjoint_ctl (void ) {
73
94
CTL_REGISTER_MODULE (& disjoint_ctl_root , disjoint );
@@ -579,7 +600,6 @@ static void *disjoint_pool_allocate(disjoint_pool_t *pool, size_t size) {
579
600
}
580
601
581
602
void * ptr = NULL ;
582
-
583
603
if (size > pool -> params .max_poolable_size ) {
584
604
umf_result_t ret =
585
605
umfMemoryProviderAlloc (pool -> provider , size , 0 , & ptr );
@@ -755,7 +775,7 @@ umf_result_t disjoint_pool_initialize(umf_memory_provider_handle_t provider,
755
775
void * disjoint_pool_malloc (void * pool , size_t size ) {
756
776
disjoint_pool_t * hPool = (disjoint_pool_t * )pool ;
757
777
void * ptr = disjoint_pool_allocate (hPool , size );
758
-
778
+ utils_atomic_increment_u64 ( & allocation_balance );
759
779
return ptr ;
760
780
}
761
781
@@ -939,8 +959,9 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
939
959
if (ret != UMF_RESULT_SUCCESS ) {
940
960
TLS_last_allocation_error = ret ;
941
961
LOG_ERR ("deallocation from the memory provider failed" );
962
+ } else {
963
+ utils_atomic_decrement_u64 (& allocation_balance );
942
964
}
943
-
944
965
return ret ;
945
966
}
946
967
@@ -971,6 +992,9 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
971
992
critnib_release (disjoint_pool -> known_slabs , ref_slab );
972
993
973
994
if (disjoint_pool -> params .pool_trace > 1 ) {
995
+ printf ("Freeing %8zu %s bytes from %s -> %p\n" , bucket -> size ,
996
+ disjoint_pool -> params .name , (to_pool ? "pool" : "provider" ),
997
+ unaligned_ptr );
974
998
bucket -> free_count ++ ;
975
999
}
976
1000
@@ -985,7 +1009,7 @@ umf_result_t disjoint_pool_free(void *pool, void *ptr) {
985
1009
disjoint_pool_get_limits (disjoint_pool )-> total_size , name ,
986
1010
disjoint_pool -> params .cur_pool_size );
987
1011
}
988
-
1012
+ utils_atomic_decrement_u64 ( & allocation_balance );
989
1013
return UMF_RESULT_SUCCESS ;
990
1014
}
991
1015
0 commit comments