@@ -89,6 +89,9 @@
 #define NIB ((1ULL << SLICE) - 1)
 #define SLNODES (1 << SLICE)
 
+// LEAF_VALID must be >= 2: smaller ref_count values are reserved
+#define LEAF_VALID (2ULL)
+
 typedef uintptr_t word;
 typedef uint8_t sh_t;
 
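For orientation (inferred from how the patch uses the constant; the diff itself only says it must be >= 2): LEAF_VALID splits the ref_count range into reserved and live states:

    // ref_count == 0                leaf unused / already on the deleted-leaf list
    // ref_count == 1                transient: the last reference was just
    //                               dropped, critnib_release() is freeing it
    // ref_count >= LEAF_VALID (2)   leaf valid and referenced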
@@ -331,18 +334,26 @@ static void free_leaf(struct critnib *__restrict c,
         return;
     }
 
+    // k should be added to the c->deleted_leaf list here
+    // or in critnib_release() when the reference count drops to 0.
+    utils_atomic_store_release_u8(&k->pending_deleted_leaf, 1);
+
     if (c->cb_free_leaf) {
         uint64_t ref_count;
         utils_atomic_load_acquire_u64(&k->ref_count, &ref_count);
         if (ref_count > 0) {
-            // k will be added to c->deleted_leaf in critnib_release()
+            // k will be added to the c->deleted_leaf list in critnib_release()
             // when the reference count drops to 0.
-            utils_atomic_store_release_u8(&k->pending_deleted_leaf, 1);
             return;
         }
     }
 
-    add_to_deleted_leaf_list(c, k);
+    uint8_t expected = 1;
+    uint8_t desired = 0;
+    if (utils_compare_exchange_u8(&k->pending_deleted_leaf, &expected,
+                                  &desired)) {
+        add_to_deleted_leaf_list(c, k);
+    }
 }
 
 /*
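Reviewer note, not part of the patch: free_leaf() now publishes pending_deleted_leaf = 1 before inspecting the reference count, and both free_leaf() and critnib_release() then race on a 1 -> 0 compare-exchange, so exactly one of them enqueues the leaf. A minimal model of that claim pattern, using C11 atomics in place of the utils_* wrappers (names here are illustrative):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    static _Atomic uint8_t pending_deleted_leaf = 0;

    // Called by both the deleter and the last releaser; the CAS from 1 to 0
    // succeeds for exactly one of them, so the leaf is enqueued exactly once.
    static bool claim_pending_deletion(void) {
        uint8_t expected = 1;
        return atomic_compare_exchange_strong(&pending_deleted_leaf,
                                              &expected, 0);
    }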
@@ -392,8 +403,8 @@ int critnib_insert(struct critnib *c, word key, void *value, int update) {
     utils_atomic_store_release_u8(&k->pending_deleted_leaf, 0);
 
     if (c->cb_free_leaf) {
-        // mark the leaf as valid (ref_count == 1)
-        utils_atomic_store_release_u64(&k->ref_count, 1ULL);
+        // mark the leaf as valid (ref_count == 2)
+        utils_atomic_store_release_u64(&k->ref_count, LEAF_VALID);
     } else {
         // the reference counter is not used in this case
         utils_atomic_store_release_u64(&k->ref_count, 0ULL);
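Why the initial value moves from 1 to LEAF_VALID: critnib_release() (below) frees the value once the count falls to LEAF_VALID - 1 == 1, so a freshly valid leaf must start one above that floor. An assumed lifecycle trace (illustration, not tool output):

    // critnib_insert():      ref_count = LEAF_VALID      // 2: leaf valid
    // increment_ref_count(): ref_count = LEAF_VALID + 1  // 3: one extra reader
    // critnib_release():     ref_count = LEAF_VALID      // 2: still valid
    // critnib_release():     ref_count = LEAF_VALID - 1  // 1: free the value...
    //                        ref_count = 0               // ...then mark unused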
@@ -602,36 +613,52 @@ int critnib_release(struct critnib *c, void *ref) {
     struct critnib_leaf *k = (struct critnib_leaf *)ref;
 
     uint64_t ref_count;
-    utils_atomic_load_acquire_u64(&k->ref_count, &ref_count);
-
-    if (ref_count == 0) {
-        return -1;
-    }
-
+    uint64_t ref_desired;
     /* decrement the reference count */
-    if (utils_atomic_decrement_u64(&k->ref_count) == 0) {
-        void *to_be_freed = NULL;
-        utils_atomic_load_acquire_ptr(&k->to_be_freed, &to_be_freed);
-        if (to_be_freed) {
-            utils_atomic_store_release_ptr(&k->to_be_freed, NULL);
-            c->cb_free_leaf(c->leaf_allocator, to_be_freed);
-        }
-        uint8_t pending_deleted_leaf;
-        utils_atomic_load_acquire_u8(&k->pending_deleted_leaf,
-                                     &pending_deleted_leaf);
-        if (pending_deleted_leaf) {
-            utils_atomic_store_release_u8(&k->pending_deleted_leaf, 0);
-            add_to_deleted_leaf_list(c, k);
+    do {
+        utils_atomic_load_acquire_u64(&k->ref_count, &ref_count);
+        if (ref_count < LEAF_VALID) {
+#ifndef NDEBUG
+            LOG_FATAL("critnib_release() was called too many times (ref_count "
+                      "= %llu)\n",
+                      (unsigned long long)ref_count);
+            assert(ref_count >= LEAF_VALID);
+#endif
+            return -1;
         }
+        ref_desired = ref_count - 1;
+    } while (
+        !utils_compare_exchange_u64(&k->ref_count, &ref_count, &ref_desired));
+
+    if (ref_desired >= LEAF_VALID) {
+        // ref_count was decremented and the leaf is still valid
+        return 0;
     }
 
+    /* ref_count == (LEAF_VALID - 1) - the leaf will be freed */
+    void *to_be_freed = NULL;
+    utils_atomic_load_acquire_ptr(&k->to_be_freed, &to_be_freed);
+    utils_atomic_store_release_ptr(&k->to_be_freed, NULL);
 #ifndef NDEBUG
-    // check if the reference count is overflowed
-    utils_atomic_load_acquire_u64(&k->ref_count, &ref_count);
-    assert((ref_count & (1ULL << 63)) == 0);
-    assert(ref_count != (uint64_t)(0 - 1ULL));
+    if (to_be_freed == NULL) {
+        LOG_FATAL("leaf will not be freed (to_be_freed == NULL, value = %p)\n",
+                  k->value);
+        assert(to_be_freed != NULL);
+    }
 #endif
 
+    // mark the leaf as not used (ref_count == 0)
+    utils_atomic_store_release_u64(&k->ref_count, 0ULL);
+
+    c->cb_free_leaf(c->leaf_allocator, to_be_freed);
+
+    uint8_t expected = 1;
+    uint8_t desired = 0;
+    if (utils_compare_exchange_u8(&k->pending_deleted_leaf, &expected,
+                                  &desired)) {
+        add_to_deleted_leaf_list(c, k);
+    }
+
     return 0;
 }
 
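The release path swaps the unconditional utils_atomic_decrement_u64() for a CAS loop, making the under-release check and the decrement a single atomic step instead of the old load-then-decrement sequence, so the counter can never underflow past the LEAF_VALID floor. A self-contained sketch of the same pattern with C11 atomics (function and names assumed, not the UMF API):

    #include <stdatomic.h>
    #include <stdint.h>

    #define LEAF_VALID 2ULL

    // Decrements *ref_count, refusing to drop below LEAF_VALID - 1.
    // Returns the new value, or UINT64_MAX on an over-release.
    static uint64_t release_ref(_Atomic uint64_t *ref_count) {
        uint64_t expected =
            atomic_load_explicit(ref_count, memory_order_acquire);
        uint64_t desired;
        do {
            if (expected < LEAF_VALID) {
                return UINT64_MAX; // leaf already unused or being freed
            }
            desired = expected - 1;
            // on CAS failure, 'expected' is reloaded with the current value
        } while (!atomic_compare_exchange_weak(ref_count, &expected, desired));
        return desired; // == LEAF_VALID - 1 when this was the last reference
    }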
@@ -661,7 +688,7 @@ static inline int increment_ref_count(struct critnib_leaf *k) {
 
     do {
         utils_atomic_load_acquire_u64(&k->ref_count, &expected);
-        if (expected == 0) {
+        if (expected < LEAF_VALID) {
            return -1;
        }
        desired = expected + 1;
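The acquire side mirrors the release loop: increment_ref_count() only takes a reference while the count is still at or above LEAF_VALID, so a leaf whose count has dropped to 1 (being freed) or 0 (unused) cannot be resurrected by a concurrent reader. A sketch under the same C11-atomics assumption, reusing the includes and LEAF_VALID from the previous example:

    #include <stdbool.h>

    static bool try_acquire_ref(_Atomic uint64_t *ref_count) {
        uint64_t expected =
            atomic_load_explicit(ref_count, memory_order_acquire);
        do {
            if (expected < LEAF_VALID) {
                return false; // too late: leaf is dying or already dead
            }
        } while (!atomic_compare_exchange_weak(ref_count, &expected,
                                               expected + 1));
        return true;
    }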