Skip to content

Commit bf8635d

Browse files
committed
Use Option for drop argument and move rehash_in_place to the test module
1 parent 3e8b55e commit bf8635d

File tree

1 file changed

+24
-25
lines changed

1 file changed

+24
-25
lines changed

src/raw/mod.rs

+24-25
Original file line numberDiff line numberDiff line change
@@ -689,28 +689,15 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
689689
&|table, index| hasher(table.bucket::<T>(index).as_ref()),
690690
fallibility,
691691
TableLayout::new::<T>(),
692-
mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)),
693-
mem::needs_drop::<T>(),
692+
if mem::needs_drop::<T>() {
693+
Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
694+
} else {
695+
None
696+
},
694697
)
695698
}
696699
}
697700

698-
/// Rehashes the contents of the table in place (i.e. without changing the
699-
/// allocation).
700-
///
701-
/// If `hasher` panics then some of the table's contents may be lost.
702-
#[cfg(test)]
703-
fn rehash_in_place(&mut self, hasher: impl Fn(&T) -> u64) {
704-
unsafe {
705-
self.table.rehash_in_place(
706-
&|table, index| hasher(table.bucket::<T>(index).as_ref()),
707-
mem::size_of::<T>(),
708-
mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)),
709-
mem::needs_drop::<T>(),
710-
);
711-
}
712-
}
713-
714701
/// Allocates a new table of a different size and moves the contents of the
715702
/// current table into it.
716703
fn resize(
@@ -1389,8 +1376,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
13891376
hasher: &dyn Fn(&mut Self, usize) -> u64,
13901377
fallibility: Fallibility,
13911378
layout: TableLayout,
1392-
drop: fn(*mut u8),
1393-
drops: bool,
1379+
drop: Option<fn(*mut u8)>,
13941380
) -> Result<(), TryReserveError> {
13951381
// Avoid `Option::ok_or_else` because it bloats LLVM IR.
13961382
let new_items = match self.items.checked_add(additional) {
@@ -1401,7 +1387,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
14011387
if new_items <= full_capacity / 2 {
14021388
// Rehash in-place without re-allocating if we have plenty of spare
14031389
// capacity that is locked up due to DELETED entries.
1404-
self.rehash_in_place(hasher, layout.size, drop, drops);
1390+
self.rehash_in_place(hasher, layout.size, drop);
14051391
Ok(())
14061392
} else {
14071393
// Otherwise, conservatively resize to at least the next size up
@@ -1475,8 +1461,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
14751461
&mut self,
14761462
hasher: &dyn Fn(&mut Self, usize) -> u64,
14771463
size_of: usize,
1478-
drop: fn(*mut u8),
1479-
drops: bool,
1464+
drop: Option<fn(*mut u8)>,
14801465
) {
14811466
// If the hash function panics then properly clean up any elements
14821467
// that we haven't rehashed yet. We unfortunately can't preserve the
@@ -1485,7 +1470,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
14851470
self.prepare_rehash_in_place();
14861471

14871472
let mut guard = guard(self, move |self_| {
1488-
if drops {
1473+
if let Some(drop) = drop {
14891474
for i in 0..self_.buckets() {
14901475
if *self_.ctrl(i) == DELETED {
14911476
self_.set_ctrl(i, EMPTY);
@@ -2375,6 +2360,20 @@ impl<'a, A: Allocator + Clone> Iterator for RawIterHashInner<'a, A> {
23752360
mod test_map {
23762361
use super::*;
23772362

2363+
fn rehash_in_place<T>(table: &mut RawTable<T>, hasher: impl Fn(&T) -> u64) {
2364+
unsafe {
2365+
table.table.rehash_in_place(
2366+
&|table, index| hasher(table.bucket::<T>(index).as_ref()),
2367+
mem::size_of::<T>(),
2368+
if mem::needs_drop::<T>() {
2369+
Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
2370+
} else {
2371+
None
2372+
},
2373+
);
2374+
}
2375+
}
2376+
23782377
#[test]
23792378
fn rehash() {
23802379
let mut table = RawTable::new();
@@ -2390,7 +2389,7 @@ mod test_map {
23902389
assert!(table.find(i + 100, |x| *x == i + 100).is_none());
23912390
}
23922391

2393-
table.rehash_in_place(hasher);
2392+
rehash_in_place(&mut table, hasher);
23942393

23952394
for i in 0..100 {
23962395
unsafe {

0 commit comments

Comments
 (0)