
Commit 6d89f8e

Cache lookups into the borrow stack
This adds a very simple LRU-like cache that stores the locations of often-used tags. While the implementation is very simple, the cache hit rate is incredible: ~99.9% on most programs, and the element at position 0 in the cache often has a hit rate of ~90% on its own. So the sub-optimality of this cache basically vanishes into the noise in a profile. Additionally, we keep a range that denotes where there might be an item granting Unique permission in the stack, so that when we invalidate Uniques we do not need to scan much of the stack, and often scan nothing at all.
1 parent 9230b92 commit 6d89f8e
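
To make the idea concrete, here is a minimal, self-contained sketch of the move-to-front lookup described above. The `Tag` and `LookupCache` names are hypothetical stand-ins; in the commit itself the `VecDeque<(SbTag, usize)>` lives directly on `Stack`, and a hit additionally requires that the found item grants the requested access.

```rust
use std::collections::VecDeque;

const CACHE_MAX_LEN: usize = 16;

/// Hypothetical stand-in for Miri's `SbTag::Tagged`.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Tag(u64);

/// Hypothetical wrapper; in the real commit these fields live on `Stack`.
struct LookupCache {
    entries: VecDeque<(Tag, usize)>,
}

impl LookupCache {
    fn new() -> Self {
        LookupCache { entries: VecDeque::new() }
    }

    /// On a hit, move the entry to the front so a hot tag is found at index 0.
    fn lookup(&mut self, tag: Tag) -> Option<usize> {
        let cache_idx = self.entries.iter().position(|&(t, _)| t == tag)?;
        if cache_idx != 0 {
            let entry = self.entries.remove(cache_idx).unwrap();
            self.entries.push_front(entry);
        }
        Some(self.entries[0].1)
    }

    /// After a miss, the caller scans the whole borrow stack and records the result.
    fn record(&mut self, tag: Tag, stack_idx: usize) {
        if self.entries.len() == CACHE_MAX_LEN {
            self.entries.pop_back(); // evict the least recently used tag
        }
        self.entries.push_front((tag, stack_idx));
    }
}

fn main() {
    let mut cache = LookupCache::new();
    cache.record(Tag(1), 7);
    cache.record(Tag(2), 3);
    assert_eq!(cache.lookup(Tag(1)), Some(7)); // hit: Tag(1) moves to the front
    assert_eq!(cache.lookup(Tag(9)), None); // miss: fall back to a full stack scan
}
```

With at most 16 entries, the linear scan of the `VecDeque` is effectively free, and the move-to-front discipline is what lets position 0 absorb the ~90% of hits mentioned above.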

File tree

1 file changed: +162 −26

src/stacked_borrows.rs

+162 −26
@@ -3,6 +3,7 @@
 
 use log::trace;
 use std::cell::RefCell;
+use std::collections::VecDeque;
 use std::fmt;
 use std::num::NonZeroU64;
 use std::rc::Rc;
@@ -82,16 +83,34 @@ impl fmt::Debug for Item {
     }
 }
 
+const CACHE_MAX_LEN: usize = 16;
+
 /// Extra per-location state.
-#[derive(Clone, Debug, PartialEq, Eq)]
+#[derive(Clone, Debug)]
 pub struct Stack {
     /// Used *mostly* as a stack; never empty.
     /// Invariants:
     /// * Above a `SharedReadOnly` there can only be more `SharedReadOnly`.
     /// * Except for `Untagged`, no tag occurs in the stack more than once.
     borrows: Vec<Item>,
+    cache: VecDeque<(SbTag, usize)>,
+    /// Index of the most recently inserted Untagged
+    top_untagged: Option<usize>,
+    /// On a read, we need to disable all `Unique` above the granting item. We can avoid most of
+    /// this scan by keeping track of the region of the borrow stack that may contain `Unique`s.
+    first_unique: usize,
+    last_unique: usize,
+}
+
+impl PartialEq for Stack {
+    fn eq(&self, other: &Self) -> bool {
+        // All the semantics of Stack are in self.borrows, everything else is caching
+        self.borrows == other.borrows
+    }
 }
 
+impl Eq for Stack {}
+
 /// Extra per-allocation state.
 #[derive(Clone, Debug)]
 pub struct Stacks {
@@ -276,18 +295,60 @@ impl Permission {
 impl<'tcx> Stack {
     /// Find the item granting the given kind of access to the given tag, and return where
     /// it is on the stack.
-    fn find_granting(&self, access: AccessKind, tag: SbTag) -> Option<usize> {
-        self.borrows
-            .iter()
-            .enumerate() // we also need to know *where* in the stack
-            .rev() // search top-to-bottom
-            // Return permission of first item that grants access.
-            // We require a permission with the right tag, ensuring U3 and F3.
-            .find_map(
-                |(idx, item)| {
-                    if tag == item.tag && item.perm.grants(access) { Some(idx) } else { None }
-                },
-            )
+    fn find_granting(&mut self, access: AccessKind, tag: SbTag) -> Option<usize> {
+        match tag {
+            SbTag::Untagged => {
+                let end = if let Some(idx) = self.top_untagged {
+                    if self.borrows[idx].perm.grants(access) {
+                        return Some(idx);
+                    } else {
+                        idx
+                    }
+                } else {
+                    self.borrows.len()
+                };
+                // Search top-to-bottom
+                for (idx, item) in self.borrows[..end]
+                    .iter()
+                    .enumerate()
+                    .rev()
+                    .filter(|(_, item)| item.tag == SbTag::Untagged)
+                {
+                    if self.top_untagged.is_none() {
+                        self.top_untagged = Some(idx);
+                    }
+                    // Return permission of the first item that grants access.
+                    // We require a permission with the right tag, ensuring U3 and F3.
+                    if item.perm.grants(access) {
+                        return Some(idx);
+                    }
+                }
+                return None;
+            }
+            SbTag::Tagged(_) => {
+                for cache_idx in 0..self.cache.len() {
+                    let stack_idx = self.cache[cache_idx].1;
+                    if self.cache[cache_idx].0 == tag && self.borrows[stack_idx].perm.grants(access)
+                    {
+                        if cache_idx != 0 {
+                            let element = self.cache.remove(cache_idx).unwrap();
+                            self.cache.push_front(element);
+                        }
+                        return Some(stack_idx);
+                    }
+                }
+                for (stack_idx, item) in self.borrows.iter().enumerate().rev() {
+                    if tag == item.tag && item.perm.grants(access) {
+                        if self.cache.len() == CACHE_MAX_LEN {
+                            self.cache.pop_back();
+                        }
+                        self.cache.push_front((tag, stack_idx));
+                        return Some(stack_idx);
+                    }
+                }
+                None
+            }
+        }
     }
 
     /// Find the first write-incompatible item above the given one --
@@ -402,6 +463,32 @@ impl<'tcx> Stack {
                 )?;
                 alloc_history.log_invalidation(item.tag, alloc_range, threads);
             }
+
+            // The drain removes elements from `borrows`, but we need to remove those elements
+            // from the lookup cache too.
+            let mut i = 0;
+            while i < self.cache.len() {
+                if self.cache[i].1 >= first_incompatible_idx {
+                    self.cache.remove(i);
+                } else {
+                    i += 1;
+                }
+            }
+
+            if let Some(idx) = self.top_untagged {
+                if idx >= first_incompatible_idx {
+                    self.top_untagged = None;
+                }
+            }
+
+            if first_incompatible_idx <= self.first_unique {
+                // We removed all the Unique items
+                self.first_unique = 0;
+                self.last_unique = 0;
+            } else {
+                // Ensure the range doesn't extend past the new top of the stack
+                self.last_unique = self.last_unique.min(first_incompatible_idx.saturating_sub(1));
+            }
         } else {
             // On a read, *disable* all `Unique` above the granting item. This ensures U2 for read accesses.
             // The reason this is not following the stack discipline (by removing the first Unique and
@@ -411,20 +498,32 @@ impl<'tcx> Stack {
             // This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
             // reference and use that.
             // We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
-            for idx in ((granting_idx + 1)..self.borrows.len()).rev() {
-                let item = &mut self.borrows[idx];
-                if item.perm == Permission::Unique {
-                    trace!("access: disabling item {:?}", item);
-                    Stack::check_protector(
-                        item,
-                        Some((tag, alloc_range, offset, access)),
-                        global,
-                        alloc_history,
-                    )?;
-                    item.perm = Permission::Disabled;
-                    alloc_history.log_invalidation(item.tag, alloc_range, threads);
+            if granting_idx < self.last_unique {
+                // add 1 so we don't disable the granting item
+                let lower = self.first_unique.max(granting_idx + 1);
+                //let upper = (self.last_unique + 1).min(self.borrows.len());
+                for item in &mut self.borrows[lower..=self.last_unique] {
+                    if item.perm == Permission::Unique {
+                        trace!("access: disabling item {:?}", item);
+                        Stack::check_protector(
+                            item,
+                            Some((tag, alloc_range, offset, access)),
+                            global,
+                            alloc_history,
+                        )?;
+                        item.perm = Permission::Disabled;
+                        alloc_history.log_invalidation(item.tag, alloc_range, threads);
+                    }
                 }
             }
+            if granting_idx < self.first_unique {
+                // We disabled all Unique items
+                self.first_unique = 0;
+                self.last_unique = 0;
+            } else {
+                // Truncate the range to granting_idx
+                self.last_unique = self.last_unique.min(granting_idx);
+            }
         }
 
         // Done.
@@ -456,6 +555,10 @@ impl<'tcx> Stack {
             Stack::check_protector(&item, None, global, alloc_history)?;
         }
 
+        self.cache.clear();
+        self.first_unique = 0;
+        self.last_unique = 0;
+
         Ok(())
     }
 
@@ -523,6 +626,31 @@ impl<'tcx> Stack {
         } else {
             trace!("reborrow: adding item {:?}", new);
             self.borrows.insert(new_idx, new);
+            // The above insert changes the meaning of every index in the cache >= new_idx, so now
+            // we need to find every one of those indexes and increment it.
+
+            if new.tag == SbTag::Untagged && new_idx >= self.top_untagged.unwrap_or(0) {
+                self.top_untagged = Some(new_idx);
+            }
+
+            // Adjust the possibly-unique range if an insert occurs before or within it
+            if self.first_unique >= new_idx {
+                self.first_unique += 1;
+            }
+            if self.last_unique >= new_idx {
+                self.last_unique += 1;
+            }
+            if new.perm == Permission::Unique {
+                // Make sure the possibly-unique range contains the new borrow
+                self.first_unique = self.first_unique.min(new_idx);
+                self.last_unique = self.last_unique.max(new_idx);
+            }
+
+            for (_tag, idx) in &mut self.cache {
+                if *idx >= new_idx {
+                    *idx += 1;
+                }
+            }
         }
 
         Ok(())
@@ -535,7 +663,15 @@ impl<'tcx> Stacks {
     /// Creates new stack with initial tag.
    fn new(size: Size, perm: Permission, tag: SbTag, local_crates: Rc<[CrateNum]>) -> Self {
        let item = Item { perm, tag, protector: None };
-        let stack = Stack { borrows: vec![item] };
+        let mut cache = VecDeque::default();
+        cache.push_front((tag, 0));
+        let stack = Stack {
+            borrows: vec![item],
+            cache,
+            first_unique: 0,
+            last_unique: 0,
+            top_untagged: None,
+        };
 
         Stacks {
             stacks: RefCell::new(RangeMap::new(size, stack)),
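
The trickiest maintenance happens on insertion: adding an item in the middle of `borrows` shifts every item at or above `new_idx` up by one slot, so the cached positions and the possibly-Unique range have to shift with it, as the reborrow hunk above does. Here is a standalone sketch of that adjustment, with hypothetical helper names and `u64` standing in for the tag type:

```rust
use std::collections::VecDeque;

/// Hypothetical helper mirroring the reborrow hunk: every cached stack
/// position at or above the insertion point moves up by one slot.
fn adjust_cache_for_insert(cache: &mut VecDeque<(u64, usize)>, new_idx: usize) {
    for (_tag, idx) in cache.iter_mut() {
        if *idx >= new_idx {
            *idx += 1;
        }
    }
}

/// Hypothetical helper for the possibly-Unique range: shift it past the
/// insert, then widen it if the new item itself grants Unique permission.
fn adjust_unique_range(
    first_unique: &mut usize,
    last_unique: &mut usize,
    new_idx: usize,
    new_is_unique: bool,
) {
    if *first_unique >= new_idx {
        *first_unique += 1;
    }
    if *last_unique >= new_idx {
        *last_unique += 1;
    }
    if new_is_unique {
        *first_unique = (*first_unique).min(new_idx);
        *last_unique = (*last_unique).max(new_idx);
    }
}

fn main() {
    let mut cache: VecDeque<(u64, usize)> = VecDeque::from([(1, 5), (2, 2)]);
    adjust_cache_for_insert(&mut cache, 3);
    // The entry at stack index 5 moved to 6; the one below the insert point is untouched.
    assert_eq!(cache, VecDeque::from([(1, 6), (2, 2)]));

    let (mut first, mut last) = (4usize, 8usize);
    adjust_unique_range(&mut first, &mut last, 3, false);
    assert_eq!((first, last), (5, 9)); // the whole range shifted up by one
}
```

Adjusting cached indices in place rather than clearing the cache keeps a reborrow from throwing away a still-hot working set of tags.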
