3
3
4
4
use log:: trace;
5
5
use std:: cell:: RefCell ;
6
+ use std:: collections:: VecDeque ;
6
7
use std:: fmt;
7
8
use std:: num:: NonZeroU64 ;
8
9
use std:: rc:: Rc ;
@@ -82,16 +83,34 @@ impl fmt::Debug for Item {
82
83
}
83
84
}
84
85
86
/// Maximum number of entries kept in the tag lookup cache used by
/// `find_granting`. The cache is maintained in LRU order (front = most
/// recently used); a small fixed bound keeps both lookup and eviction cheap.
const CACHE_MAX_LEN: usize = 16;

/// Extra per-location state.
// NOTE: `PartialEq`/`Eq` are implemented manually (not derived) because the
// cache fields below are acceleration state with no semantic meaning.
#[derive(Clone, Debug)]
pub struct Stack {
    /// Used *mostly* as a stack; never empty.
    /// Invariants:
    /// * Above a `SharedReadOnly` there can only be more `SharedReadOnly`.
    /// * Except for `Untagged`, no tag occurs in the stack more than once.
    borrows: Vec<Item>,
    /// LRU cache of `(tag, index into borrows)` pairs for tags that recently
    /// granted an access. Purely an accelerator for `find_granting`; entries
    /// must be kept consistent with `borrows` on every insert/remove.
    cache: VecDeque<(SbTag, usize)>,
    /// Index of the most recently inserted Untagged
    top_untagged: Option<usize>,
    /// On a read, we need to disable all `Unique` above the granting item. We can avoid most of
    /// this scan by keeping track of the region of the borrow stack that may contain `Unique`s.
    first_unique: usize,
    last_unique: usize,
}
104
+
105
+ impl PartialEq for Stack {
106
+ fn eq ( & self , other : & Self ) -> bool {
107
+ // All the semantics of Stack are in self.borrows, everything else is caching
108
+ self . borrows == other. borrows
109
+ }
93
110
}
94
111
112
+ impl Eq for Stack { }
113
+
95
114
/// Extra per-allocation state.
96
115
#[ derive( Clone , Debug ) ]
97
116
pub struct Stacks {
@@ -276,18 +295,60 @@ impl Permission {
276
295
impl < ' tcx > Stack {
277
296
    /// Find the item granting the given kind of access to the given tag, and return where
    /// it is on the stack.
    ///
    /// Takes `&mut self` only to update the acceleration state (`top_untagged`
    /// and the LRU `cache`); the `borrows` stack itself is never modified here.
    fn find_granting(&mut self, access: AccessKind, tag: SbTag) -> Option<usize> {
        match tag {
            SbTag::Untagged => {
                // Fast path: if the topmost `Untagged` item is known, check it
                // first; any other `Untagged` can only sit below it.
                let end = if let Some(idx) = self.top_untagged {
                    if self.borrows[idx].perm.grants(access) {
                        return Some(idx);
                    } else {
                        // The cached top item does not grant this access, so
                        // restrict the scan to the items strictly below it.
                        idx
                    }
                } else {
                    self.borrows.len()
                };
                // Search top-to-bottom
                for (idx, item) in self.borrows[..end]
                    .iter()
                    .enumerate()
                    .rev()
                    .filter(|(_, item)| item.tag == SbTag::Untagged)
                {
                    // Remember the topmost `Untagged` we encounter so future
                    // lookups can start from it.
                    if self.top_untagged.is_none() {
                        self.top_untagged = Some(idx);
                    }
                    // Return permission of the first item that grants access.
                    // We require a permission with the right tag, ensuring U3 and F3.
                    if item.perm.grants(access) {
                        return Some(idx);
                    }
                }
                return None;
            }
            SbTag::Tagged(_) => {
                // First consult the LRU cache of recently granting tags.
                for cache_idx in 0..self.cache.len() {
                    let stack_idx = self.cache[cache_idx].1;
                    if self.cache[cache_idx].0 == tag && self.borrows[stack_idx].perm.grants(access)
                    {
                        // Cache hit: move the entry to the front so it is the
                        // most recently used (no move needed if already there).
                        if cache_idx != 0 {
                            let element = self.cache.remove(cache_idx).unwrap();
                            self.cache.push_front(element);
                        }
                        return Some(stack_idx);
                    }
                }
                // Cache miss: fall back to a linear top-to-bottom scan.
                for (stack_idx, item) in self.borrows.iter().enumerate().rev() {
                    if tag == item.tag && item.perm.grants(access) {
                        // Insert the hit at the front of the cache, evicting
                        // the least recently used entry if the cache is full.
                        if self.cache.len() == CACHE_MAX_LEN {
                            self.cache.pop_back();
                        }
                        self.cache.push_front((tag, stack_idx));
                        return Some(stack_idx);
                    }
                }
                None
            }
        }
    }
292
353
293
354
/// Find the first write-incompatible item above the given one --
@@ -402,6 +463,32 @@ impl<'tcx> Stack {
402
463
) ?;
403
464
alloc_history. log_invalidation ( item. tag , alloc_range, threads) ;
404
465
}
466
+
467
+ // The drain removes elements from `borrows`, but we need to remove those elements
468
+ // from the lookup cache too.
469
+ let mut i = 0 ;
470
+ while i < self . cache . len ( ) {
471
+ if self . cache [ i] . 1 >= first_incompatible_idx {
472
+ self . cache . remove ( i) ;
473
+ } else {
474
+ i += 1 ;
475
+ }
476
+ }
477
+
478
+ if let Some ( idx) = self . top_untagged {
479
+ if idx >= first_incompatible_idx {
480
+ self . top_untagged = None ;
481
+ }
482
+ }
483
+
484
+ if first_incompatible_idx <= self . first_unique {
485
+ // We removed all the Unique items
486
+ self . first_unique = 0 ;
487
+ self . last_unique = 0 ;
488
+ } else {
489
+ // Ensure the range doesn't extend past the new top of the stack
490
+ self . last_unique = self . last_unique . min ( first_incompatible_idx. saturating_sub ( 1 ) ) ;
491
+ }
405
492
} else {
406
493
// On a read, *disable* all `Unique` above the granting item. This ensures U2 for read accesses.
407
494
// The reason this is not following the stack discipline (by removing the first Unique and
@@ -411,20 +498,32 @@ impl<'tcx> Stack {
411
498
// This pattern occurs a lot in the standard library: create a raw pointer, then also create a shared
412
499
// reference and use that.
413
500
// We *disable* instead of removing `Unique` to avoid "connecting" two neighbouring blocks of SRWs.
414
- for idx in ( ( granting_idx + 1 ) ..self . borrows . len ( ) ) . rev ( ) {
415
- let item = & mut self . borrows [ idx] ;
416
- if item. perm == Permission :: Unique {
417
- trace ! ( "access: disabling item {:?}" , item) ;
418
- Stack :: check_protector (
419
- item,
420
- Some ( ( tag, alloc_range, offset, access) ) ,
421
- global,
422
- alloc_history,
423
- ) ?;
424
- item. perm = Permission :: Disabled ;
425
- alloc_history. log_invalidation ( item. tag , alloc_range, threads) ;
501
+ if granting_idx < self . last_unique {
502
+ // add 1 so we don't disable the granting item
503
+ let lower = self . first_unique . max ( granting_idx + 1 ) ;
504
+ //let upper = (self.last_unique + 1).min(self.borrows.len());
505
+ for item in & mut self . borrows [ lower..=self . last_unique ] {
506
+ if item. perm == Permission :: Unique {
507
+ trace ! ( "access: disabling item {:?}" , item) ;
508
+ Stack :: check_protector (
509
+ item,
510
+ Some ( ( tag, alloc_range, offset, access) ) ,
511
+ global,
512
+ alloc_history,
513
+ ) ?;
514
+ item. perm = Permission :: Disabled ;
515
+ alloc_history. log_invalidation ( item. tag , alloc_range, threads) ;
516
+ }
426
517
}
427
518
}
519
+ if granting_idx < self . first_unique {
520
+ // We disabled all Unique items
521
+ self . first_unique = 0 ;
522
+ self . last_unique = 0 ;
523
+ } else {
524
+ // Truncate the range to granting_idx
525
+ self . last_unique = self . last_unique . min ( granting_idx) ;
526
+ }
428
527
}
429
528
430
529
// Done.
@@ -456,6 +555,10 @@ impl<'tcx> Stack {
456
555
Stack :: check_protector ( & item, None , global, alloc_history) ?;
457
556
}
458
557
558
+ self . cache . clear ( ) ;
559
+ self . first_unique = 0 ;
560
+ self . last_unique = 0 ;
561
+
459
562
Ok ( ( ) )
460
563
}
461
564
@@ -523,6 +626,31 @@ impl<'tcx> Stack {
523
626
} else {
524
627
trace ! ( "reborrow: adding item {:?}" , new) ;
525
628
self . borrows . insert ( new_idx, new) ;
629
+ // The above insert changes the meaning of every index in the cache >= new_idx, so now
630
+ // we need to find every one of those indexes and increment it.
631
+
632
+ if new. tag == SbTag :: Untagged && new_idx >= self . top_untagged . unwrap_or ( 0 ) {
633
+ self . top_untagged = Some ( new_idx) ;
634
+ }
635
+
636
+ // Adjust the possibly-unique range if an insert occurs before or within it
637
+ if self . first_unique >= new_idx {
638
+ self . first_unique += 1 ;
639
+ }
640
+ if self . last_unique >= new_idx {
641
+ self . last_unique += 1 ;
642
+ }
643
+ if new. perm == Permission :: Unique {
644
+ // Make sure the possibly-unique range contains the new borrow
645
+ self . first_unique = self . first_unique . min ( new_idx) ;
646
+ self . last_unique = self . last_unique . max ( new_idx) ;
647
+ }
648
+
649
+ for ( _tag, idx) in & mut self . cache {
650
+ if * idx >= new_idx {
651
+ * idx += 1 ;
652
+ }
653
+ }
526
654
}
527
655
528
656
Ok ( ( ) )
@@ -535,7 +663,15 @@ impl<'tcx> Stacks {
535
663
/// Creates new stack with initial tag.
536
664
fn new ( size : Size , perm : Permission , tag : SbTag , local_crates : Rc < [ CrateNum ] > ) -> Self {
537
665
let item = Item { perm, tag, protector : None } ;
538
- let stack = Stack { borrows : vec ! [ item] } ;
666
+ let mut cache = VecDeque :: default ( ) ;
667
+ cache. push_front ( ( tag, 0 ) ) ;
668
+ let stack = Stack {
669
+ borrows : vec ! [ item] ,
670
+ cache,
671
+ first_unique : 0 ,
672
+ last_unique : 0 ,
673
+ top_untagged : None ,
674
+ } ;
539
675
540
676
Stacks {
541
677
stacks : RefCell :: new ( RangeMap :: new ( size, stack) ) ,
0 commit comments