1
- use std:: cell:: { Cell , RefCell } ;
1
+ use std:: cell:: RefCell ;
2
+ use std:: collections:: { HashMap , hash_map:: Entry } ;
3
+ use std:: cmp:: max;
2
4
3
5
use rand:: Rng ;
4
6
5
- use rustc:: mir:: interpret:: { AllocId , Pointer , InterpResult } ;
6
- use rustc_mir:: interpret:: Memory ;
7
+ use rustc_mir:: interpret:: { AllocId , Pointer , InterpResult , Memory , AllocCheck } ;
7
8
use rustc_target:: abi:: Size ;
8
9
9
10
use crate :: { Evaluator , Tag , STACK_ADDR } ;
10
11
11
12
/// Per-`Memory` extra state for int<->ptr cast tracking.
/// Wrapped in a `RefCell` because it is mutated through a shared
/// `&Memory` handle (see `memory.extra.intptrcast.borrow_mut()` below).
pub type MemoryExtra = RefCell<GlobalState>;
12
13
13
- #[ derive( Clone , Debug , Default ) ]
14
- pub struct AllocExtra {
15
- base_addr : Cell < Option < u64 > >
16
- }
17
-
18
14
#[ derive( Clone , Debug ) ]
19
15
pub struct GlobalState {
20
16
/// This is used as a map between the address of each allocation and its `AllocId`.
21
17
/// It is always sorted
22
18
pub int_to_ptr_map : Vec < ( u64 , AllocId ) > ,
19
+ /// The base address for each allocation. We cannot put that into
20
+ /// `AllocExtra` because function pointers also have a base address, and
21
+ /// they do not have an `AllocExtra`.
22
+ /// This is the inverse of `int_to_ptr_map`.
23
+ pub base_addr : HashMap < AllocId , u64 > ,
23
24
/// This is used as a memory address when a new pointer is casted to an integer. It
24
25
/// is always larger than any address that was previously made part of a block.
25
26
pub next_base_addr : u64 ,
@@ -29,6 +30,7 @@ impl Default for GlobalState {
29
30
fn default ( ) -> Self {
30
31
GlobalState {
31
32
int_to_ptr_map : Vec :: default ( ) ,
33
+ base_addr : HashMap :: default ( ) ,
32
34
next_base_addr : STACK_ADDR ,
33
35
}
34
36
}
@@ -71,13 +73,13 @@ impl<'mir, 'tcx> GlobalState {
71
73
memory : & Memory < ' mir , ' tcx , Evaluator < ' tcx > > ,
72
74
) -> InterpResult < ' tcx , u64 > {
73
75
let mut global_state = memory. extra . intptrcast . borrow_mut ( ) ;
76
+ let global_state = & mut * global_state;
74
77
75
- let alloc = memory. get ( ptr. alloc_id ) ?;
76
- let align = alloc. align . bytes ( ) ;
78
+ let ( size, align) = memory. get_size_and_align ( ptr. alloc_id , AllocCheck :: Live ) ?;
77
79
78
- let base_addr = match alloc . extra . intptrcast . base_addr . get ( ) {
79
- Some ( base_addr ) => base_addr ,
80
- None => {
80
+ let base_addr = match global_state . base_addr . entry ( ptr . alloc_id ) {
81
+ Entry :: Occupied ( entry ) => * entry . get ( ) ,
82
+ Entry :: Vacant ( entry ) => {
81
83
// This allocation does not have a base address yet, pick one.
82
84
// Leave some space to the previous allocation, to give it some chance to be less aligned.
83
85
let slack = {
@@ -86,11 +88,12 @@ impl<'mir, 'tcx> GlobalState {
86
88
rng. gen_range ( 0 , 16 )
87
89
} ;
88
90
// From next_base_addr + slack, round up to adjust for alignment.
89
- let base_addr = Self :: align_addr ( global_state. next_base_addr + slack, align) ;
90
- alloc . extra . intptrcast . base_addr . set ( Some ( base_addr) ) ;
91
+ let base_addr = Self :: align_addr ( global_state. next_base_addr + slack, align. bytes ( ) ) ;
92
+ entry . insert ( base_addr) ;
91
93
92
- // Remember next base address.
93
- global_state. next_base_addr = base_addr + alloc. bytes . len ( ) as u64 ;
94
+ // Remember next base address. If this allocation is zero-sized, leave a gap
95
+ // of at least 1 to avoid two allocations having the same base address.
96
+ global_state. next_base_addr = base_addr + max ( size. bytes ( ) , 1 ) ;
94
97
// Given that `next_base_addr` increases in each allocation, pushing the
95
98
// corresponding tuple keeps `int_to_ptr_map` sorted
96
99
global_state. int_to_ptr_map . push ( ( base_addr, ptr. alloc_id ) ) ;
@@ -99,7 +102,7 @@ impl<'mir, 'tcx> GlobalState {
99
102
}
100
103
} ;
101
104
102
- debug_assert_eq ! ( base_addr % align, 0 ) ; // sanity check
105
+ debug_assert_eq ! ( base_addr % align. bytes ( ) , 0 ) ; // sanity check
103
106
Ok ( base_addr + ptr. offset . bytes ( ) )
104
107
}
105
108
0 commit comments