@@ -4,73 +4,113 @@ use rand::Rng;
 
 use rustc_target::abi::{Align, Size};
 
-const MAX_POOL_SIZE: usize = 64;
+use crate::{concurrency::VClock, MemoryKind, MiriConfig, ThreadId};
 
-// Just use fair coins, until we have evidence that other numbers are better.
-const ADDR_REMEMBER_CHANCE: f64 = 0.5;
-const ADDR_TAKE_CHANCE: f64 = 0.5;
+const MAX_POOL_SIZE: usize = 64;
 
 /// The pool strikes a balance between exploring more possible executions and making it more likely
 /// to find bugs. The hypothesis is that bugs are more likely to occur when reuse happens for
 /// allocations with the same layout, since that can trigger e.g. ABA issues in a concurrent data
 /// structure. Therefore we only reuse allocations when size and alignment match exactly.
 #[derive(Debug)]
 pub struct ReusePool {
+    address_reuse_rate: f64,
+    address_reuse_cross_thread_rate: f64,
     /// The i-th element in `pool` stores allocations of alignment `2^i`. We store these reusable
-    /// allocations as address-size pairs, the list must be sorted by the size.
+    /// allocations as address-size pairs, the list must be sorted by the size and then the thread ID.
     ///
     /// Each of these maps has at most MAX_POOL_SIZE elements, and since alignment is limited to
     /// less than 64 different possible values, that bounds the overall size of the pool.
-    pool: Vec<Vec<(u64, Size)>>,
+    ///
+    /// We also store the ID and the data-race clock of the thread that donated this pool element,
+    /// to ensure synchronization with the thread that picks up this address.
+    pool: Vec<Vec<(u64, Size, ThreadId, VClock)>>,
 }
 
 impl ReusePool {
-    pub fn new() -> Self {
-        ReusePool { pool: vec![] }
+    pub fn new(config: &MiriConfig) -> Self {
+        ReusePool {
+            address_reuse_rate: config.address_reuse_rate,
+            address_reuse_cross_thread_rate: config.address_reuse_cross_thread_rate,
+            pool: vec![],
+        }
     }
 
-    fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size)> {
+    fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, ThreadId, VClock)> {
         let pool_idx: usize = align.bytes().trailing_zeros().try_into().unwrap();
         if self.pool.len() <= pool_idx {
             self.pool.resize(pool_idx + 1, Vec::new());
         }
         &mut self.pool[pool_idx]
     }
 
-    pub fn add_addr(&mut self, rng: &mut impl Rng, addr: u64, size: Size, align: Align) {
+    pub fn add_addr(
+        &mut self,
+        rng: &mut impl Rng,
+        addr: u64,
+        size: Size,
+        align: Align,
+        kind: MemoryKind,
+        thread: ThreadId,
+        clock: impl FnOnce() -> VClock,
+    ) {
         // Let's see if we even want to remember this address.
-        if !rng.gen_bool(ADDR_REMEMBER_CHANCE) {
+        // We don't remember stack addresses: there are a lot of them (so the perf impact is big),
+        // and we only want to reuse stack slots within the same thread or else we'll add a lot of
+        // undesired synchronization.
+        if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
             return;
         }
+        let clock = clock();
         // Determine the pool to add this to, and where in the pool to put it.
         let subpool = self.subpool(align);
-        let pos = subpool.partition_point(|(_addr, other_size)| *other_size < size);
+        let pos = subpool.partition_point(|(_addr, other_size, other_thread, _)| {
+            (*other_size, *other_thread) < (size, thread)
+        });
         // Make sure the pool does not grow too big.
         if subpool.len() >= MAX_POOL_SIZE {
             // Pool full. Replace existing element, or last one if this would be even bigger.
             let clamped_pos = pos.min(subpool.len() - 1);
-            subpool[clamped_pos] = (addr, size);
+            subpool[clamped_pos] = (addr, size, thread, clock);
             return;
         }
         // Add address to pool, at the right position.
-        subpool.insert(pos, (addr, size));
+        subpool.insert(pos, (addr, size, thread, clock));
     }
 
-    pub fn take_addr(&mut self, rng: &mut impl Rng, size: Size, align: Align) -> Option<u64> {
-        // Determine whether we'll even attempt a reuse.
-        if !rng.gen_bool(ADDR_TAKE_CHANCE) {
+    /// Returns the address to use and optionally a clock we have to synchronize with.
+    pub fn take_addr(
+        &mut self,
+        rng: &mut impl Rng,
+        size: Size,
+        align: Align,
+        kind: MemoryKind,
+        thread: ThreadId,
+    ) -> Option<(u64, Option<VClock>)> {
+        // Determine whether we'll even attempt a reuse. As above, we don't reuse stack addresses.
+        if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
             return None;
         }
+        let cross_thread_reuse = rng.gen_bool(self.address_reuse_cross_thread_rate);
         // Determine the pool to take this from.
         let subpool = self.subpool(align);
         // Let's see if we can find something of the right size. We want to find the full range of
-        // such items, beginning with the first, so we can't use `binary_search_by_key`.
-        let begin = subpool.partition_point(|(_addr, other_size)| *other_size < size);
+        // such items, beginning with the first, so we can't use `binary_search_by_key`. If we do
+        // *not* want to consider other threads' allocations, we effectively use the lexicographic
+        // order on `(size, thread)`.
+        let begin = subpool.partition_point(|(_addr, other_size, other_thread, _)| {
+            *other_size < size
+                || (*other_size == size && !cross_thread_reuse && *other_thread < thread)
+        });
         let mut end = begin;
-        while let Some((_addr, other_size)) = subpool.get(end) {
+        while let Some((_addr, other_size, other_thread, _)) = subpool.get(end) {
             if *other_size != size {
                 break;
             }
+            if !cross_thread_reuse && *other_thread != thread {
+                // We entered the allocations of another thread.
+                break;
+            }
             end += 1;
         }
         if end == begin {
@@ -80,8 +120,10 @@ impl ReusePool {
         // Pick a random element with the desired size.
         let idx = rng.gen_range(begin..end);
         // Remove it from the pool and return.
-        let (chosen_addr, chosen_size) = subpool.remove(idx);
+        let (chosen_addr, chosen_size, chosen_thread, clock) = subpool.remove(idx);
         debug_assert!(chosen_size >= size && chosen_addr % align.bytes() == 0);
-        Some(chosen_addr)
+        debug_assert!(cross_thread_reuse || chosen_thread == thread);
+        // No synchronization needed if we reused from the current thread.
+        Some((chosen_addr, if chosen_thread == thread { None } else { Some(clock) }))
     }
 }
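
A minimal, self-contained sketch of the `(size, thread)` ordering that `add_addr` maintains and `take_addr` searches, with plain integers standing in for `Size` and `ThreadId` (an assumption for illustration only; the `partition_point` calls mirror the ones in the diff):

// Sketch: each subpool is kept sorted lexicographically by (size, thread),
// so lookups can binary-search with `partition_point`.
fn main() {
    // (addr, size, thread), sorted by (size, thread).
    let subpool: Vec<(u64, u64, u32)> = vec![
        (0x1000, 8, 0),
        (0x2000, 8, 1),
        (0x3000, 16, 0),
        (0x4000, 16, 2),
    ];
    let (size, thread) = (16u64, 0u32);

    // Same-thread search (`cross_thread_reuse == false`): first index whose
    // key is >= (size, thread) under lexicographic order.
    let begin = subpool.partition_point(|&(_addr, s, t)| (s, t) < (size, thread));
    assert_eq!(begin, 2); // points at (0x3000, 16, 0)

    // Cross-thread search ignores the thread component, so the matching run
    // is the whole block of size-16 entries, indices 2..4.
    let begin_cross = subpool.partition_point(|&(_addr, s, _t)| s < size);
    assert_eq!(begin_cross, 2);
}

The linear scan in `take_addr` then extends `end` only while entries keep the same size (and, without cross-thread reuse, the same thread), so the random pick is confined to exactly the eligible run.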
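The `Option<VClock>` returned by `take_addr` exists so that a cross-thread reuse synchronizes the picking thread with the donor. A toy vector clock illustrating the acquire step the caller is expected to perform; the real `VClock` lives in Miri's `concurrency` module, and this standalone version with an element-wise max is an assumption for illustration:

// Toy vector clock: one counter per thread. "Acquiring" a donor's clock is
// an element-wise max, which makes all of the donor's earlier accesses
// happen-before whatever the picking thread does with the reused address.
fn acquire(own: &mut Vec<u64>, donor: &[u64]) {
    if own.len() < donor.len() {
        own.resize(donor.len(), 0);
    }
    for (o, &d) in own.iter_mut().zip(donor) {
        *o = (*o).max(d);
    }
}

fn main() {
    let donor_clock = vec![3, 7, 0]; // captured by `add_addr` when the address was donated
    let mut picker_clock = vec![5, 1, 2]; // current thread's clock
    acquire(&mut picker_clock, &donor_clock);
    assert_eq!(picker_clock, vec![5, 7, 2]);
}

This is also why same-thread reuse returns `None` for the clock: a thread is always synchronized with itself, so no join is needed.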