@@ -4,70 +4,89 @@ use rand::Rng;
 
 use rustc_target::abi::{Align, Size};
 
-const MAX_POOL_SIZE: usize = 64;
+use crate::{concurrency::VClock, MemoryKind};
 
-// Just use fair coins, until we have evidence that other numbers are better.
-const ADDR_REMEMBER_CHANCE: f64 = 0.5;
-const ADDR_TAKE_CHANCE: f64 = 0.5;
+const MAX_POOL_SIZE: usize = 64;
 
 /// The pool strikes a balance between exploring more possible executions and making it more likely
 /// to find bugs. The hypothesis is that bugs are more likely to occur when reuse happens for
 /// allocations with the same layout, since that can trigger e.g. ABA issues in a concurrent data
 /// structure. Therefore we only reuse allocations when size and alignment match exactly.
 #[derive(Debug)]
 pub struct ReusePool {
+    address_reuse_rate: f64,
     /// The i-th element in `pool` stores allocations of alignment `2^i`. We store these reusable
     /// allocations as address-size pairs; the list must be sorted by size.
     ///
     /// Each of these lists has at most MAX_POOL_SIZE elements, and since alignment is limited to
     /// fewer than 64 different possible values, that bounds the overall size of the pool.
-    pool: Vec<Vec<(u64, Size)>>,
+    ///
+    /// We also store the clock from the thread that donated this pool element,
+    /// to ensure synchronization with the thread that picks up this address.
+    pool: Vec<Vec<(u64, Size, VClock)>>,
 }
 
 impl ReusePool {
-    pub fn new() -> Self {
-        ReusePool { pool: vec![] }
+    pub fn new(address_reuse_rate: f64) -> Self {
+        ReusePool { address_reuse_rate, pool: vec![] }
     }
 
-    fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size)> {
+    fn subpool(&mut self, align: Align) -> &mut Vec<(u64, Size, VClock)> {
         let pool_idx: usize = align.bytes().trailing_zeros().try_into().unwrap();
         if self.pool.len() <= pool_idx {
             self.pool.resize(pool_idx + 1, Vec::new());
         }
         &mut self.pool[pool_idx]
     }
 
-    pub fn add_addr(&mut self, rng: &mut impl Rng, addr: u64, size: Size, align: Align) {
+    pub fn add_addr(
+        &mut self,
+        rng: &mut impl Rng,
+        addr: u64,
+        size: Size,
+        align: Align,
+        kind: MemoryKind,
+        clock: impl FnOnce() -> VClock,
+    ) {
         // Let's see if we even want to remember this address.
-        if !rng.gen_bool(ADDR_REMEMBER_CHANCE) {
+        // We don't remember stack addresses: there's a lot of them (so the perf impact is big),
+        // and we only want to reuse stack slots within the same thread or else we'll add a lot of
+        // undesired synchronization.
+        if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
             return;
         }
         // Determine the pool to add this to, and where in the pool to put it.
         let subpool = self.subpool(align);
-        let pos = subpool.partition_point(|(_addr, other_size)| *other_size < size);
+        let pos = subpool.partition_point(|(_addr, other_size, _)| *other_size < size);
         // Make sure the pool does not grow too big.
         if subpool.len() >= MAX_POOL_SIZE {
             // Pool full. Replace existing element, or last one if this would be even bigger.
             let clamped_pos = pos.min(subpool.len() - 1);
-            subpool[clamped_pos] = (addr, size);
+            subpool[clamped_pos] = (addr, size, clock());
             return;
         }
         // Add address to pool, at the right position.
-        subpool.insert(pos, (addr, size));
+        subpool.insert(pos, (addr, size, clock()));
     }
 
-    pub fn take_addr(&mut self, rng: &mut impl Rng, size: Size, align: Align) -> Option<u64> {
-        // Determine whether we'll even attempt a reuse.
-        if !rng.gen_bool(ADDR_TAKE_CHANCE) {
+    pub fn take_addr(
+        &mut self,
+        rng: &mut impl Rng,
+        size: Size,
+        align: Align,
+        kind: MemoryKind,
+    ) -> Option<(u64, VClock)> {
+        // Determine whether we'll even attempt a reuse. As above, we don't do reuse for stack addresses.
+        if kind == MemoryKind::Stack || !rng.gen_bool(self.address_reuse_rate) {
             return None;
         }
         // Determine the pool to take this from.
         let subpool = self.subpool(align);
         // Let's see if we can find something of the right size. We want to find the full range of
         // such items, beginning with the first, so we can't use `binary_search_by_key`.
-        let begin = subpool.partition_point(|(_addr, other_size)| *other_size < size);
+        let begin = subpool.partition_point(|(_addr, other_size, _)| *other_size < size);
         let mut end = begin;
-        while let Some((_addr, other_size)) = subpool.get(end) {
+        while let Some((_addr, other_size, _)) = subpool.get(end) {
             if *other_size != size {
                 break;
             }
@@ -80,8 +99,8 @@ impl ReusePool {
         // Pick a random element with the desired size.
         let idx = rng.gen_range(begin..end);
         // Remove it from the pool and return.
-        let (chosen_addr, chosen_size) = subpool.remove(idx);
+        let (chosen_addr, chosen_size, clock) = subpool.remove(idx);
         debug_assert!(chosen_size >= size && chosen_addr % align.bytes() == 0);
-        Some(chosen_addr)
+        Some((chosen_addr, clock))
     }
 }
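
The key bookkeeping trick in this diff is `Vec::partition_point` over a per-alignment list kept sorted by size, so that all exact-size candidates form one contiguous range. Below is a minimal standalone sketch of that idea; it is not Miri's code: the name `MiniPool` and the plain `u64` sizes/alignments are made up for illustration, and it omits the randomized remember/reuse decisions and the vector-clock handling, simply taking the first exact-size match instead of a random one.

/// Minimal sketch of the reuse-pool bookkeeping: one bucket per power-of-two
/// alignment, each bucket kept sorted by size so exact-size matches are contiguous.
struct MiniPool {
    /// `buckets[i]` holds `(addr, size)` entries for alignment `2^i`, sorted by size.
    buckets: Vec<Vec<(u64, u64)>>,
}

impl MiniPool {
    fn new() -> Self {
        MiniPool { buckets: vec![] }
    }

    fn bucket(&mut self, align: u64) -> &mut Vec<(u64, u64)> {
        let idx = align.trailing_zeros() as usize;
        if self.buckets.len() <= idx {
            self.buckets.resize(idx + 1, Vec::new());
        }
        &mut self.buckets[idx]
    }

    /// Remember a freed allocation, keeping the bucket sorted by size.
    fn add(&mut self, addr: u64, size: u64, align: u64) {
        let bucket = self.bucket(align);
        // First index whose size is >= `size`; inserting here keeps the list sorted.
        let pos = bucket.partition_point(|&(_, s)| s < size);
        bucket.insert(pos, (addr, size));
    }

    /// Hand out a remembered address whose size matches exactly, if any.
    fn take(&mut self, size: u64, align: u64) -> Option<u64> {
        let bucket = self.bucket(align);
        // `begin` is the start of the (possibly empty) run of exact-size entries.
        let begin = bucket.partition_point(|&(_, s)| s < size);
        if bucket.get(begin).is_some_and(|&(_, s)| s == size) {
            Some(bucket.remove(begin).0)
        } else {
            None
        }
    }
}

fn main() {
    let mut pool = MiniPool::new();
    pool.add(0x1000, 16, 8);
    pool.add(0x2000, 32, 8);
    assert_eq!(pool.take(32, 8), Some(0x2000)); // exact size match is reused
    assert_eq!(pool.take(64, 8), None); // no entry of that size
}

Keeping each bucket sorted means both insertion and lookup amount to a binary search plus one `Vec` shift, which stays cheap under the MAX_POOL_SIZE cap of 64 entries per alignment class.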