 // "Mathematizing C++ concurrency", ACM SIGPLAN Notices, vol. 46, no. 1, pp. 55-66, 2011.
 // Available: https://ss265.host.cs.st-andrews.ac.uk/papers/n3132.pdf.
 
-use std::sync::atomic::AtomicUsize;
 use std::sync::atomic::Ordering::*;
+use std::sync::atomic::{fence, AtomicBool, AtomicI32};
 use std::thread::spawn;
 
 #[derive(Copy, Clone)]
@@ -32,13 +32,17 @@ unsafe impl<T> Sync for EvilSend<T> {}
 
 // We can't create static items because we need to run each test
 // multiple times
-fn static_atomic(val: usize) -> &'static AtomicUsize {
-    let ret = Box::leak(Box::new(AtomicUsize::new(val)));
+fn static_atomic(val: i32) -> &'static AtomicI32 {
+    let ret = Box::leak(Box::new(AtomicI32::new(val)));
+    ret
+}
+fn static_atomic_bool(val: bool) -> &'static AtomicBool {
+    let ret = Box::leak(Box::new(AtomicBool::new(val)));
     ret
 }
 
 // Spins until it acquires a pre-determined value.
-fn acquires_value(loc: &AtomicUsize, val: usize) -> usize {
+fn acquires_value(loc: &AtomicI32, val: i32) -> i32 {
     while loc.load(Acquire) != val {
         std::hint::spin_loop();
     }
@@ -207,7 +211,7 @@ fn test_sc_store_buffering() {
 }
 
 fn test_single_thread() {
-    let x = AtomicUsize::new(42);
+    let x = AtomicI32::new(42);
 
     assert_eq!(x.load(Relaxed), 42);
 
@@ -216,6 +220,42 @@ fn test_single_thread() {
     assert_eq!(x.load(Relaxed), 43);
 }
 
+fn test_sync_through_rmw_and_fences() {
+    // Example from https://github.com/llvm/llvm-project/issues/56450#issuecomment-1183695905
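+    //
+    // Sketch of why this synchronizes (editor's summary of the C++/Rust fence rules):
+    // the release fence is sequenced before the relaxed RMW on `sync`, and the other
+    // thread's RMW on the same atomic is sequenced before its acquire fence. If one
+    // RMW reads the value written by the other, the two fences synchronize, so the
+    // relaxed store before the release fence becomes visible to the relaxed load after
+    // the acquire fence. Optimizing away the idempotent `fetch_add(0, Relaxed)` (the
+    // LLVM issue above) would remove exactly this synchronization.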
+    #[no_mangle]
+    pub fn rdmw(storing: &AtomicI32, sync: &AtomicI32, loading: &AtomicI32) -> i32 {
+        storing.store(1, Relaxed);
+        fence(Release);
+        sync.fetch_add(0, Relaxed);
+        fence(Acquire);
+        loading.load(Relaxed)
+    }
+
+    let x = static_atomic(0);
+    let y = static_atomic(0);
+    let z = static_atomic(0);
+
+    // Since each thread is so short, we need to make sure that they truly run at the
+    // same time; otherwise t1 will finish before t2 even starts.
+    let go = static_atomic_bool(false);
+
+    let t1 = spawn(move || {
+        while !go.load(Relaxed) {}
+        rdmw(y, x, z)
+    });
+
+    let t2 = spawn(move || {
+        while !go.load(Relaxed) {}
+        rdmw(z, x, y)
+    });
+
+    go.store(true, Relaxed);
+
+    let a = t1.join().unwrap();
+    let b = t2.join().unwrap();
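+    // The two RMWs on `x` are ordered by `x`'s modification order, so at least one of
+    // them reads the other's write; through the fences, that thread must then observe
+    // the other thread's store of 1, so both results being 0 is impossible.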
+    assert_ne!((a, b), (0, 0));
+}
+
 pub fn main() {
     for _ in 0..50 {
         test_single_thread();
@@ -225,5 +265,6 @@ pub fn main() {
         test_wrc();
         test_corr();
         test_sc_store_buffering();
+        test_sync_through_rmw_and_fences();
     }
 }