From c678bd722ea6fe8d1827787d5ecd411a77f411ca Mon Sep 17 00:00:00 2001 From: Thom Chiovoloni Date: Sun, 24 Jan 2021 13:45:45 -0800 Subject: [PATCH 1/5] Add random failures to compare_exchange_weak --- src/data_race.rs | 21 ++++++++++++++++----- src/shims/intrinsics.rs | 18 +++++++++++------- tests/run-pass/atomic.rs | 26 ++++++++++++++++++-------- 3 files changed, 45 insertions(+), 20 deletions(-) diff --git a/src/data_race.rs b/src/data_race.rs index d204d60f15..31a167af88 100644 --- a/src/data_race.rs +++ b/src/data_race.rs @@ -75,7 +75,7 @@ use rustc_target::abi::Size; use crate::{ ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MiriEvalContext, MiriEvalContextExt, - OpTy, Pointer, RangeMap, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp, + OpTy, Pointer, RangeMap, Scalar, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp, VectorIdx, MemoryKind, MiriMemoryKind }; @@ -544,7 +544,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { /// Perform an atomic compare and exchange at a given memory location. /// On success an atomic RMW operation is performed and on failure - /// only an atomic read occurs. + /// only an atomic read occurs. If `can_fail_spuriously` is true, + /// then we treat it as a "compare_exchange_weak" operation, and + /// some portion of the time fail even when the values are actually + /// identical. fn atomic_compare_exchange_scalar( &mut self, place: MPlaceTy<'tcx, Tag>, @@ -552,7 +555,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { new: ScalarMaybeUninit, success: AtomicRwOp, fail: AtomicReadOp, + can_fail_spuriously: bool, ) -> InterpResult<'tcx, Immediate> { + use rand::Rng as _; let this = self.eval_context_mut(); // Failure ordering cannot be stronger than success ordering, therefore first attempt @@ -560,15 +565,21 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> { // read ordering and write in the success case. // Read as immediate for the sake of `binary_op()` let old = this.allow_data_races_mut(|this| this.read_immediate(place.into()))?; - // `binary_op` will bail if either of them is not a scalar. let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0; - let res = Immediate::ScalarPair(old.to_scalar_or_uninit(), eq.into()); + // If the operation would succeed, but is "weak", fail 50% of the time. + // FIXME: this is quite arbitrary. + let cmpxchg_success = eq.to_bool()? + && (!can_fail_spuriously || this.memory.extra.rng.borrow_mut().gen_range(0, 2) == 0); + let res = Immediate::ScalarPair( + old.to_scalar_or_uninit(), + Scalar::from_bool(cmpxchg_success).into(), + ); // Update ptr depending on comparison. // if successful, perform a full rw-atomic validation // otherwise treat this as an atomic load with the fail ordering. - if eq.to_bool()? 
{ + if cmpxchg_success { this.allow_data_races_mut(|this| this.write_scalar(new, place.into()))?; this.validate_atomic_rmw(place, success)?; } else { diff --git a/src/shims/intrinsics.rs b/src/shims/intrinsics.rs index 840dd9f143..73419a9f97 100644 --- a/src/shims/intrinsics.rs +++ b/src/shims/intrinsics.rs @@ -518,9 +518,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(()) } - fn atomic_compare_exchange( + fn atomic_compare_exchange_impl( &mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>, - success: AtomicRwOp, fail: AtomicReadOp + success: AtomicRwOp, fail: AtomicReadOp, can_fail_spuriously: bool ) -> InterpResult<'tcx> { let this = self.eval_context_mut(); @@ -538,7 +538,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx let old = this.atomic_compare_exchange_scalar( - place, expect_old, new, success, fail + place, expect_old, new, success, fail, can_fail_spuriously )?; // Return old value. @@ -546,14 +546,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx Ok(()) } - fn atomic_compare_exchange_weak( + fn atomic_compare_exchange( &mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>, success: AtomicRwOp, fail: AtomicReadOp ) -> InterpResult<'tcx> { + self.atomic_compare_exchange_impl(args, dest, success, fail, false) + } - // FIXME: the weak part of this is currently not modelled, - // it is assumed to always succeed unconditionally. - self.atomic_compare_exchange(args, dest, success, fail) + fn atomic_compare_exchange_weak( + &mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>, + success: AtomicRwOp, fail: AtomicReadOp + ) -> InterpResult<'tcx> { + self.atomic_compare_exchange_impl(args, dest, success, fail, true) } fn float_to_int_unchecked( diff --git a/tests/run-pass/atomic.rs b/tests/run-pass/atomic.rs index b03a8c8901..4ebca71c7c 100644 --- a/tests/run-pass/atomic.rs +++ b/tests/run-pass/atomic.rs @@ -24,7 +24,20 @@ fn atomic_bool() { assert_eq!(*ATOMIC.get_mut(), false); } } - +// There isn't a trait to use to make this generic, so just use a macro +macro_rules! 
compare_exchange_weak_loop { + ($atom:expr, $from:expr, $to:expr, $succ_order:expr, $fail_order:expr) => { + loop { + match $atom.compare_exchange_weak($from, $to, $succ_order, $fail_order) { + Ok(n) => { + assert_eq!(n, $from); + break; + } + Err(n) => assert_eq!(n, $from), + } + } + }; +} fn atomic_isize() { static ATOMIC: AtomicIsize = AtomicIsize::new(0); @@ -40,11 +53,11 @@ fn atomic_isize() { ATOMIC.compare_exchange(0, 1, SeqCst, SeqCst).ok(); ATOMIC.store(0, SeqCst); - - assert_eq!(ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed), Ok(0)); + compare_exchange_weak_loop!(ATOMIC, 0, 1, Relaxed, Relaxed); assert_eq!(ATOMIC.compare_exchange_weak(0, 2, Acquire, Relaxed), Err(1)); assert_eq!(ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed), Err(1)); - assert_eq!(ATOMIC.compare_exchange_weak(1, 0, AcqRel, Relaxed), Ok(1)); + compare_exchange_weak_loop!(ATOMIC, 1, 0, AcqRel, Relaxed); + assert_eq!(ATOMIC.load(Relaxed), 0); ATOMIC.compare_exchange_weak(0, 1, AcqRel, Relaxed).ok(); ATOMIC.compare_exchange_weak(0, 1, SeqCst, Relaxed).ok(); ATOMIC.compare_exchange_weak(0, 1, Acquire, Acquire).ok(); @@ -58,10 +71,7 @@ fn atomic_u64() { ATOMIC.store(1, SeqCst); assert_eq!(ATOMIC.compare_exchange(0, 0x100, AcqRel, Acquire), Err(1)); - assert_eq!( - ATOMIC.compare_exchange_weak(1, 0x100, AcqRel, Acquire), - Ok(1) - ); + compare_exchange_weak_loop!(ATOMIC, 1, 0x100, AcqRel, Acquire); assert_eq!(ATOMIC.load(Relaxed), 0x100); } From b6eccc6482ba9524b0ad7f0561bc0e77adcd5d24 Mon Sep 17 00:00:00 2001 From: Thom Chiovoloni Date: Mon, 25 Jan 2021 02:41:03 -0800 Subject: [PATCH 2/5] Test that _weak atomics sometimes fail --- tests/run-pass/atomic.rs | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/tests/run-pass/atomic.rs b/tests/run-pass/atomic.rs index 4ebca71c7c..4871aa9fe0 100644 --- a/tests/run-pass/atomic.rs +++ b/tests/run-pass/atomic.rs @@ -5,6 +5,7 @@ fn main() { atomic_isize(); atomic_u64(); atomic_fences(); + weak_sometimes_fails(); } fn atomic_bool() { @@ -85,3 +86,17 @@ fn atomic_fences() { compiler_fence(Acquire); compiler_fence(AcqRel); } + +fn weak_sometimes_fails() { + static ATOMIC: AtomicBool = AtomicBool::new(false); + let tries = 20; + for _ in 0..tries { + let cur = ATOMIC.load(Relaxed); + // Try (weakly) to flip the flag. + if ATOMIC.compare_exchange_weak(cur, !cur, Relaxed, Relaxed).is_err() { + // We succeeded, so return and skip the panic. + return; + } + } + panic!("compare_exchange_weak succeeded {} tries in a row", tries); +} From efd2d55e001de2a069d9ad45687abb5834f227da Mon Sep 17 00:00:00 2001 From: Thom Chiovoloni Date: Mon, 25 Jan 2021 02:52:55 -0800 Subject: [PATCH 3/5] review nits --- tests/run-pass/atomic.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/run-pass/atomic.rs b/tests/run-pass/atomic.rs index 4871aa9fe0..66d25ca01e 100644 --- a/tests/run-pass/atomic.rs +++ b/tests/run-pass/atomic.rs @@ -88,13 +88,13 @@ fn atomic_fences() { } fn weak_sometimes_fails() { - static ATOMIC: AtomicBool = AtomicBool::new(false); + let atomic = AtomicBool::new(false); let tries = 20; for _ in 0..tries { - let cur = ATOMIC.load(Relaxed); + let cur = atomic.load(Relaxed); // Try (weakly) to flip the flag. - if ATOMIC.compare_exchange_weak(cur, !cur, Relaxed, Relaxed).is_err() { - // We succeeded, so return and skip the panic. + if atomic.compare_exchange_weak(cur, !cur, Relaxed, Relaxed).is_err() { + // We failed, so return and skip the panic. 
             return;
         }
     }

From d4b592ed17471622d27deeab8a1d8d89bc5c17d1 Mon Sep 17 00:00:00 2001
From: Thom Chiovoloni
Date: Mon, 25 Jan 2021 02:59:12 -0800
Subject: [PATCH 4/5] Fail 80% of the time on weak cmpxchg, not 50%

---
 src/data_race.rs         | 4 ++--
 tests/run-pass/atomic.rs | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/data_race.rs b/src/data_race.rs
index 31a167af88..dd0dccd0e9 100644
--- a/src/data_race.rs
+++ b/src/data_race.rs
@@ -567,10 +567,10 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         let old = this.allow_data_races_mut(|this| this.read_immediate(place.into()))?;
         // `binary_op` will bail if either of them is not a scalar.
         let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
-        // If the operation would succeed, but is "weak", fail 50% of the time.
+        // If the operation would succeed, but is "weak", fail 80% of the time.
         // FIXME: this is quite arbitrary.
         let cmpxchg_success = eq.to_bool()?
-            && (!can_fail_spuriously || this.memory.extra.rng.borrow_mut().gen_range(0, 2) == 0);
+            && (!can_fail_spuriously || this.memory.extra.rng.borrow_mut().gen_range(0, 10) < 8);
         let res = Immediate::ScalarPair(
             old.to_scalar_or_uninit(),
             Scalar::from_bool(cmpxchg_success).into(),
diff --git a/tests/run-pass/atomic.rs b/tests/run-pass/atomic.rs
index 66d25ca01e..4f27c2bd54 100644
--- a/tests/run-pass/atomic.rs
+++ b/tests/run-pass/atomic.rs
@@ -89,7 +89,7 @@ fn atomic_fences() {
 
 fn weak_sometimes_fails() {
     let atomic = AtomicBool::new(false);
-    let tries = 20;
+    let tries = 100;
     for _ in 0..tries {
         let cur = atomic.load(Relaxed);
         // Try (weakly) to flip the flag.

From d310620c11bb61156d9198b7c5c3c574846ff73d Mon Sep 17 00:00:00 2001
From: Thom Chiovoloni
Date: Tue, 26 Jan 2021 00:07:43 -0800
Subject: [PATCH 5/5] Allow configuring the failure rate with -Zmiri-compare-exchange-weak-failure-rate

---
 src/bin/miri.rs  | 8 ++++++++
 src/data_race.rs | 7 ++++---
 src/eval.rs      | 4 ++++
 src/machine.rs   | 4 ++++
 4 files changed, 20 insertions(+), 3 deletions(-)

diff --git a/src/bin/miri.rs b/src/bin/miri.rs
index 1117b69116..bd3bc8dbb4 100644
--- a/src/bin/miri.rs
+++ b/src/bin/miri.rs
@@ -282,6 +282,14 @@ fn main() {
                 };
                 miri_config.tracked_alloc_id = Some(miri::AllocId(id));
             }
+            arg if arg.starts_with("-Zmiri-compare-exchange-weak-failure-rate=") => {
+                let rate = match arg.strip_prefix("-Zmiri-compare-exchange-weak-failure-rate=").unwrap().parse::<f64>() {
+                    Ok(rate) if rate >= 0.0 && rate <= 1.0 => rate,
+                    Ok(_) => panic!("-Zmiri-compare-exchange-weak-failure-rate must be between `0.0` and `1.0`"),
+                    Err(err) => panic!("-Zmiri-compare-exchange-weak-failure-rate requires a `f64` between `0.0` and `1.0`: {}", err),
+                };
+                miri_config.cmpxchg_weak_failure_rate = rate;
+            }
             _ => {
                 // Forward to rustc.
                 rustc_args.push(arg);
diff --git a/src/data_race.rs b/src/data_race.rs
index dd0dccd0e9..f79775e12f 100644
--- a/src/data_race.rs
+++ b/src/data_race.rs
@@ -567,10 +567,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         let old = this.allow_data_races_mut(|this| this.read_immediate(place.into()))?;
         // `binary_op` will bail if either of them is not a scalar.
         let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
-        // If the operation would succeed, but is "weak", fail 80% of the time.
-        // FIXME: this is quite arbitrary.
+        // If the operation would succeed, but is "weak", fail some portion
+        // of the time, based on `rate`.
+        let rate = this.memory.extra.cmpxchg_weak_failure_rate;
         let cmpxchg_success = eq.to_bool()?
-            && (!can_fail_spuriously || this.memory.extra.rng.borrow_mut().gen_range(0, 10) < 8);
+            && (!can_fail_spuriously || this.memory.extra.rng.borrow_mut().gen::<f64>() < rate);
         let res = Immediate::ScalarPair(
             old.to_scalar_or_uninit(),
             Scalar::from_bool(cmpxchg_success).into(),
diff --git a/src/eval.rs b/src/eval.rs
index 0a62f14dd3..b6d4fa05e1 100644
--- a/src/eval.rs
+++ b/src/eval.rs
@@ -50,6 +50,9 @@ pub struct MiriConfig {
     pub track_raw: bool,
     /// Determine if data race detection should be enabled
     pub data_race_detector: bool,
+    /// Rate of spurious failures for compare_exchange_weak atomic operations,
+    /// between 0.0 and 1.0, defaulting to 0.8 (80% chance of failure).
+    pub cmpxchg_weak_failure_rate: f64,
 }
 
 impl Default for MiriConfig {
@@ -68,6 +71,7 @@ impl Default for MiriConfig {
             tracked_alloc_id: None,
             track_raw: false,
             data_race_detector: true,
+            cmpxchg_weak_failure_rate: 0.8,
         }
     }
 }
diff --git a/src/machine.rs b/src/machine.rs
index d28aa34c75..60a6dae0f8 100644
--- a/src/machine.rs
+++ b/src/machine.rs
@@ -135,6 +135,9 @@ pub struct MemoryExtra {
 
     /// Controls whether alignment of memory accesses is being checked.
     pub(crate) check_alignment: AlignmentCheck,
+
+    /// Failure rate of compare_exchange_weak, between 0.0 and 1.0
+    pub(crate) cmpxchg_weak_failure_rate: f64,
 }
 
 impl MemoryExtra {
@@ -162,6 +165,7 @@ impl MemoryExtra {
             rng: RefCell::new(rng),
             tracked_alloc_id: config.tracked_alloc_id,
             check_alignment: config.check_alignment,
+            cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
         }
     }
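
Taken together, these five patches mean that any `compare_exchange_weak` executed under Miri can now fail spuriously at the configured rate (0.8 by default, or whatever `-Zmiri-compare-exchange-weak-failure-rate=` is set to), so caller code has to use a retry loop, exactly like the `compare_exchange_weak_loop!` macro added to the test above. The sketch below is not part of the patch series; it is a minimal illustration of the caller-side pattern, and the `MIRIFLAGS` invocation in the comment assumes the usual way of passing extra flags to `cargo miri` rather than anything introduced by these patches.

```rust
// Hypothetical caller-side example (not part of the patches above): a weak-CAS
// retry loop that stays correct when Miri injects spurious failures.
//
// Assumed invocation (standard cargo-miri flag passing, not defined by this PR):
//   MIRIFLAGS="-Zmiri-compare-exchange-weak-failure-rate=0.9" cargo miri run
use std::sync::atomic::{AtomicUsize, Ordering::{Acquire, Relaxed}};

/// Atomically increment `counter` by one and return the new value.
/// `compare_exchange_weak` may fail even when the value matches, so we loop.
fn increment(counter: &AtomicUsize) -> usize {
    let mut cur = counter.load(Relaxed);
    loop {
        match counter.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
            // The CAS installed `cur + 1`; report it.
            Ok(_) => return cur + 1,
            // Real or spurious failure: retry with the value we actually observed.
            Err(observed) => cur = observed,
        }
    }
}

fn main() {
    let counter = AtomicUsize::new(0);
    assert_eq!(increment(&counter), 1);
    assert_eq!(increment(&counter), 2);
}
```

A single `assert_eq!(x.compare_exchange_weak(..), Ok(..))`, as the tests did before Patch 1 rewrote them, is exactly the pattern this change is meant to flush out: under the new model such an assertion can fail even with no concurrent writers.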