Auto merge of #1686 - thomcc:cmpxchg_weak, r=oli-obk
Add random failures to compare_exchange_weak

In practice this is pretty useful for detecting bugs.

This fails more frequently than is realistic (~~50%~~ now 80% of the time, controlled by a flag). I couldn't find any existing code that tries to model this (tsan, cdschecker, etc. all seem to have TODOs there). Relacy models it with a 25% or 50% failure chance, depending on some settings.
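For context, here is a minimal standalone sketch (not part of this commit) of the retry-loop idiom that spurious failures exercise; code that treats an `Err` from `compare_exchange_weak` as proof that the value actually changed is exactly the kind of bug this is meant to flush out:

```rust
use std::sync::atomic::{AtomicUsize, Ordering::{AcqRel, Acquire}};

/// Increment `counter` using a weak CAS in a retry loop.
/// `compare_exchange_weak` may fail spuriously (returning `Err` even though
/// the value matched), so the only correct pattern is to loop until it succeeds.
fn increment(counter: &AtomicUsize) -> usize {
    let mut cur = counter.load(Acquire);
    loop {
        match counter.compare_exchange_weak(cur, cur + 1, AcqRel, Acquire) {
            Ok(prev) => return prev,
            // On failure we get the current value back; retry with it.
            Err(actual) => cur = actual,
        }
    }
}
```

With this change, Miri can make the `Err` arm fire even when the value still matches, so a loop like the one above must genuinely retry.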

CC `@JCTyblaidd`, who initially wrote the code this modifies and seems interested in this subject.
bors committed Jan 28, 2021
2 parents e23e0b2 + d310620 commit a0485c5
Showing 6 changed files with 77 additions and 20 deletions.
8 changes: 8 additions & 0 deletions src/bin/miri.rs
@@ -282,6 +282,14 @@ fn main() {
};
miri_config.tracked_alloc_id = Some(miri::AllocId(id));
}
arg if arg.starts_with("-Zmiri-compare-exchange-weak-failure-rate=") => {
let rate = match arg.strip_prefix("-Zmiri-compare-exchange-weak-failure-rate=").unwrap().parse::<f64>() {
Ok(rate) if rate >= 0.0 && rate <= 1.0 => rate,
Ok(_) => panic!("-Zmiri-compare-exchange-weak-failure-rate must be between `0.0` and `1.0`"),
Err(err) => panic!("-Zmiri-compare-exchange-weak-failure-rate requires a `f64` between `0.0` and `1.0`: {}", err),
};
miri_config.cmpxchg_weak_failure_rate = rate;
}
_ => {
// Forward to rustc.
rustc_args.push(arg);
22 changes: 17 additions & 5 deletions src/data_race.rs
@@ -75,7 +75,7 @@ use rustc_target::abi::Size;

use crate::{
ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MiriEvalContext, MiriEvalContextExt,
OpTy, Pointer, RangeMap, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp,
OpTy, Pointer, RangeMap, Scalar, ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp,
VectorIdx, MemoryKind, MiriMemoryKind
};

@@ -544,31 +544,43 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {

/// Perform an atomic compare and exchange at a given memory location.
/// On success an atomic RMW operation is performed and on failure
/// only an atomic read occurs.
/// only an atomic read occurs. If `can_fail_spuriously` is true,
/// then we treat it as a "compare_exchange_weak" operation, and
/// some portion of the time fail even when the values are actually
/// identical.
fn atomic_compare_exchange_scalar(
&mut self,
place: MPlaceTy<'tcx, Tag>,
expect_old: ImmTy<'tcx, Tag>,
new: ScalarMaybeUninit<Tag>,
success: AtomicRwOp,
fail: AtomicReadOp,
can_fail_spuriously: bool,
) -> InterpResult<'tcx, Immediate<Tag>> {
use rand::Rng as _;
let this = self.eval_context_mut();

// Failure ordering cannot be stronger than success ordering, therefore first attempt
// to read with the failure ordering and if successful then try again with the success
// read ordering and write in the success case.
// Read as immediate for the sake of `binary_op()`
let old = this.allow_data_races_mut(|this| this.read_immediate(place.into()))?;

// `binary_op` will bail if either of them is not a scalar.
let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
let res = Immediate::ScalarPair(old.to_scalar_or_uninit(), eq.into());
// If the operation would succeed, but is "weak", fail some portion
// of the time, based on `rate`.
let rate = this.memory.extra.cmpxchg_weak_failure_rate;
let cmpxchg_success = eq.to_bool()?
&& (!can_fail_spuriously || this.memory.extra.rng.borrow_mut().gen::<f64>() < rate);
let res = Immediate::ScalarPair(
old.to_scalar_or_uninit(),
Scalar::from_bool(cmpxchg_success).into(),
);

// Update ptr depending on comparison.
// if successful, perform a full rw-atomic validation
// otherwise treat this as an atomic load with the fail ordering.
if eq.to_bool()? {
if cmpxchg_success {
this.allow_data_races_mut(|this| this.write_scalar(new, place.into()))?;
this.validate_atomic_rmw(place, success)?;
} else {
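Stripped of Miri's interpreter machinery, the new success condition in `atomic_compare_exchange_scalar` is a value comparison combined with a coin flip. A rough standalone model of that decision (illustrative names and a plain integer slot; the exact probability expression is an assumption for illustration, not copied from the commit):

```rust
use rand::Rng;

/// Model of a weak compare-exchange on a plain slot: even when the value
/// matches `expected`, fail with probability `failure_rate`.
/// Returns Ok(old) and writes `new` on success, Err(old) on failure.
fn model_weak_cas(
    rng: &mut impl Rng,
    slot: &mut u64,
    expected: u64,
    new: u64,
    failure_rate: f64,
) -> Result<u64, u64> {
    let old = *slot;
    let spurious_failure = rng.gen::<f64>() < failure_rate;
    if old == expected && !spurious_failure {
        *slot = new;
        Ok(old)
    } else {
        Err(old)
    }
}
```

The real implementation additionally has to validate the success/failure atomic orderings, which this model ignores.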
4 changes: 4 additions & 0 deletions src/eval.rs
@@ -50,6 +50,9 @@ pub struct MiriConfig {
pub track_raw: bool,
/// Determine if data race detection should be enabled
pub data_race_detector: bool,
/// Rate of spurious failures for compare_exchange_weak atomic operations,
/// between 0.0 and 1.0, defaulting to 0.8 (80% chance of failure).
pub cmpxchg_weak_failure_rate: f64,
}

impl Default for MiriConfig {
@@ -68,6 +71,7 @@ impl Default for MiriConfig {
tracked_alloc_id: None,
track_raw: false,
data_race_detector: true,
cmpxchg_weak_failure_rate: 0.8,
}
}
}
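For anyone driving Miri programmatically rather than through the command-line flag, the new field can be set on the config directly. A sketch, assuming a crate that uses Miri as a library and the `Default` impl shown above (the helper name is made up for illustration):

```rust
/// Hypothetical helper: build a config with a lower spurious-failure rate.
fn config_with_lower_weak_failure_rate() -> miri::MiriConfig {
    let mut config = miri::MiriConfig::default();
    // Default is 0.8 (80% chance that a matching weak CAS still fails).
    config.cmpxchg_weak_failure_rate = 0.25;
    config
}
```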
4 changes: 4 additions & 0 deletions src/machine.rs
@@ -135,6 +135,9 @@ pub struct MemoryExtra {

/// Controls whether alignment of memory accesses is being checked.
pub(crate) check_alignment: AlignmentCheck,

/// Failure rate of compare_exchange_weak, between 0.0 and 1.0
pub(crate) cmpxchg_weak_failure_rate: f64,
}

impl MemoryExtra {
@@ -162,6 +165,7 @@
rng: RefCell::new(rng),
tracked_alloc_id: config.tracked_alloc_id,
check_alignment: config.check_alignment,
cmpxchg_weak_failure_rate: config.cmpxchg_weak_failure_rate,
}
}

18 changes: 11 additions & 7 deletions src/shims/intrinsics.rs
@@ -518,9 +518,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
Ok(())
}

fn atomic_compare_exchange(
fn atomic_compare_exchange_impl(
&mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
success: AtomicRwOp, fail: AtomicReadOp
success: AtomicRwOp, fail: AtomicReadOp, can_fail_spuriously: bool
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();

@@ -538,22 +538,26 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx


let old = this.atomic_compare_exchange_scalar(
place, expect_old, new, success, fail
place, expect_old, new, success, fail, can_fail_spuriously
)?;

// Return old value.
this.write_immediate(old, dest)?;
Ok(())
}

fn atomic_compare_exchange_weak(
fn atomic_compare_exchange(
&mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
success: AtomicRwOp, fail: AtomicReadOp
) -> InterpResult<'tcx> {
self.atomic_compare_exchange_impl(args, dest, success, fail, false)
}

// FIXME: the weak part of this is currently not modelled,
// it is assumed to always succeed unconditionally.
self.atomic_compare_exchange(args, dest, success, fail)
fn atomic_compare_exchange_weak(
&mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
success: AtomicRwOp, fail: AtomicReadOp
) -> InterpResult<'tcx> {
self.atomic_compare_exchange_impl(args, dest, success, fail, true)
}

fn float_to_int_unchecked<F>(
41 changes: 33 additions & 8 deletions tests/run-pass/atomic.rs
@@ -5,6 +5,7 @@ fn main() {
atomic_isize();
atomic_u64();
atomic_fences();
weak_sometimes_fails();
}

fn atomic_bool() {
@@ -24,7 +25,20 @@ fn atomic_bool() {
assert_eq!(*ATOMIC.get_mut(), false);
}
}

// There isn't a trait to use to make this generic, so just use a macro
macro_rules! compare_exchange_weak_loop {
($atom:expr, $from:expr, $to:expr, $succ_order:expr, $fail_order:expr) => {
loop {
match $atom.compare_exchange_weak($from, $to, $succ_order, $fail_order) {
Ok(n) => {
assert_eq!(n, $from);
break;
}
Err(n) => assert_eq!(n, $from),
}
}
};
}
fn atomic_isize() {
static ATOMIC: AtomicIsize = AtomicIsize::new(0);

@@ -40,11 +54,11 @@ fn atomic_isize() {
ATOMIC.compare_exchange(0, 1, SeqCst, SeqCst).ok();

ATOMIC.store(0, SeqCst);

assert_eq!(ATOMIC.compare_exchange_weak(0, 1, Relaxed, Relaxed), Ok(0));
compare_exchange_weak_loop!(ATOMIC, 0, 1, Relaxed, Relaxed);
assert_eq!(ATOMIC.compare_exchange_weak(0, 2, Acquire, Relaxed), Err(1));
assert_eq!(ATOMIC.compare_exchange_weak(0, 1, Release, Relaxed), Err(1));
assert_eq!(ATOMIC.compare_exchange_weak(1, 0, AcqRel, Relaxed), Ok(1));
compare_exchange_weak_loop!(ATOMIC, 1, 0, AcqRel, Relaxed);
assert_eq!(ATOMIC.load(Relaxed), 0);
ATOMIC.compare_exchange_weak(0, 1, AcqRel, Relaxed).ok();
ATOMIC.compare_exchange_weak(0, 1, SeqCst, Relaxed).ok();
ATOMIC.compare_exchange_weak(0, 1, Acquire, Acquire).ok();
@@ -58,10 +72,7 @@ fn atomic_u64() {

ATOMIC.store(1, SeqCst);
assert_eq!(ATOMIC.compare_exchange(0, 0x100, AcqRel, Acquire), Err(1));
assert_eq!(
ATOMIC.compare_exchange_weak(1, 0x100, AcqRel, Acquire),
Ok(1)
);
compare_exchange_weak_loop!(ATOMIC, 1, 0x100, AcqRel, Acquire);
assert_eq!(ATOMIC.load(Relaxed), 0x100);
}

@@ -75,3 +86,17 @@ fn atomic_fences() {
compiler_fence(Acquire);
compiler_fence(AcqRel);
}

fn weak_sometimes_fails() {
let atomic = AtomicBool::new(false);
let tries = 100;
for _ in 0..tries {
let cur = atomic.load(Relaxed);
// Try (weakly) to flip the flag.
if atomic.compare_exchange_weak(cur, !cur, Relaxed, Relaxed).is_err() {
// We failed, so return and skip the panic.
return;
}
}
panic!("compare_exchange_weak succeeded {} tries in a row", tries);
}
