Use atomic RMW for {mutex, rwlock, cond, srwlock}_get_or_create_id functions #2114

Merged (4 commits, May 13, 2022)
Changes from 3 commits
29 changes: 18 additions & 11 deletions src/data_race.rs
@@ -441,21 +441,33 @@ impl MemoryCellClocks {
/// Evaluation context extensions.
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
/// Atomic variant of read_scalar_at_offset.
fn read_scalar_at_offset_atomic(
/// Calculates the MPlaceTy given the offset and layout of an access on an operand.
fn offset_and_layout_to_place(
&self,
op: &OpTy<'tcx, Tag>,
offset: u64,
layout: TyAndLayout<'tcx>,
atomic: AtomicReadOp,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
) -> InterpResult<'tcx, MPlaceTy<'tcx, Tag>> {
let this = self.eval_context_ref();
let op_place = this.deref_operand(op)?;
let offset = Size::from_bytes(offset);

// Ensure that the following read at an offset is within bounds.
// Ensure that the access is within bounds.
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
Ok(value_place)
}

/// Atomic variant of read_scalar_at_offset.
fn read_scalar_at_offset_atomic(
&self,
op: &OpTy<'tcx, Tag>,
offset: u64,
layout: TyAndLayout<'tcx>,
atomic: AtomicReadOp,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let this = self.eval_context_ref();
let value_place = this.offset_and_layout_to_place(op, offset, layout)?;
this.read_scalar_atomic(&value_place, atomic)
}

@@ -469,12 +481,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
atomic: AtomicWriteOp,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let op_place = this.deref_operand(op)?;
let offset = Size::from_bytes(offset);

// Ensure that the following read at an offset is within bounds.
assert!(op_place.layout.size >= offset + layout.size);
let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
let value_place = this.offset_and_layout_to_place(op, offset, layout)?;
this.write_scalar_atomic(value.into(), &value_place, atomic)
}

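The net effect of this hunk: the offset arithmetic and the bounds assertion that `read_scalar_at_offset_atomic` and `write_scalar_at_offset_atomic` previously duplicated now live in the single `offset_and_layout_to_place` helper, which both paths call before performing the access. A minimal sketch of that shape with toy types (`Place` and `Layout` here are hypothetical stand-ins, not miri's real `MPlaceTy`/`OpTy`):

```rust
// Toy stand-ins for miri's place types; names and fields are illustrative only.
#[derive(Clone, Copy)]
struct Layout { size: u64 }
#[derive(Clone, Copy)]
struct Place { addr: u64, layout: Layout }

/// The factored-out helper: the place of an access at `offset` with `layout`,
/// asserting that the access stays within the parent place.
fn offset_and_layout_to_place(parent: Place, offset: u64, layout: Layout) -> Place {
    // Same bounds check the diff moves into the shared helper:
    // e.g. a 4-byte field at offset 4 of an 8-byte place passes (8 >= 4 + 4).
    assert!(parent.layout.size >= offset + layout.size);
    Place { addr: parent.addr + offset, layout }
}

fn main() {
    let mutex = Place { addr: 0x1000, layout: Layout { size: 8 } };
    // Both the atomic-read and atomic-write paths would call this first.
    let id_field = offset_and_layout_to_place(mutex, 4, Layout { size: 4 });
    assert_eq!(id_field.addr, 0x1004);
}
```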
96 changes: 66 additions & 30 deletions src/shims/posix/sync.rs
@@ -112,16 +112,28 @@ fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
mutex_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, MutexId> {
let id = mutex_get_id(ecx, mutex_op)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid mutex id. Need to allocate
// a new mutex.
let id = ecx.mutex_create();
mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?;
Ok(id)
} else {
Ok(MutexId::from_u32(id))
}
let value_place = ecx.offset_and_layout_to_place(mutex_op, 4, ecx.machine.layouts.u32)?;

ecx.mutex_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar().into(),
AtomicRwOp::Relaxed,
AtomicReadOp::Relaxed,
false,
)?
.to_scalar_pair()
.expect("compare_exchange returns a scalar pair");

Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(MutexId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}

// pthread_rwlock_t is between 32 and 56 bytes, depending on the platform.
@@ -156,16 +168,28 @@ fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
rwlock_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, RwLockId> {
let id = rwlock_get_id(ecx, rwlock_op)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid rwlock id. Need to allocate
// a new read-write lock.
let id = ecx.rwlock_create();
rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?;
Ok(id)
} else {
Ok(RwLockId::from_u32(id))
}
let value_place = ecx.offset_and_layout_to_place(rwlock_op, 4, ecx.machine.layouts.u32)?;

ecx.rwlock_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar().into(),
AtomicRwOp::Relaxed,
AtomicReadOp::Relaxed,
false,
)?
.to_scalar_pair()
.expect("compare_exchange returns a scalar pair");

Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}

// pthread_condattr_t
@@ -228,16 +252,28 @@ fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
cond_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, CondvarId> {
let id = cond_get_id(ecx, cond_op)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid conditional variable id.
// Need to allocate a new id.
let id = ecx.condvar_create();
cond_set_id(ecx, cond_op, id.to_u32_scalar())?;
Ok(id)
} else {
Ok(CondvarId::from_u32(id))
}
let value_place = ecx.offset_and_layout_to_place(cond_op, 4, ecx.machine.layouts.u32)?;

ecx.condvar_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar().into(),
AtomicRwOp::Relaxed,
AtomicReadOp::Relaxed,
false,
)?
.to_scalar_pair()
.expect("compare_exchange returns a scalar pair");

Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(CondvarId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}

fn cond_get_clock_id<'mir, 'tcx: 'mir>(
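For context on why this file switches to an atomic RMW: with the old non-atomic read followed by a write, two threads entering, say, `pthread_mutex_lock` concurrently could both read the id field as 0 and allocate two different IDs for the same pthread_mutex_t. A compare-exchange turns the check-and-claim into one atomic step. A minimal self-contained sketch of the pattern using plain `std` atomics (not miri's interpreter API; note that in the diff above, `next_id` is only committed when the CAS succeeds):

```rust
use std::sync::atomic::{AtomicU32, Ordering};

/// Lazily assign an ID to a lock word where 0 means "not yet allocated".
/// `next_id` is the candidate ID to install if we win the race.
fn get_or_create_id(word: &AtomicU32, next_id: u32) -> u32 {
    // Atomically: if the word is still 0, replace it with `next_id`.
    match word.compare_exchange(0, next_id, Ordering::Relaxed, Ordering::Relaxed) {
        Ok(_) => next_id,          // we won: the word now holds our ID
        Err(existing) => existing, // already initialized: reuse that ID
    }
}

fn main() {
    let word = AtomicU32::new(0);
    assert_eq!(get_or_create_id(&word, 1), 1); // first call allocates
    assert_eq!(get_or_create_id(&word, 2), 1); // later calls see the existing ID
}
```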
32 changes: 22 additions & 10 deletions src/shims/windows/sync.rs
@@ -7,16 +7,28 @@ fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>(
ecx: &mut MiriEvalContext<'mir, 'tcx>,
lock_op: &OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, RwLockId> {
let id = ecx.read_scalar_at_offset(lock_op, 0, ecx.machine.layouts.u32)?.to_u32()?;
if id == 0 {
// 0 is a default value and also not a valid rwlock id. Need to allocate
// a new rwlock.
let id = ecx.rwlock_create();
ecx.write_scalar_at_offset(lock_op, 0, id.to_u32_scalar(), ecx.machine.layouts.u32)?;
Ok(id)
} else {
Ok(RwLockId::from_u32(id))
}
let value_place = ecx.offset_and_layout_to_place(lock_op, 0, ecx.machine.layouts.u32)?;

ecx.rwlock_get_or_create(|ecx, next_id| {
let (old, success) = ecx
.atomic_compare_exchange_scalar(
&value_place,
&ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
next_id.to_u32_scalar().into(),
AtomicRwOp::Relaxed,
AtomicReadOp::Relaxed,
false,
)?
.to_scalar_pair()
.expect("compare_exchange returns a scalar pair");

Ok(if success.to_bool().expect("compare_exchange's second return value is a bool") {
// Caller of the closure needs to allocate next_id
None
} else {
Some(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
})
})
}

impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
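The SRWLOCK shim follows the same compare-exchange pattern as the pthread shims above, with the ID word at offset 0 instead of 4. As a quick sanity check that the scheme leaves concurrent callers agreeing on one ID, a self-contained two-thread demo in plain Rust (`claim` is a hypothetical mirror of the sketch above; `std::thread::scope` needs Rust 1.63+):

```rust
use std::sync::atomic::{AtomicU32, Ordering};
use std::thread;

// CAS guarantees that exactly one install wins, so all racers agree on one ID.
fn claim(word: &AtomicU32, next_id: u32) -> u32 {
    match word.compare_exchange(0, next_id, Ordering::Relaxed, Ordering::Relaxed) {
        Ok(_) => next_id,          // we initialized the lock word
        Err(existing) => existing, // lost the race; adopt the existing ID
    }
}

fn main() {
    let word = AtomicU32::new(0); // 0 means "no ID allocated yet"
    thread::scope(|s| {
        let a = s.spawn(|| claim(&word, 1));
        let b = s.spawn(|| claim(&word, 2));
        // Regardless of interleaving, both threads end up with the same ID.
        assert_eq!(a.join().unwrap(), b.join().unwrap());
    });
}
```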
60 changes: 60 additions & 0 deletions src/sync.rs
@@ -215,6 +215,24 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
this.machine.threads.sync.mutexes.push(Default::default())
}

#[inline]
/// Provides the closure with the next MutexId. Creates that mutex if the closure returns `None`;
/// otherwise returns the existing id found by the closure.
fn mutex_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, MutexId>
where
F: FnOnce(&mut MiriEvalContext<'mir, 'tcx>, MutexId) -> InterpResult<'tcx, Option<MutexId>>,
{
let this = self.eval_context_mut();
let next_index = this.machine.threads.sync.mutexes.next_index();
if let Some(old) = existing(this, next_index)? {
Ok(old)
} else {
let new_index = this.machine.threads.sync.mutexes.push(Default::default());
assert_eq!(next_index, new_index);
Ok(new_index)
}
}

#[inline]
/// Get the id of the thread that currently owns this lock.
fn mutex_get_owner(&mut self, id: MutexId) -> ThreadId {
@@ -297,6 +315,27 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
this.machine.threads.sync.rwlocks.push(Default::default())
}

#[inline]
/// Provides the closure with the next RwLockId. Creates that RwLock if the closure returns `None`;
/// otherwise returns the existing id found by the closure.
fn rwlock_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, RwLockId>
where
F: FnOnce(
&mut MiriEvalContext<'mir, 'tcx>,
RwLockId,
) -> InterpResult<'tcx, Option<RwLockId>>,
{
let this = self.eval_context_mut();
let next_index = this.machine.threads.sync.rwlocks.next_index();
if let Some(old) = existing(this, next_index)? {
Ok(old)
} else {
let new_index = this.machine.threads.sync.rwlocks.push(Default::default());
assert_eq!(next_index, new_index);
Ok(new_index)
}
}

#[inline]
/// Check if locked.
fn rwlock_is_locked(&self, id: RwLockId) -> bool {
@@ -445,6 +484,27 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
this.machine.threads.sync.condvars.push(Default::default())
}

#[inline]
/// Provides the closure with the next CondvarId. Creates that Condvar if the closure returns `None`;
/// otherwise returns the existing id found by the closure.
fn condvar_get_or_create<F>(&mut self, existing: F) -> InterpResult<'tcx, CondvarId>
where
F: FnOnce(
&mut MiriEvalContext<'mir, 'tcx>,
CondvarId,
) -> InterpResult<'tcx, Option<CondvarId>>,
{
let this = self.eval_context_mut();
let next_index = this.machine.threads.sync.condvars.next_index();
if let Some(old) = existing(this, next_index)? {
Ok(old)
} else {
let new_index = this.machine.threads.sync.condvars.push(Default::default());
assert_eq!(next_index, new_index);
Ok(new_index)
}
}

#[inline]
/// Is the conditional variable awaited?
fn condvar_is_awaited(&mut self, id: CondvarId) -> bool {
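The three `*_get_or_create` helpers added in this file share one reserve-then-confirm shape: peek at the index the next push would take, let the closure try to claim that candidate via the CAS, and push a new entry only if no existing ID was found. A hypothetical generic rendering over a plain `Vec` (miri's actual code uses an `IndexVec` and threads the interpreter state through `this`):

```rust
/// Hypothetical generic form of the pattern: `existing` receives the index
/// a new entry would get; it returns `Some(old)` if an ID already exists,
/// or `None` to request that the new entry actually be created.
fn get_or_create<T: Default, E>(
    store: &mut Vec<T>,
    existing: impl FnOnce(&mut Vec<T>, usize) -> Result<Option<usize>, E>,
) -> Result<usize, E> {
    let next_index = store.len(); // the index the next push will use
    if let Some(old) = existing(&mut *store, next_index)? {
        Ok(old) // an entry already existed; nothing to create
    } else {
        store.push(T::default()); // the closure claimed `next_index`
        assert_eq!(store.len() - 1, next_index); // nothing was pushed in between
        Ok(next_index)
    }
}

fn main() {
    let mut locks: Vec<u8> = Vec::new();
    // First call: the closure finds no existing ID, so index 0 gets created.
    let id = get_or_create(&mut locks, |_, _| Ok::<_, ()>(None)).unwrap();
    assert_eq!(id, 0);
    // Second call: the closure reports an existing ID, so nothing is pushed.
    let id = get_or_create(&mut locks, |_, _| Ok::<_, ()>(Some(0))).unwrap();
    assert_eq!((id, locks.len()), (0, 1));
}
```

The `assert_eq!` mirrors the `assert_eq!(next_index, new_index)` in the diff: the closure hands out `next_index` before the entry exists, so the invariant that nothing allocates in between is worth checking.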