From 9156396d067a27590b982f5382f44e0c71c51b65 Mon Sep 17 00:00:00 2001
From: Imbris
Date: Tue, 2 Jan 2024 21:50:13 -0500
Subject: [PATCH] skiplist: fix stacked borrows violations

---
 crossbeam-skiplist/src/base.rs | 441 ++++++++++++++++++++++-----------
 1 file changed, 303 insertions(+), 138 deletions(-)

diff --git a/crossbeam-skiplist/src/base.rs b/crossbeam-skiplist/src/base.rs
index 7b0fb87cf..a2a9c59ce 100644
--- a/crossbeam-skiplist/src/base.rs
+++ b/crossbeam-skiplist/src/base.rs
@@ -6,8 +6,9 @@ use core::cmp;
 use core::fmt;
 use core::marker::PhantomData;
 use core::mem;
-use core::ops::{Bound, Deref, Index, RangeBounds};
+use core::ops::{Bound, Deref, RangeBounds};
 use core::ptr;
+use core::ptr::NonNull;
 use core::sync::atomic::{fence, AtomicUsize, Ordering};
 
 use crate::epoch::{self, Atomic, Collector, Guard, Shared};
@@ -31,12 +32,48 @@ struct Tower<K, V> {
     pointers: [Atomic<Node<K, V>>; 0],
 }
 
-impl<K, V> Index<usize> for Tower<K, V> {
-    type Output = Atomic<Node<K, V>>;
-    fn index(&self, index: usize) -> &Atomic<Node<K, V>> {
-        // This implementation is actually unsafe since we don't check if the
-        // index is in-bounds. But this is fine since this is only used internally.
-        unsafe { &*(&self.pointers as *const Atomic<Node<K, V>>).add(index) }
+/// A "reference" to a Tower that preserves provenance for accessing the dynamically sized tower.
+///
+/// A regular `&'a Tower` would not have permission to access any bytes under stacked borrows
+/// since Tower is a placeholder ZST.
+///
+/// Note that under tree borrows this isn't necessary.
+struct TowerRef<'a, K, V> {
+    ptr: NonNull<Tower<K, V>>,
+    _marker: PhantomData<&'a Tower<K, V>>,
+}
+
+impl<K, V> Clone for TowerRef<'_, K, V> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+impl<K, V> Copy for TowerRef<'_, K, V> {}
+
+impl<'a, K, V> TowerRef<'a, K, V> {
+    /// Creates a TowerRef.
+    ///
+    /// # Safety
+    ///
+    /// Same as NonNull::as_ref, except the pointer must be valid for accessing the actual
+    /// size of the tower.
+    #[inline]
+    unsafe fn new(ptr: NonNull<Tower<K, V>>) -> Self {
+        Self {
+            ptr,
+            _marker: PhantomData,
+        }
+    }
+
+    /// Gets the atomic node pointer at the specified level of the tower.
+    ///
+    /// # Safety
+    ///
+    /// Index must be in bounds.
+    #[inline]
+    unsafe fn get_level(self, index: usize) -> &'a Atomic<Node<K, V>> {
+        // SAFETY: Requirements passed to caller.
+        unsafe { &*(self.ptr.as_ptr() as *const Atomic<Node<K, V>>).add(index) }
     }
 }
 
@@ -58,12 +95,22 @@ impl<K, V> Head<K, V> {
             pointers: Default::default(),
         }
     }
-}
 
-impl<K, V> Deref for Head<K, V> {
-    type Target = Tower<K, V>;
-    fn deref(&self) -> &Tower<K, V> {
-        unsafe { &*(self as *const _ as *const Tower<K, V>) }
+    /// Gets a `TowerRef` for the head's tower.
+    #[inline]
+    fn as_tower(&self) -> TowerRef<'_, K, V> {
+        unsafe { TowerRef::new(NonNull::from(self).cast::<Tower<K, V>>()) }
+    }
+
+    /// Gets the atomic node pointer at the specified level.
+    ///
+    /// # Safety
+    ///
+    /// Index must be in bounds.
+    #[inline]
+    unsafe fn get_level(&self, index: usize) -> &Atomic<Node<K, V>> {
+        // SAFETY: Requirements passed to caller.
+        unsafe { self.pointers.get_unchecked(index) }
     }
 }
 
@@ -91,6 +138,22 @@ struct Node<K, V> {
     tower: Tower<K, V>,
 }
 
+/// A "reference" to a Node that preserves provenance for accessing the dynamically sized tower at
+/// the end of the node allocation.
+///
+/// Note that in a few situations below we also rely on this for preserving write permissions.
+struct NodeRef<'a, K, V> {
+    ptr: NonNull<Node<K, V>>,
+    _marker: PhantomData<&'a Node<K, V>>,
+}
+
+impl<K, V> Clone for NodeRef<'_, K, V> {
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+impl<K, V> Copy for NodeRef<'_, K, V> {}
+
 impl<K, V> Node<K, V> {
     /// Allocates a node.
     ///
@@ -142,43 +205,6 @@ impl<K, V> Node<K, V> {
         (self.refs_and_height.load(Ordering::Relaxed) & HEIGHT_MASK) + 1
     }
 
-    /// Marks all pointers in the tower and returns `true` if the level 0 was not marked.
-    fn mark_tower(&self) -> bool {
-        let height = self.height();
-
-        for level in (0..height).rev() {
-            let tag = unsafe {
-                // We're loading the pointer only for the tag, so it's okay to use
-                // `epoch::unprotected()` in this situation.
-                // TODO(Amanieu): can we use release ordering here?
-                self.tower[level]
-                    .fetch_or(1, Ordering::SeqCst, epoch::unprotected())
-                    .tag()
-            };
-
-            // If the level 0 pointer was already marked, somebody else removed the node.
-            if level == 0 && tag == 1 {
-                return false;
-            }
-        }
-
-        // We marked the level 0 pointer, therefore we removed the node.
-        true
-    }
-
-    /// Returns `true` if the node is removed.
-    #[inline]
-    fn is_removed(&self) -> bool {
-        let tag = unsafe {
-            // We're loading the pointer only for the tag, so it's okay to use
-            // `epoch::unprotected()` in this situation.
-            self.tower[0]
-                .load(Ordering::Relaxed, epoch::unprotected())
-                .tag()
-        };
-        tag == 1
-    }
-
     /// Attempts to increment the reference count of a node and returns `true` on success.
     ///
     /// The reference count can be incremented only if it is non-zero.
@@ -216,9 +242,50 @@ impl<K, V> Node<K, V> {
         }
     }
 
+    /// Drops the key and value of a node, then deallocates it.
+    #[cold]
+    unsafe fn finalize(ptr: *mut Self) {
+        unsafe {
+            // Call destructors: drop the key and the value.
+            ptr::drop_in_place(&mut (*ptr).key);
+            ptr::drop_in_place(&mut (*ptr).value);
+
+            // Finally, deallocate the memory occupied by the node.
+            Self::dealloc(ptr);
+        }
+    }
+}
+
+impl<'a, K, V> NodeRef<'a, K, V> {
+    /// Creates a NodeRef.
+    ///
+    /// # Safety
+    ///
+    /// Same as NonNull::as_ref, except the pointer must also be valid for accessing the actual
+    /// size of the tower at the end of the node.
+    #[inline]
+    unsafe fn new(ptr: NonNull<Node<K, V>>) -> Self {
+        Self {
+            ptr,
+            _marker: PhantomData,
+        }
+    }
+
+    /// # Safety
+    ///
+    /// See [`Shared::as_ref`].
+    #[inline]
+    unsafe fn from_shared(shared: Shared<'a, Node<K, V>>) -> Option<Self> {
+        if let Some(ptr) = NonNull::new(shared.as_raw().cast_mut()) {
+            Some(unsafe { Self::new(ptr) })
+        } else {
+            None
+        }
+    }
+
     /// Decrements the reference count of a node, destroying it if the count becomes zero.
     #[inline]
-    unsafe fn decrement(&self, guard: &Guard) {
+    unsafe fn decrement(self, guard: &Guard) {
         if self
             .refs_and_height
             .fetch_sub(1 << HEIGHT_BITS, Ordering::Release)
@@ -226,14 +293,14 @@
             == 1
         {
             fence(Ordering::Acquire);
-            unsafe { guard.defer_unchecked(move || Self::finalize(self)) }
+            unsafe { guard.defer_unchecked(move || Node::finalize(self.ptr.as_ptr())) }
         }
     }
 
     /// Decrements the reference count of a node, pinning the thread and destroying the node
     /// if the count become zero.
     #[inline]
-    unsafe fn decrement_with_pin<F>(&self, parent: &SkipList<K, V>, pin: F)
+    unsafe fn decrement_with_pin<F>(self, parent: &SkipList<K, V>, pin: F)
     where
         F: FnOnce() -> Guard,
     {
@@ -246,23 +313,83 @@
         fence(Ordering::Acquire);
         let guard = &pin();
         parent.check_guard(guard);
-        unsafe { guard.defer_unchecked(move || Self::finalize(self)) }
+        unsafe { guard.defer_unchecked(move || Node::finalize(self.ptr.as_ptr())) }
         }
     }
 
-    /// Drops the key and value of a node, then deallocates it.
-    #[cold]
-    unsafe fn finalize(ptr: *const Self) {
-        let ptr = ptr as *mut Self;
+    /// Marks all pointers in the tower and returns `true` if the level 0 was not marked.
+    fn mark_tower(self) -> bool {
+        let height = self.height();
+
+        for level in (0..height).rev() {
+            let tag = unsafe {
+                // We're loading the pointer only for the tag, so it's okay to use
+                // `epoch::unprotected()` in this situation.
+                // TODO(Amanieu): can we use release ordering here?
+                self.get_level(level)
+                    .fetch_or(1, Ordering::SeqCst, epoch::unprotected())
+                    .tag()
+            };
+
+            // If the level 0 pointer was already marked, somebody else removed the node.
+            if level == 0 && tag == 1 {
+                return false;
+            }
+        }
+
+        // We marked the level 0 pointer, therefore we removed the node.
+        true
+    }
+
+    /// Returns `true` if the node is removed.
+    #[inline]
+    fn is_removed(self) -> bool {
+        let tag = unsafe {
+            // We're loading the pointer only for the tag, so it's okay to use
+            // `epoch::unprotected()` in this situation.
+            self.get_level(0)
+                .load(Ordering::Relaxed, epoch::unprotected())
+                .tag()
+        };
+        tag == 1
+    }
+
+    /// Creates a TowerRef to the atomic pointers at the end of this Node allocation.
+    #[inline]
+    fn as_tower(self) -> TowerRef<'a, K, V> {
+        // SAFETY: self.ptr has provenance to access the tower.
+        unsafe {
+            TowerRef::new(NonNull::new_unchecked(
+                ptr::addr_of!((*self.ptr.as_ptr()).tower).cast_mut(),
+            ))
+        }
+    }
+
+    /// Gets a plain reference to the node which can be used for anything that doesn't need to
+    /// access the tower.
+    #[inline]
+    fn as_ref(self) -> &'a Node<K, V> {
+        // SAFETY: Self::new requires the conditions for creating a Node reference.
+        unsafe { self.ptr.as_ref() }
+    }
+
+    /// Gets the atomic node pointer at the specified level of the tower.
+    ///
+    /// # Safety
+    ///
+    /// Index must be in bounds.
+    #[inline]
+    unsafe fn get_level(&self, index: usize) -> &Atomic<Node<K, V>> {
+        // SAFETY: Requirements passed to caller.
+        unsafe { self.as_tower().get_level(index) }
+    }
+}
+
+impl<K, V> Deref for NodeRef<'_, K, V> {
+    type Target = Node<K, V>;
+    #[inline]
+    fn deref(&self) -> &Node<K, V> {
+        self.as_ref()
     }
 }
 
@@ -279,6 +406,16 @@ where
 {
     }
 }
 
+impl<K, V> fmt::Debug for NodeRef<'_, K, V>
+where
+    K: fmt::Debug,
+    V: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        <Node<K, V> as fmt::Debug>::fmt(&*self, f)
+    }
+}
+
 /// A search result.
 ///
 /// The result indicates whether the key was found, as well as what were the adjacent nodes to the
@@ -287,10 +424,10 @@ struct Position<'a, K, V> {
     /// Reference to a node with the given key, if found.
     ///
    /// If this is `Some` then it will point to the same node as `right[0]`.
-    found: Option<&'a Node<K, V>>,
+    found: Option<NodeRef<'a, K, V>>,
 
     /// Adjacent nodes with smaller keys (predecessors).
-    left: [&'a Tower<K, V>; MAX_HEIGHT],
+    left: [TowerRef<'a, K, V>; MAX_HEIGHT],
 
     /// Adjacent nodes with equal or greater keys (successors).
     right: [Shared<'a, Node<K, V>>; MAX_HEIGHT],
@@ -387,7 +524,7 @@ where
     /// Returns the entry with the smallest key.
     pub fn front<'a: 'g, 'g>(&'a self, guard: &'g Guard) -> Option<Entry<'a, 'g, K, V>> {
         self.check_guard(guard);
-        let n = self.next_node(&self.head, Bound::Unbounded, guard)?;
+        let n = self.next_node(self.head.as_tower(), Bound::Unbounded, guard)?;
         Some(Entry {
             parent: self,
             node: n,
@@ -574,7 +711,9 @@ where
             // Note that we're loading the pointer only to check whether it is null, so it's okay
             // to use `epoch::unprotected()` in this situation.
             while height >= 4
-                && self.head[height - 2]
+                && self
+                    .head
+                    .get_level(height - 2)
                     .load(Ordering::Relaxed, epoch::unprotected())
                     .is_null()
             {
@@ -607,14 +746,14 @@
     unsafe fn help_unlink<'a>(
         &'a self,
         pred: &'a Atomic<Node<K, V>>,
-        curr: &'a Node<K, V>,
+        curr: NodeRef<'a, K, V>,
         succ: Shared<'a, Node<K, V>>,
         guard: &'a Guard,
     ) -> Option<Shared<'a, Node<K, V>>> {
         // If `succ` is marked, that means `curr` is removed. Let's try
         // unlinking it from the skip list at this level.
         match pred.compare_exchange(
-            Shared::from(curr as *const _),
+            Shared::from(curr.ptr.as_ptr().cast_const()),
             succ.with_tag(0),
             Ordering::Release,
             Ordering::Relaxed,
@@ -634,13 +773,13 @@
     /// node is reached then a search is performed using the given key.
     fn next_node<'a>(
         &'a self,
-        pred: &'a Tower<K, V>,
+        pred: TowerRef<'a, K, V>,
         lower_bound: Bound<&K>,
         guard: &'a Guard,
-    ) -> Option<&'a Node<K, V>> {
+    ) -> Option<NodeRef<'a, K, V>> {
         unsafe {
             // Load the level 0 successor of the current node.
-            let mut curr = pred[0].load_consume(guard);
+            let mut curr = pred.get_level(0).load_consume(guard);
 
             // If `curr` is marked, that means `pred` is removed and we have to use
             // a key search.
@@ -648,11 +787,11 @@
                 return self.search_bound(lower_bound, false, guard);
             }
 
-            while let Some(c) = curr.as_ref() {
-                let succ = c.tower[0].load_consume(guard);
+            while let Some(c) = NodeRef::from_shared(curr) {
+                let succ = c.get_level(0).load_consume(guard);
 
                 if succ.tag() == 1 {
-                    if let Some(c) = self.help_unlink(&pred[0], c, succ, guard) {
+                    if let Some(c) = self.help_unlink(pred.get_level(0), c, succ, guard) {
                         // On success, continue searching through the current level.
                         curr = c;
                         continue;
@@ -683,7 +822,7 @@
         bound: Bound<&Q>,
         upper_bound: bool,
         guard: &'a Guard,
-    ) -> Option<&'a Node<K, V>>
+    ) -> Option<NodeRef<'a, K, V>>
     where
         K: Borrow<Q>,
         Q: Ord + ?Sized,
@@ -695,7 +834,9 @@
 
             // Fast loop to skip empty tower levels.
             while level >= 1
-                && self.head[level - 1]
+                && self
+                    .head
+                    .get_level(level - 1)
                     .load(Ordering::Relaxed, guard)
                     .is_null()
             {
@@ -706,13 +847,13 @@
             let mut result = None;
 
             // The predecessor node
-            let mut pred = &*self.head;
+            let mut pred = self.head.as_tower();
 
             while level >= 1 {
                 level -= 1;
 
                 // Two adjacent nodes at the current level.
-                let mut curr = pred[level].load_consume(guard);
+                let mut curr = pred.get_level(level).load_consume(guard);
 
                 // If `curr` is marked, that means `pred` is removed and we have to restart the
                 // search.
@@ -722,11 +863,12 @@
 
                 // Iterate through the current level until we reach a node with a key greater
                 // than or equal to `key`.
-                while let Some(c) = curr.as_ref() {
-                    let succ = c.tower[level].load_consume(guard);
+                while let Some(c) = NodeRef::from_shared(curr) {
+                    let succ = c.get_level(level).load_consume(guard);
 
                     if succ.tag() == 1 {
-                        if let Some(c) = self.help_unlink(&pred[level], c, succ, guard) {
+                        if let Some(c) = self.help_unlink(pred.get_level(level), c, succ, guard)
+                        {
                             // On success, continue searching through the current level.
                             curr = c;
                             continue;
@@ -754,7 +896,7 @@
                     }
 
                     // Move one step forward.
-                    pred = &c.tower;
+                    pred = c.as_tower();
                    curr = succ;
                 }
             }
@@ -775,7 +917,7 @@
             // The result of this search.
             let mut result = Position {
                 found: None,
-                left: [&*self.head; MAX_HEIGHT],
+                left: [self.head.as_tower(); MAX_HEIGHT],
                 right: [Shared::null(); MAX_HEIGHT],
             };
 
@@ -784,7 +926,9 @@
 
            // Fast loop to skip empty tower levels.
             while level >= 1
-                && self.head[level - 1]
+                && self
+                    .head
+                    .get_level(level - 1)
                     .load(Ordering::Relaxed, guard)
                     .is_null()
             {
@@ -792,13 +936,13 @@
             }
 
             // The predecessor node
-            let mut pred = &*self.head;
+            let mut pred = self.head.as_tower();
 
             while level >= 1 {
                 level -= 1;
 
                 // Two adjacent nodes at the current level.
-                let mut curr = pred[level].load_consume(guard);
+                let mut curr = pred.get_level(level).load_consume(guard);
 
                 // If `curr` is marked, that means `pred` is removed and we have to restart the
                 // search.
@@ -808,11 +952,12 @@
 
                 // Iterate through the current level until we reach a node with a key greater
                 // than or equal to `key`.
-                while let Some(c) = curr.as_ref() {
-                    let succ = c.tower[level].load_consume(guard);
+                while let Some(c) = NodeRef::from_shared(curr) {
+                    let succ = c.get_level(level).load_consume(guard);
 
                     if succ.tag() == 1 {
-                        if let Some(c) = self.help_unlink(&pred[level], c, succ, guard) {
+                        if let Some(c) = self.help_unlink(pred.get_level(level), c, succ, guard)
+                        {
                             // On success, continue searching through the current level.
                             curr = c;
                             continue;
@@ -835,7 +980,7 @@
                     }
 
                     // Move one step forward.
-                    pred = &c.tower;
+                    pred = c.as_tower();
                     curr = succ;
                 }
@@ -915,7 +1060,10 @@
             ptr::addr_of_mut!((*n).key).write(key);
             ptr::addr_of_mut!((*n).value).write(value);
 
-            (Shared::<Node<K, V>>::from(n as *const _), &*n)
+            (
+                Shared::<Node<K, V>>::from(n as *const _),
+                NodeRef::new(NonNull::new_unchecked(n)),
+            )
         };
 
         // Optimistically increment `len`.
@@ -923,11 +1071,12 @@
         loop {
             // Set the lowest successor of `n` to `search.right[0]`.
-            n.tower[0].store(search.right[0], Ordering::Relaxed);
+            n.get_level(0).store(search.right[0], Ordering::Relaxed);
 
             // Try installing the new node into the skip list (at level 0).
             // TODO(Amanieu): can we use release ordering here?
-            if search.left[0][0]
+            if search.left[0]
+                .get_level(0)
                 .compare_exchange(
                     search.right[0],
                     node,
@@ -946,7 +1095,7 @@
             struct ScopeGuard<K, V>(*const Node<K, V>);
             impl<K, V> Drop for ScopeGuard<K, V> {
                 fn drop(&mut self) {
-                    unsafe { Node::finalize(self.0) }
+                    unsafe { Node::finalize(self.0.cast_mut()) }
                 }
             }
             let sg = ScopeGuard(node.as_raw());
@@ -967,7 +1116,7 @@
                 // let's try returning it as an entry.
                 if let Some(e) = RefEntry::try_acquire(self, r) {
                     // Destroy the new node.
-                    Node::finalize(node.as_raw());
+                    Node::finalize(node.as_raw().cast_mut());
                     self.hot_data.len.fetch_sub(1, Ordering::Relaxed);
 
                     return e;
@@ -994,7 +1143,7 @@
                 // Load the current value of the pointer in the tower at this level.
                 // TODO(Amanieu): can we use relaxed ordering here?
-                let next = n.tower[level].load(Ordering::SeqCst, guard);
+                let next = n.get_level(level).load(Ordering::SeqCst, guard);
 
                 // If the current pointer is marked, that means another thread is already
                 // removing the node we've just inserted. In that case, let's just stop
@@ -1030,7 +1179,7 @@
                 // operation fails, that means another thread has marked the pointer and we
                 // should stop building the tower.
                 // TODO(Amanieu): can we use release ordering here?
-                if n.tower[level]
+                if n.get_level(level)
                     .compare_exchange(next, succ, Ordering::SeqCst, Ordering::SeqCst, guard)
                     .is_err()
                 {
@@ -1044,7 +1193,8 @@
                 // Try installing the new node at the current level.
                 // TODO(Amanieu): can we use release ordering here?
-                if pred[level]
+                if pred
+                    .get_level(level)
                     .compare_exchange(succ, node, Ordering::SeqCst, Ordering::SeqCst, guard)
                     .is_ok()
                 {
@@ -1073,7 +1223,7 @@
         // installation, we must repeat the search, which will unlink the new node at that
         // level.
         // TODO(Amanieu): can we use relaxed ordering here?
-        if n.tower[height - 1].load(Ordering::SeqCst, guard).tag() == 1 {
+        if n.get_level(height - 1).load(Ordering::SeqCst, guard).tag() == 1 {
             self.search_bound(Bound::Included(&n.key), false, guard);
         }
@@ -1151,13 +1301,14 @@
             // the `left` and `right` lists.
             for level in (0..n.height()).rev() {
                 // TODO(Amanieu): can we use relaxed ordering here?
-                let succ = n.tower[level].load(Ordering::SeqCst, guard).with_tag(0);
+                let succ = n.get_level(level).load(Ordering::SeqCst, guard).with_tag(0);
 
                 // Try linking the predecessor and successor at this level.
                 // TODO(Amanieu): can we use release ordering here?
-                if search.left[level][level]
+                if search.left[level]
+                    .get_level(level)
                     .compare_exchange(
-                        Shared::from(n as *const _),
+                        Shared::from(n.ptr.as_ptr().cast_const()),
                         succ,
                         Ordering::SeqCst,
                         Ordering::SeqCst,
@@ -1256,20 +1407,22 @@
 impl<K, V> Drop for SkipList<K, V> {
     fn drop(&mut self) {
         unsafe {
-            let mut node = self.head[0]
-                .load(Ordering::Relaxed, epoch::unprotected())
-                .as_ref();
+            let mut node = NodeRef::from_shared(
+                self.head
+                    .get_level(0)
+                    .load(Ordering::Relaxed, epoch::unprotected()),
+            );
 
             // Iterate through the whole skip list and destroy every node.
             while let Some(n) = node {
                 // Unprotected loads are okay because this function is the only one currently using
                 // the skip list.
-                let next = n.tower[0]
-                    .load(Ordering::Relaxed, epoch::unprotected())
-                    .as_ref();
+                let next = NodeRef::from_shared(
+                    n.get_level(0).load(Ordering::Relaxed, epoch::unprotected()),
+                );
 
                 // Deallocate every node.
-                Node::finalize(n);
+                Node::finalize(n.ptr.as_ptr());
 
                 node = next;
             }
@@ -1297,13 +1450,17 @@ impl<K, V> IntoIterator for SkipList<K, V> {
             //
             // Unprotected loads are okay because this function is the only one currently using
             // the skip list.
-            let front = self.head[0]
+            let front = self
+                .head
+                .get_level(0)
                 .load(Ordering::Relaxed, epoch::unprotected())
                 .as_raw();
 
             // Clear the skip list by setting all pointers in head to null.
             for level in 0..MAX_HEIGHT {
-                self.head[level].store(Shared::null(), Ordering::Relaxed);
+                self.head
+                    .get_level(level)
+                    .store(Shared::null(), Ordering::Relaxed);
             }
 
             IntoIter {
@@ -1320,7 +1477,7 @@ impl<K, V> IntoIterator for SkipList<K, V> {
 /// not outlive the `SkipList`.
 pub struct Entry<'a: 'g, 'g, K, V> {
     parent: &'a SkipList<K, V>,
-    node: &'g Node<K, V>,
+    node: NodeRef<'g, K, V>,
     guard: &'g Guard,
 }
 
@@ -1332,12 +1489,12 @@ impl<'a: 'g, 'g, K: 'a, V: 'a> Entry<'a, 'g, K, V> {
     /// Returns a reference to the key.
     pub fn key(&self) -> &'g K {
-        &self.node.key
+        &self.node.as_ref().key
     }
 
     /// Returns a reference to the value.
     pub fn value(&self) -> &'g V {
-        &self.node.value
+        &self.node.as_ref().value
     }
 
     /// Returns a reference to the parent `SkipList`
@@ -1421,7 +1578,7 @@ where
     /// Returns the next entry in the skip list.
     pub fn next(&self) -> Option<Entry<'a, 'g, K, V>> {
         let n = self.parent.next_node(
-            &self.node.tower,
+            self.node.as_tower(),
             Bound::Excluded(&self.node.key),
             self.guard,
         )?;
@@ -1462,7 +1619,7 @@ where
 /// leaked. This is because releasing the entry requires a `Guard`.
 pub struct RefEntry<'a, K, V> {
     parent: &'a SkipList<K, V>,
-    node: &'a Node<K, V>,
+    node: NodeRef<'a, K, V>,
 }
 
 impl<'a, K: 'a, V: 'a> RefEntry<'a, K, V> {
@@ -1505,7 +1662,7 @@ impl<'a, K: 'a, V: 'a> RefEntry<'a, K, V> {
     /// a node.
     unsafe fn try_acquire(
         parent: &'a SkipList<K, V>,
-        node: &Node<K, V>,
+        node: NodeRef<'_, K, V>,
     ) -> Option<RefEntry<'a, K, V>> {
         if unsafe { node.try_increment() } {
             Some(RefEntry {
                 parent,
 
                 // We re-bind the lifetime of the node here to that of the skip
                 // list since we now hold a reference to it.
-                node: unsafe { &*(node as *const _) },
+                node: unsafe { NodeRef::new(node.ptr) },
             })
         } else {
             None
@@ -1552,7 +1709,7 @@ impl<K, V> Clone for RefEntry<'_, K, V> {
     fn clone(&self) -> Self {
         unsafe {
             // Incrementing will always succeed since we're already holding a reference to the node.
-            Node::try_increment(self.node);
+            Node::try_increment(&*self.node);
         }
         Self {
             parent: self.parent,
@@ -1597,7 +1754,7 @@ where
         loop {
             n = self
                 .parent
-                .next_node(&n.tower, Bound::Excluded(&n.key), guard)?;
+                .next_node(n.as_tower(), Bound::Excluded(&n.key), guard)?;
             if let Some(e) = RefEntry::try_acquire(self.parent, n) {
                 return Some(e);
             }
@@ -1636,8 +1793,8 @@ where
 /// An iterator over the entries of a `SkipList`.
 pub struct Iter<'a: 'g, 'g, K, V> {
     parent: &'a SkipList<K, V>,
-    head: Option<&'g Node<K, V>>,
-    tail: Option<&'g Node<K, V>>,
+    head: Option<NodeRef<'g, K, V>>,
+    tail: Option<NodeRef<'g, K, V>>,
     guard: &'g Guard,
 }
 
@@ -1651,10 +1808,11 @@ where
         self.head = match self.head {
             Some(n) => self
                 .parent
-                .next_node(&n.tower, Bound::Excluded(&n.key), self.guard),
-            None => self
-                .parent
-                .next_node(&self.parent.head, Bound::Unbounded, self.guard),
+                .next_node(n.as_tower(), Bound::Excluded(&n.key), self.guard),
+            None => {
+                self.parent
+                    .next_node(self.parent.head.as_tower(), Bound::Unbounded, self.guard)
+            }
         };
         if let (Some(h), Some(t)) = (self.head, self.tail) {
             if h.key >= t.key {
@@ -1702,8 +1860,8 @@ where
 {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("Iter")
-            .field("head", &self.head.map(|n| (&n.key, &n.value)))
-            .field("tail", &self.tail.map(|n| (&n.key, &n.value)))
+            .field("head", &self.head.as_deref().map(|n| (&n.key, &n.value)))
+            .field("tail", &self.tail.as_deref().map(|n| (&n.key, &n.value)))
             .finish()
     }
 }
@@ -1814,8 +1972,8 @@ where
     Q: Ord + ?Sized,
 {
     parent: &'a SkipList<K, V>,
-    head: Option<&'g Node<K, V>>,
-    tail: Option<&'g Node<K, V>>,
+    head: Option<NodeRef<'g, K, V>>,
+    tail: Option<NodeRef<'g, K, V>>,
     range: R,
     guard: &'g Guard,
     _marker: PhantomData<fn() -> Q>, // covariant over `Q`
 }
 
@@ -1833,14 +1991,14 @@ where
         self.head = match self.head {
             Some(n) => self
                 .parent
-                .next_node(&n.tower, Bound::Excluded(&n.key), self.guard),
+                .next_node(n.as_tower(), Bound::Excluded(&n.key), self.guard),
             None => self
                 .parent
                 .search_bound(self.range.start_bound(), false, self.guard),
         };
         if let Some(h) = self.head {
             let bound = match self.tail {
-                Some(t) => Bound::Excluded(t.key.borrow()),
+                Some(t) => Bound::Excluded(t.as_ref().key.borrow()),
                 None => self.range.end_bound(),
             };
             if !below_upper_bound(&bound, h.key.borrow()) {
@@ -1873,7 +2031,7 @@ where
         };
         if let Some(t) = self.tail {
             let bound = match self.head {
-                Some(h) => Bound::Excluded(h.key.borrow()),
+                Some(h) => Bound::Excluded(h.as_ref().key.borrow()),
                 None => self.range.start_bound(),
             };
             if !above_lower_bound(&bound, t.key.borrow()) {
@@ -2034,15 +2192,18 @@ pub struct IntoIter<K, V> {
 impl<K, V> Drop for IntoIter<K, V> {
     fn drop(&mut self) {
         // Iterate through the whole chain and destroy every node.
-        while !self.node.is_null() {
+        while let Some(node) = NonNull::new(self.node) {
             unsafe {
+                let node = NodeRef::new(node);
                 // Unprotected loads are okay because this function is the only one currently using
                 // the skip list.
-                let next = (*self.node).tower[0].load(Ordering::Relaxed, epoch::unprotected());
+                let next = node
+                    .get_level(0)
+                    .load(Ordering::Relaxed, epoch::unprotected());
 
                 // We can safely do this without deferring because references to
                 // keys & values that we give out never outlive the SkipList.
-                Node::finalize(self.node);
+                Node::finalize(node.ptr.as_ptr());
 
                 self.node = next.as_raw() as *mut Node<K, V>;
             }
@@ -2069,7 +2230,11 @@ impl<K, V> Iterator for IntoIter<K, V> {
             //
             // Unprotected loads are okay because this function is the only one currently using
             // the skip list.
-            let next = (*self.node).tower[0].load(Ordering::Relaxed, epoch::unprotected());
+            let next = {
+                let node = NodeRef::new(NonNull::new_unchecked(self.node));
+                node.get_level(0)
+                    .load(Ordering::Relaxed, epoch::unprotected())
+            };
 
             // Deallocate the current node and move to the next one.
            Node::dealloc(self.node);
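
Editor's note (not part of the patch): the idea behind `TowerRef`/`NodeRef` is that a plain `&Tower<K, V>` or `&Node<K, V>` only carries provenance for the sized part of the struct, so pointer arithmetic past the zero-sized `pointers` placeholder is flagged by Miri's stacked borrows checker, while a `NonNull` derived from the whole allocation is not. The sketch below is a minimal, self-contained illustration of that pattern under the same assumption; the names (`Header`, `Slots`, `read_slot`) are made up for the example and are not crossbeam APIs. One way to exercise this kind of code under the stacked borrows model is `cargo +nightly miri test`.

    use std::alloc::{alloc, dealloc, Layout};
    use std::ptr::{addr_of, addr_of_mut, NonNull};

    /// Zero-sized placeholder for a trailing, dynamically sized array,
    /// playing the same role as `Tower<K, V>` in the patch.
    #[repr(C)]
    struct Slots([usize; 0]);

    #[repr(C)]
    struct Header {
        len: usize,
        slots: Slots,
    }

    /// Reads slot `index` by deriving the pointer from the whole-allocation
    /// pointer (what `NodeRef`/`TowerRef` preserve) instead of going through a
    /// `&Slots` reference, which under stacked borrows only has provenance for
    /// zero bytes.
    unsafe fn read_slot(node: NonNull<Header>, index: usize) -> usize {
        let slots = addr_of!((*node.as_ptr()).slots) as *const usize;
        *slots.add(index)
    }

    fn main() {
        // Allocate a header followed by two trailing slots, mimicking the
        // skiplist's variable-height node allocation.
        let (layout, _) = Layout::new::<Header>()
            .extend(Layout::array::<usize>(2).unwrap())
            .unwrap();
        let layout = layout.pad_to_align();
        unsafe {
            let raw = alloc(layout).cast::<Header>();
            let node = NonNull::new(raw).expect("allocation failed");
            addr_of_mut!((*raw).len).write(2);
            let slots = addr_of_mut!((*raw).slots) as *mut usize;
            slots.write(7);
            slots.add(1).write(9);
            assert_eq!(read_slot(node, 1), 9);
            dealloc(raw.cast(), layout);
        }
    }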