diff --git a/src/libstd/sync/mod.rs b/src/libstd/sync/mod.rs
index b6699910b07cf..5515c17581b34 100644
--- a/src/libstd/sync/mod.rs
+++ b/src/libstd/sync/mod.rs
@@ -149,6 +149,7 @@
 //! [`Once`]: crate::sync::Once
 //! [`RwLock`]: crate::sync::RwLock
 
+#![deny(unsafe_op_in_unsafe_fn)]
 #![stable(feature = "rust1", since = "1.0.0")]
 
 #[stable(feature = "rust1", since = "1.0.0")]
diff --git a/src/libstd/sync/mpsc/blocking.rs b/src/libstd/sync/mpsc/blocking.rs
index d34de6a4fac3e..4cf45fa53b446 100644
--- a/src/libstd/sync/mpsc/blocking.rs
+++ b/src/libstd/sync/mpsc/blocking.rs
@@ -1,6 +1,5 @@
 //! Generic support for building blocking abstractions.
 
-use crate::mem;
 use crate::sync::atomic::{AtomicBool, Ordering};
 use crate::sync::Arc;
 use crate::thread::{self, Thread};
@@ -47,14 +46,19 @@ impl SignalToken {
     /// flag.
     #[inline]
     pub unsafe fn cast_to_usize(self) -> usize {
-        mem::transmute(self.inner)
+        Arc::into_raw(self.inner) as usize
     }
 
     /// Converts from an unsafe usize value. Useful for retrieving a pipe's state
     /// flag.
     #[inline]
     pub unsafe fn cast_from_usize(signal_ptr: usize) -> SignalToken {
-        SignalToken { inner: mem::transmute(signal_ptr) }
+        // SAFETY: this is ok because of the internal representation of Arc,
+        // which is a pointer plus phantom data, so it is the size of a usize.
+        //
+        // NOTE: The call to `from_raw` is used in conjunction with the call
+        // to `into_raw` made in the `SignalToken::cast_to_usize` method.
+        SignalToken { inner: unsafe { Arc::from_raw(signal_ptr as *const _) } }
     }
 }
 
diff --git a/src/libstd/sync/mpsc/mod.rs b/src/libstd/sync/mpsc/mod.rs
index d6cc811154f11..65c615b22c2b6 100644
--- a/src/libstd/sync/mpsc/mod.rs
+++ b/src/libstd/sync/mpsc/mod.rs
@@ -648,10 +648,18 @@ enum Flavor<T> {
 trait UnsafeFlavor<T> {
     fn inner_unsafe(&self) -> &UnsafeCell<Flavor<T>>;
     unsafe fn inner_mut(&self) -> &mut Flavor<T> {
-        &mut *self.inner_unsafe().get()
+        // SAFETY: We are sure the inner value is never null when this is
+        // called; the invariants of the module make it so.
+        //
+        // The caller ensures that no other (mutable) reference will be
+        // handed out while the one returned here is in use.
+        unsafe { &mut *self.inner_unsafe().get() }
     }
     unsafe fn inner(&self) -> &Flavor<T> {
-        &*self.inner_unsafe().get()
+        // SAFETY: We are sure the inner value is never null when this is
+        // called; the invariants of the module make it so, and no mutable
+        // reference will be active while this one is.
+        unsafe { &*self.inner_unsafe().get() }
     }
 }
 impl<T> UnsafeFlavor<T> for Sender<T> {
diff --git a/src/libstd/sync/mpsc/spsc_queue.rs b/src/libstd/sync/mpsc/spsc_queue.rs
index 0274268f69f25..8d8e185cbaf6b 100644
--- a/src/libstd/sync/mpsc/spsc_queue.rs
+++ b/src/libstd/sync/mpsc/spsc_queue.rs
@@ -99,7 +99,9 @@ impl<T, ProducerAddition, ConsumerAddition> Queue<T, ProducerAddition, Consumer
     ) -> Self {
         let n1 = Node::new();
         let n2 = Node::new();
-        (*n1).next.store(n2, Ordering::Relaxed);
+        // SAFETY: At this point we know the pointer is valid: it was built
+        // just above.
+        unsafe { (*n1).next.store(n2, Ordering::Relaxed) }
         Queue {
             consumer: CacheAligned::new(Consumer {
                 tail: UnsafeCell::new(n2),
@@ -133,19 +135,23 @@ impl<T, ProducerAddition, ConsumerAddition> Queue<T, ProducerAddition, Consumer
     }
 
     unsafe fn alloc(&self) -> *mut Node<T> {
-        // First try to see if we can consume the 'first' node for our uses.
-        if *self.producer.first.get() != *self.producer.tail_copy.get() {
-            let ret = *self.producer.first.get();
-            *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
-            return ret;
-        }
-        // If the above fails, then update our copy of the tail and try
-        // again.
-        *self.producer.0.tail_copy.get() = self.consumer.tail_prev.load(Ordering::Acquire);
-        if *self.producer.first.get() != *self.producer.tail_copy.get() {
-            let ret = *self.producer.first.get();
-            *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
-            return ret;
+        // SAFETY: ??? Explain why the pointers are safe to dereference and
+        // why returning a mutable pointer is ok. ???
+        unsafe {
+            // First try to see if we can consume the 'first' node for our uses.
+            if *self.producer.first.get() != *self.producer.tail_copy.get() {
+                let ret = *self.producer.first.get();
+                *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
+                return ret;
+            }
+            // If the above fails, then update our copy of the tail and try
+            // again.
+            *self.producer.0.tail_copy.get() = self.consumer.tail_prev.load(Ordering::Acquire);
+            if *self.producer.first.get() != *self.producer.tail_copy.get() {
+                let ret = *self.producer.first.get();
+                *self.producer.0.first.get() = (*ret).next.load(Ordering::Relaxed);
+                return ret;
+            }
         }
         // If all of that fails, then we have to allocate a new node
         // (there's nothing in the node cache).
@@ -313,7 +319,7 @@ mod tests {
     }
 
     unsafe fn stress_bound(bound: usize) {
-        let q = Arc::new(Queue::with_additions(bound, (), ()));
+        let q = Arc::new(unsafe { Queue::with_additions(bound, (), ()) });
 
         let (tx, rx) = channel();
         let q2 = q.clone();
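
For reviewers unfamiliar with the lint enabled at the top of the patch, here is a minimal standalone sketch (not part of the patch; `read_exact_u32` is a hypothetical function) of what `#![deny(unsafe_op_in_unsafe_fn)]` enforces: the body of an `unsafe fn` no longer acts as one big implicit unsafe block, so each unsafe operation must be wrapped in its own `unsafe {}` block, which is what gives the SAFETY comments above a precise scope.

#![deny(unsafe_op_in_unsafe_fn)]

/// # Safety
///
/// `ptr` must be non-null, properly aligned, and point to a live `u32`.
unsafe fn read_exact_u32(ptr: *const u32) -> u32 {
    // Under the old rules this bare dereference would compile; under the
    // lint it is an error unless wrapped in an explicit block.
    //
    // SAFETY: the caller upholds the contract in the doc comment above.
    unsafe { *ptr }
}

fn main() {
    let x = 7_u32;
    // SAFETY: `&x` is a valid, aligned pointer to a live `u32`.
    let y = unsafe { read_exact_u32(&x) };
    assert_eq!(y, 7);
}

On current compilers the attribute works as written; at the time of this patch the lint was still unstable, which is why it is enabled per-module in-tree.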
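Similarly, the `blocking.rs` hunk swaps `mem::transmute` for the `Arc::into_raw` / `Arc::from_raw` pair. A rough standalone sketch of that round trip through `usize`, with a hypothetical `Payload` type and `to_usize` / `from_usize` functions standing in for `SignalToken` and its cast methods:

#![deny(unsafe_op_in_unsafe_fn)]

use std::sync::Arc;

struct Payload {
    value: i32,
}

fn to_usize(arc: Arc<Payload>) -> usize {
    // `into_raw` relinquishes ownership into a raw pointer; the strong count
    // stays alive until a matching `from_raw` reclaims it.
    Arc::into_raw(arc) as usize
}

/// # Safety
///
/// `bits` must come from a prior call to `to_usize`, and each such value
/// must be passed back exactly once.
unsafe fn from_usize(bits: usize) -> Arc<Payload> {
    // SAFETY: per the contract above, `bits` is a pointer that came out of
    // `Arc::into_raw`, so ownership can be reclaimed here.
    unsafe { Arc::from_raw(bits as *const Payload) }
}

fn main() {
    let bits = to_usize(Arc::new(Payload { value: 42 }));
    // SAFETY: `bits` was produced by `to_usize` above and is consumed once.
    let arc = unsafe { from_usize(bits) };
    assert_eq!(arc.value, 42);
}

Unlike `transmute`, the `into_raw`/`from_raw` pair goes through `Arc`'s public API, so the code no longer depends on `Arc`'s internal representation.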