From e4580da8a8175f334ad7e259d2a7c1ec893d6e49 Mon Sep 17 00:00:00 2001 From: Jorge Aparicio Date: Mon, 20 Aug 2018 20:19:19 +0100 Subject: [PATCH 01/37] x86_64: "practically" thread-safe Pool --- ci/script.sh | 1 - src/lib.rs | 3 +- src/pool/cas.rs | 212 ++++++++++++++++++++++++++++++++++++++++++ src/pool/llsc.rs | 76 +++++++++++++++ src/pool/mod.rs | 189 ++++++++++++++++++++++++------------- src/pool/singleton.rs | 14 +-- tests/tsan.rs | 31 ++++++ 7 files changed, 451 insertions(+), 75 deletions(-) create mode 100644 src/pool/cas.rs create mode 100644 src/pool/llsc.rs diff --git a/ci/script.sh b/ci/script.sh index e235fa266b..48f9acff76 100644 --- a/ci/script.sh +++ b/ci/script.sh @@ -17,7 +17,6 @@ main() { if [ $TRAVIS_RUST_VERSION = nightly ]; then export RUSTFLAGS="-Z sanitizer=thread" - export RUST_TEST_THREADS=1 export TSAN_OPTIONS="suppressions=$(pwd)/suppressions.txt" cargo test --test tsan --target $TARGET diff --git a/src/lib.rs b/src/lib.rs index 221e169c80..b2b88ff616 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -73,7 +73,8 @@ #![deny(missing_docs)] #![deny(rust_2018_compatibility)] #![deny(rust_2018_idioms)] -#![deny(warnings)] +// #![deny(warnings)] +#![allow(warnings)] // FIXME pub use binary_heap::BinaryHeap; pub use generic_array::typenum::consts; diff --git a/src/pool/cas.rs b/src/pool/cas.rs new file mode 100644 index 0000000000..ffbedeaf21 --- /dev/null +++ b/src/pool/cas.rs @@ -0,0 +1,212 @@ +//! Stack based on CAS atomics +//! +//! To reduce the chance of hitting the ABA problem we use a 32-bit offset + a 32-bit version tag +//! instead of a 64-bit pointer. The version tag will be bumped on each successful `pop` operation. 
+ +use core::{ + cell::UnsafeCell, + convert::TryFrom, + marker::PhantomData, + mem::{self, MaybeUninit}, + num::NonZeroUsize, + ptr::NonNull, + sync::atomic::{AtomicUsize, Ordering}, +}; + +/// Unfortunate implementation detail required to use the +/// [`Pool.grow_exact`](struct.Pool.html#method.grow_exact) method +pub struct Node { + next: Atomic>, + pub(crate) data: UnsafeCell, +} + +impl Node { + fn next(&self) -> &Atomic> { + &self.next + } +} + +pub struct Stack { + head: Atomic>, +} + +impl Stack { + pub const fn new() -> Self { + Self { + head: Atomic::null(), + } + } + + pub fn push(&self, new_head: Ptr>) { + let mut head = self.head.load(Ordering::Relaxed); + + loop { + unsafe { + new_head + .as_raw() + .as_ref() + .next() + .store(head, Ordering::Relaxed); + } + + if let Err(p) = self.head.compare_and_exchange_weak( + head, + Some(new_head), + Ordering::Release, + Ordering::Relaxed, + ) { + head = p; + } else { + return; + } + } + } + + pub fn try_pop(&self) -> Option>> { + loop { + if let Some(mut head) = self.head.load(Ordering::Acquire) { + let next = unsafe { head.as_raw().as_ref().next().load(Ordering::Relaxed) }; + + if self + .head + .compare_and_exchange_weak( + Some(head), + next, + Ordering::Release, + Ordering::Relaxed, + ) + .is_ok() + { + head.incr_tag(); + return Some(head); + } + } else { + // stack observed empty + return None; + } + } + } +} + +fn anchor() -> *mut T { + static mut ANCHOR: u8 = 0; + (unsafe { &mut ANCHOR } as *mut u8 as usize & !(mem::align_of::() - 1)) as *mut T +} + +/// Anchored pointer. 
This is a (signed) 32-bit offset from `anchor` plus a 32-bit tag +pub struct Ptr { + inner: NonZeroUsize, + _marker: PhantomData<*mut T>, +} + +impl Clone for Ptr { + fn clone(&self) -> Self { + *self + } +} + +impl Copy for Ptr {} + +impl Ptr { + pub fn new(p: *mut T) -> Option { + i32::try_from((p as isize).wrapping_sub(anchor::() as isize)) + .ok() + .map(|offset| unsafe { Ptr::from_parts(0, offset) }) + } + + unsafe fn from_parts(tag: u32, offset: i32) -> Self { + Self { + inner: NonZeroUsize::new_unchecked((tag as usize) << 32 | (offset as u32 as usize)), + _marker: PhantomData, + } + } + + fn from_usize(p: usize) -> Option { + NonZeroUsize::new(p).map(|inner| Self { + inner, + _marker: PhantomData, + }) + } + + fn into_usize(&self) -> usize { + self.inner.get() + } + + fn tag(&self) -> u32 { + (self.inner.get() >> 32) as u32 + } + + fn incr_tag(&mut self) { + let tag = self.tag().wrapping_add(1); + let offset = self.offset(); + + *self = unsafe { Ptr::from_parts(tag, offset) }; + } + + fn offset(&self) -> i32 { + self.inner.get() as i32 + } + + fn as_raw(&self) -> NonNull { + unsafe { + NonNull::new_unchecked( + anchor::() + .cast::() + .offset(self.offset() as isize) + .cast(), + ) + } + } + + pub fn dangling() -> Self { + unsafe { Self::from_parts(0, 1) } + } + + pub unsafe fn as_ref(&self) -> &T { + &*self.as_raw().as_ptr() + } +} + +struct Atomic { + inner: AtomicUsize, + _marker: PhantomData<*mut T>, +} + +impl Atomic { + const fn null() -> Self { + Self { + inner: AtomicUsize::new(0), + _marker: PhantomData, + } + } + + fn compare_and_exchange_weak( + &self, + current: Option>, + new: Option>, + succ: Ordering, + fail: Ordering, + ) -> Result<(), Option>> { + self.inner + .compare_exchange_weak( + current.map(|p| p.into_usize()).unwrap_or(0), + new.map(|p| p.into_usize()).unwrap_or(0), + succ, + fail, + ) + .map(drop) + .map_err(Ptr::from_usize) + } + + fn load(&self, ord: Ordering) -> Option> { + NonZeroUsize::new(self.inner.load(ord)).map(|inner| Ptr 
{ + inner, + _marker: PhantomData, + }) + } + + fn store(&self, val: Option>, ord: Ordering) { + self.inner + .store(val.map(|p| p.into_usize()).unwrap_or(0), ord) + } +} diff --git a/src/pool/llsc.rs b/src/pool/llsc.rs new file mode 100644 index 0000000000..3d1c61f437 --- /dev/null +++ b/src/pool/llsc.rs @@ -0,0 +1,76 @@ +//! Stack based on LL/SC atomics + +pub use core::ptr::NonNull as Ptr; +use core::{ + cell::UnsafeCell, + ptr, + sync::atomic::{self, AtomicPtr, Ordering}, +}; + +pub struct Node { + next: AtomicPtr>, + pub(crate) data: UnsafeCell, +} + +impl Node { + fn next(&self) -> &AtomicPtr> { + &self.next + } +} + +pub struct Stack { + head: AtomicPtr>, +} + +impl Stack { + pub const fn new() -> Self { + Self { + head: AtomicPtr::new(ptr::null_mut()), + } + } + + pub fn push(&self, new_head: Ptr>) { + // NOTE `Ordering`s come from crossbeam's (v0.6.0) `TreiberStack` + + let mut head = self.head.load(Ordering::Relaxed); + loop { + unsafe { new_head.as_ref().next().store(head, Ordering::Relaxed) } + + match self.head.compare_exchange_weak( + head, + new_head.as_ptr(), + Ordering::Release, // success + Ordering::Relaxed, // failure + ) { + Ok(_) => return, + // interrupt occurred or other core made a successful STREX op on the head + Err(p) => head = p, + } + } + } + + pub fn try_pop(&self) -> Option>> { + // NOTE `Ordering`s come from crossbeam's (v0.6.0) `TreiberStack` + + loop { + let head = self.head.load(Ordering::Acquire); + if let Some(nn_head) = Ptr::new(head) { + let next = unsafe { nn_head.as_ref().next().load(Ordering::Relaxed) }; + + match self.head.compare_exchange_weak( + head, + next, + Ordering::Release, // success + Ordering::Relaxed, // failure + ) { + Ok(_) => break Some(nn_head), + // interrupt occurred or other core made a successful STREX op on the head + Err(_) => continue, + } + } else { + // stack is observed as empty + break None; + } + } + } +} diff --git a/src/pool/mod.rs b/src/pool/mod.rs index a8dafdfd16..d4165bd312 100644 --- 
a/src/pool/mod.rs +++ b/src/pool/mod.rs @@ -76,10 +76,11 @@ //! let set_order = ..; //! //! // `self.head` has type `AtomicPtr>` +//! // where `struct Node { next: AtomicPtr>, data: UnsafeCell }` //! let mut head = self.head.load(fetch_order); //! loop { //! if let Some(nn_head) = NonNull::new(head) { -//! let next = unsafe { (*head).next }; +//! let next = unsafe { (*head).next.load(Ordering::Relaxed) }; //! //! // <~ preempted //! @@ -150,6 +151,63 @@ //! while the current core is somewhere between LDREX and STREX then the current core will fail its //! STREX operation. //! +//! # x86_64 support / limitations +//! +//! x86_64 support is a gamble. Yes, a gamble. Do you feel lucky enough to use `Pool` on x86_64? +//! +//! As it's not possible to implement *ideal* LL/SC semantics (\*) on x86_64 the architecture is +//! susceptible to the ABA problem described above. To *reduce the chances* of ABA occurring in +//! practice we use version tags (keyword: IBM ABA-prevention tags). Again, this approach does +//! *not* fix / prevent / avoid the ABA problem; it only reduces the chance of it occurring in +//! practice but the chances of it occurring are not reduced to zero. +//! +//! How we have implemented version tags: instead of using an `AtomicPtr` to link the stack `Node`s +//! we use an `AtomicUsize` where the 64-bit `usize` is always comprised of a monotonically +//! increasing 32-bit tag (higher bits) and a 32-bit signed address offset. The address of a node is +//! computed by adding the 32-bit offset to an "anchor" address (the address of a static variable +//! that lives somewhere in the `.bss` linker section). The tag is increased every time a node is +//! popped (removed) from the stack. +//! +//! To see how version tags can prevent ABA consider the example from the previous section. Let's +//! start with a stack in this state: `(~A, 0) -> (~B, 1) -> (~C, 2)`, where `~A` represents the +//! 
address of node A as a 32-bit offset from the "anchor" and the second tuple element (e.g. `0`) +//! indicates the version of the node. For simplicity, assume a single core system: thread T1 is +//! performing `pop` and before `CAS(&self.head, (~A, 0), (~B, 1))` is executed a context switch +//! occurs and the core resumes T2. T2 pops the stack twice and pushes A back into the stack; +//! because the `pop` operation increases the version the stack ends in the following state: `(~A, +//! 1) -> (~C, 2)`. Now if T1 is resumed the CAS operation will fail because `self.head` is `(~A, +//! 1)` and not `(~A, 0)`. +//! +//! When can version tags fail to prevent ABA? Using the previous example: if T2 performs a `push` +//! followed by a `pop` `(1 << 32) - 1` times before doing its original `pop` - `pop` - `push` +//! operation then ABA will occur because the version tag of node `A` will wraparound to its +//! original value of `0` and the CAS operation in T1 will succeed and corrupt the stack. +//! +//! It does seem unlikely that (1) a thread will perform the above operation and (2) that the above +//! operation will complete within one time slice, assuming time sliced threads. If you have thread +//! priorities then the above operation could occur during the lifetime of many high priorities +//! threads if T1 is running at low priority. +//! +//! Other implementations of version tags use more than 32 bits in their tags (e.g. "Scalable +//! Lock-Free Dynamic Memory Allocation" uses 42-bit tags in its super blocks). In theory, one could +//! use double-word CAS on x86_64 to pack a 64-bit tag and a 64-bit pointer in a double-word but +//! this CAS operation is not exposed in the standard library (and I think it's not available on +//! older x86_64 processors?) +//! +//! (\*) Apparently one can emulate proper LL/SC semantics on x86_64 using hazard pointers (?) -- +//! the technique appears to be documented in "ABA Prevention Using Single-Word Instructions", which +//! 
is not public AFAICT -- but hazard pointers require Thread Local Storage (TLS), which is a +//! non-starter for a `no_std` library like `heapless`. +//! +//! ## x86_64 Limitations +//! +//! Because stack nodes must be located within +- 2 GB of the hidden `ANCHOR` variable, which +//! lives in the `.bss` section, `Pool` may not be able to manage static references created using +//! `Box::leak` -- these heap allocated chunks of memory may live in a very different address space. +//! When the `Pool` is unable to manage a node because of its address it will simply discard it: +//! `Pool::grow*` methods return the number of new memory blocks added to the pool; if these methods +//! return `0` it means the `Pool` is unable to manage the memory given to them. +//! //! # References //! //! 1. [Cortex-M3 Devices Generic User Guide (DUI 0552A)][0], Section 2.2.7 "Synchronization @@ -161,6 +219,10 @@ //! semaphores" //! //! [1]: https://static.docs.arm.com/ddi0403/eb/DDI0403E_B_armv7m_arm.pdf +//! +//! 3. "Scalable Lock-Free Dynamic Memory Allocation" Michael, Maged M. +//! +//! 4. "Hazard pointers: Safe memory reclamation for lock-free objects." Michael, Maged M. use core::{any::TypeId, mem, sync::atomic::Ordering}; use core::{ @@ -170,27 +232,30 @@ use core::{ marker::PhantomData, mem::MaybeUninit, ops::{Deref, DerefMut}, - ptr::{self, NonNull}, + ptr, sync::atomic::AtomicPtr, }; use as_slice::{AsMutSlice, AsSlice}; +pub use stack::Node; +use stack::{Ptr, Stack}; + pub mod singleton; +#[cfg_attr(target_arch = "x86_64", path = "cas.rs")] +#[cfg_attr(not(target_arch = "x86_64"), path = "llsc.rs")] +mod stack; /// A lock-free memory pool pub struct Pool { - head: AtomicPtr>, + stack: Stack, // Current implementation is unsound on architectures that don't have LL/SC semantics so this // struct is not `Sync` on those platforms _not_send_or_sync: PhantomData<*const ()>, } -// NOTE: Here we lie about `Pool` implementing `Sync` on x86_64. 
This is not true but it lets us -// test the `pool!` and `singleton::Pool` abstractions. We just have to be careful not to use the -// pool in a multi-threaded context -#[cfg(any(armv7a, armv7r, armv7m, armv8m_main, test))] +#[cfg(any(armv7a, armv7r, armv7m, armv8m_main, target_arch = "x86_64"))] unsafe impl Sync for Pool {} unsafe impl Send for Pool {} @@ -199,7 +264,7 @@ impl Pool { /// Creates a new empty pool pub const fn new() -> Self { Pool { - head: AtomicPtr::new(ptr::null_mut()), + stack: Stack::new(), _not_send_or_sync: PhantomData, } @@ -211,7 +276,14 @@ impl Pool { /// /// *NOTE:* This method does *not* have bounded execution time because it contains a CAS loop pub fn alloc(&self) -> Option> { - if let Some(node) = self.pop() { + if mem::size_of::() == 0 { + return Some(Box { + node: Ptr::dangling(), + _state: PhantomData, + }); + } + + if let Some(node) = self.stack.try_pop() { Some(Box { node, _state: PhantomData, @@ -236,7 +308,12 @@ impl Pool { } } - self.push(value.node) + // no operation + if mem::size_of::() == 0 { + return; + } + + self.stack.push(value.node) } /// Increases the capacity of the pool @@ -245,12 +322,17 @@ impl Pool { /// /// This method returns the number of *new* blocks that can be allocated. 
pub fn grow(&self, memory: &'static mut [u8]) -> usize { + let sz = mem::size_of::>(); + + if sz == 0 { + // SZT use no memory so a pool of SZT always has maximum capacity + return usize::max_value(); + } + let mut p = memory.as_mut_ptr(); let mut len = memory.len(); let align = mem::align_of::>(); - let sz = mem::size_of::>(); - let rem = (p as usize) % align; if rem != 0 { let offset = align - rem; @@ -266,7 +348,19 @@ impl Pool { let mut n = 0; while len >= sz { - self.push(unsafe { NonNull::new_unchecked(p as *mut _) }); + match () { + #[cfg(target_arch = "x86_64")] + () => { + if let Some(p) = Ptr::new(p as *mut _) { + self.stack.push(p); + } + } + + #[cfg(not(target_arch = "x86_64"))] + () => { + self.stack.push(unsafe { Ptr::new_unchecked(p as *mut _) }); + } + } n += 1; p = unsafe { p.add(sz) }; @@ -284,71 +378,33 @@ impl Pool { where A: AsMutSlice>, { + if mem::size_of::() == 0 { + return usize::max_value(); + } + let nodes = unsafe { (*memory.as_mut_ptr()).as_mut_slice() }; let cap = nodes.len(); for p in nodes { - self.push(NonNull::from(p)) - } - cap - } - - fn pop(&self) -> Option>> { - // NOTE `Ordering`s come from crossbeam's (v0.6.0) `TreiberStack` - - loop { - let head = self.head.load(Ordering::Acquire); - if let Some(nn_head) = NonNull::new(head) { - let next = unsafe { (*head).next }; - - match self.head.compare_exchange_weak( - head, - next, - Ordering::Release, // success - Ordering::Relaxed, // failure - ) { - Ok(_) => break Some(nn_head), - // interrupt occurred or other core made a successful STREX op on the head - Err(_) => continue, + match () { + #[cfg(target_arch = "x86_64")] + () => { + if let Some(p) = Ptr::new(p) { + self.stack.push(p); + } } - } else { - // stack is observed as empty - break None; - } - } - } - fn push(&self, mut new_head: NonNull>) { - // NOTE `Ordering`s come from crossbeam's (v0.6.0) `TreiberStack` - - let mut head = self.head.load(Ordering::Relaxed); - loop { - unsafe { new_head.as_mut().next = head } - - match 
self.head.compare_exchange_weak( - head, - new_head.as_ptr(), - Ordering::Release, // success - Ordering::Relaxed, // failure - ) { - Ok(_) => return, - // interrupt occurred or other core made a successful STREX op on the head - Err(p) => head = p, + #[cfg(not(target_arch = "x86_64"))] + () => self.stack.push(NonNull::from(p)), } } + cap } } -/// Unfortunate implementation detail required to use the -/// [`Pool.grow_exact`](struct.Pool.html#method.grow_exact) method -pub struct Node { - data: UnsafeCell, - next: *mut Node, -} - /// A memory block pub struct Box { _state: PhantomData, - node: NonNull>, + node: Ptr>, } impl Box { @@ -513,7 +569,8 @@ mod tests { #[test] fn sanity() { - static mut MEMORY: [u8; 31] = [0; 31]; + const SZ: usize = 2 * mem::size_of::>() - 1; + static mut MEMORY: [u8; SZ] = [0; SZ]; static POOL: Pool = Pool::new(); diff --git a/src/pool/singleton.rs b/src/pool/singleton.rs index 785c900091..4d2979bbd2 100644 --- a/src/pool/singleton.rs +++ b/src/pool/singleton.rs @@ -15,7 +15,7 @@ use as_slice::{AsMutSlice, AsSlice}; use super::{Init, Node, Uninit}; /// Instantiates a pool as a global singleton -#[cfg(any(armv7a, armv7r, armv7m, armv8m_main, test))] +#[cfg(any(armv7a, armv7r, armv7m, armv8m_main, target_arch = "x86_64"))] #[macro_export] macro_rules! 
pool { ($(#[$($attr:tt)*])* $ident:ident: $ty:ty) => { @@ -194,7 +194,9 @@ where } } - P::ptr().push(self.inner.node) + if mem::size_of::() != 0 { + P::ptr().stack.push(self.inner.node) + } } } @@ -291,11 +293,12 @@ mod tests { sync::atomic::{AtomicUsize, Ordering}, }; - use super::Pool; + use super::{super::Node, Pool}; #[test] fn sanity() { - static mut MEMORY: [u8; 31] = [0; 31]; + const SZ: usize = 2 * mem::size_of::>() - 1; + static mut MEMORY: [u8; SZ] = [0; SZ]; pool!(A: u8); @@ -336,9 +339,6 @@ mod tests { } pool!(A: X); - static mut MEMORY: [u8; 23] = [0; 23]; - - A::grow(unsafe { &mut MEMORY }); let x = A::alloc().unwrap().init(X::new()); let y = A::alloc().unwrap().init(X::new()); diff --git a/tests/tsan.rs b/tests/tsan.rs index 890d0008b7..cdef4df2e2 100644 --- a/tests/tsan.rs +++ b/tests/tsan.rs @@ -232,3 +232,34 @@ fn iterator_properly_wraps() { } assert_eq!(expected, actual) } + +#[test] +fn pool() { + use heapless::pool::singleton::Pool as _; + + static mut M: [u8; (N + 1) * 8] = [0; (N + 1) * 8]; + const N: usize = 16 * 1024; + heapless::pool!(A: [u8; 8]); + + A::grow(unsafe { &mut M }); + + Pool::new(2).scoped(move |scope| { + scope.execute(move || { + for _ in 0..N / 4 { + let a = A::alloc().unwrap(); + let b = A::alloc().unwrap(); + drop(a); + let b = b.init([1; 8]); + drop(b); + } + }); + + scope.execute(move || { + for _ in 0..N / 2 { + let a = A::alloc().unwrap(); + let a = a.init([2; 8]); + drop(a); + } + }); + }); +} From 596a1f03f13a7b45b59659efc0d7ca050d54ed93 Mon Sep 17 00:00:00 2001 From: Jorge Aparicio Date: Sun, 3 May 2020 16:09:56 +0200 Subject: [PATCH 02/37] fix warnings --- src/lib.rs | 3 +-- src/pool/cas.rs | 2 +- src/pool/llsc.rs | 4 +++- src/pool/mod.rs | 6 ++---- 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index b2b88ff616..221e169c80 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -73,8 +73,7 @@ #![deny(missing_docs)] #![deny(rust_2018_compatibility)] #![deny(rust_2018_idioms)] -// 
#![deny(warnings)] -#![allow(warnings)] // FIXME +#![deny(warnings)] pub use binary_heap::BinaryHeap; pub use generic_array::typenum::consts; diff --git a/src/pool/cas.rs b/src/pool/cas.rs index ffbedeaf21..2029502676 100644 --- a/src/pool/cas.rs +++ b/src/pool/cas.rs @@ -7,7 +7,7 @@ use core::{ cell::UnsafeCell, convert::TryFrom, marker::PhantomData, - mem::{self, MaybeUninit}, + mem, num::NonZeroUsize, ptr::NonNull, sync::atomic::{AtomicUsize, Ordering}, diff --git a/src/pool/llsc.rs b/src/pool/llsc.rs index 3d1c61f437..1aec52761c 100644 --- a/src/pool/llsc.rs +++ b/src/pool/llsc.rs @@ -4,9 +4,11 @@ pub use core::ptr::NonNull as Ptr; use core::{ cell::UnsafeCell, ptr, - sync::atomic::{self, AtomicPtr, Ordering}, + sync::atomic::{AtomicPtr, Ordering}, }; +/// Unfortunate implementation detail required to use the +/// [`Pool.grow_exact`](struct.Pool.html#method.grow_exact) method pub struct Node { next: AtomicPtr>, pub(crate) data: UnsafeCell, diff --git a/src/pool/mod.rs b/src/pool/mod.rs index d4165bd312..cbc3eb0fba 100644 --- a/src/pool/mod.rs +++ b/src/pool/mod.rs @@ -224,16 +224,14 @@ //! //! 4. "Hazard pointers: Safe memory reclamation for lock-free objects." Michael, Maged M. 
-use core::{any::TypeId, mem, sync::atomic::Ordering}; +use core::{any::TypeId, mem}; use core::{ - cell::UnsafeCell, cmp, fmt, hash::{Hash, Hasher}, marker::PhantomData, mem::MaybeUninit, ops::{Deref, DerefMut}, ptr, - sync::atomic::AtomicPtr, }; use as_slice::{AsMutSlice, AsSlice}; @@ -394,7 +392,7 @@ impl Pool { } #[cfg(not(target_arch = "x86_64"))] - () => self.stack.push(NonNull::from(p)), + () => self.stack.push(core::ptr::NonNull::from(p)), } } cap From b9d7128e1efdfe4e2c454e024c5a6a78f474240e Mon Sep 17 00:00:00 2001 From: Jorge Aparicio Date: Sun, 3 May 2020 17:07:46 +0200 Subject: [PATCH 03/37] x86: put Sync impl behind a Cargo feature --- Cargo.toml | 2 ++ ci/script.sh | 8 ++++---- src/pool/mod.rs | 12 +++++++++++- src/pool/singleton.rs | 8 +++++++- 4 files changed, 24 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f1a0cb1135..73be0c7e7c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -23,6 +23,8 @@ version = "0.5.4" default = ["cas"] cas = [] ufmt-impl = ["ufmt-write"] +# read the docs before enabling: makes `Pool` Sync on x86_64 +x86-sync-pool = [] # only for tests __trybuild = [] diff --git a/ci/script.sh b/ci/script.sh index 48f9acff76..1d1f266644 100644 --- a/ci/script.sh +++ b/ci/script.sh @@ -5,8 +5,8 @@ main() { cargo check --target $TARGET --features 'serde' if [ $TARGET = x86_64-unknown-linux-gnu ]; then - cargo test --target $TARGET --features 'serde' - cargo test --target $TARGET --release --features 'serde' + cargo test --test cpass --target $TARGET --features 'serde' + cargo test --test cpass --target $TARGET --release --features 'serde' if [ $MSRV = 1 ]; then cd cfail @@ -19,8 +19,8 @@ main() { export RUSTFLAGS="-Z sanitizer=thread" export TSAN_OPTIONS="suppressions=$(pwd)/suppressions.txt" - cargo test --test tsan --target $TARGET - cargo test --test tsan --target $TARGET --release + cargo test --test tsan --features x86-sync-pool --target $TARGET + cargo test --test tsan --features x86-sync-pool --target $TARGET 
--release fi fi } diff --git a/src/pool/mod.rs b/src/pool/mod.rs index cbc3eb0fba..739165f070 100644 --- a/src/pool/mod.rs +++ b/src/pool/mod.rs @@ -153,6 +153,8 @@ //! //! # x86_64 support / limitations //! +//! *NOTE* `Pool` is only `Sync` on `x86_64` if the Cargo feature "x86-sync-pool" is enabled +//! //! x86_64 support is a gamble. Yes, a gamble. Do you feel lucky enough to use `Pool` on x86_64? //! //! As it's not possible to implement *ideal* LL/SC semantics (\*) on x86_64 the architecture is @@ -253,7 +255,15 @@ pub struct Pool { _not_send_or_sync: PhantomData<*const ()>, } -#[cfg(any(armv7a, armv7r, armv7m, armv8m_main, target_arch = "x86_64"))] +// NOTE(any(test)) makes testing easier (no need to enable Cargo features for testing) +#[cfg(any( + armv7a, + armv7r, + armv7m, + armv8m_main, + all(target_arch = "x86_64", feature = "x86-sync-pool"), + test +))] unsafe impl Sync for Pool {} unsafe impl Send for Pool {} diff --git a/src/pool/singleton.rs b/src/pool/singleton.rs index 4d2979bbd2..b3e363057a 100644 --- a/src/pool/singleton.rs +++ b/src/pool/singleton.rs @@ -15,7 +15,13 @@ use as_slice::{AsMutSlice, AsSlice}; use super::{Init, Node, Uninit}; /// Instantiates a pool as a global singleton -#[cfg(any(armv7a, armv7r, armv7m, armv8m_main, target_arch = "x86_64"))] +#[cfg(any( + armv7a, + armv7r, + armv7m, + armv8m_main, + all(target_arch = "x86_64", feature = "x86-sync-pool"), +))] #[macro_export] macro_rules! 
pool { ($(#[$($attr:tt)*])* $ident:ident: $ty:ty) => { From b0129f540881277b7132a526d405ae131d5864ee Mon Sep 17 00:00:00 2001 From: Jorge Aparicio Date: Mon, 4 May 2020 20:45:52 +0200 Subject: [PATCH 04/37] 1.36.0 compatibility --- src/pool/cas.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/pool/cas.rs b/src/pool/cas.rs index 2029502676..4632397527 100644 --- a/src/pool/cas.rs +++ b/src/pool/cas.rs @@ -150,10 +150,7 @@ impl Ptr { fn as_raw(&self) -> NonNull { unsafe { NonNull::new_unchecked( - anchor::() - .cast::() - .offset(self.offset() as isize) - .cast(), + (anchor::() as *mut u8).offset(self.offset() as isize) as *mut T ) } } From 357a312dba36763169dfc4f173dde5626f9643f3 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Mon, 4 May 2020 14:38:47 +0200 Subject: [PATCH 05/37] Preparing for v0.5.5 --- CHANGELOG.md | 13 ++++++++++++- Cargo.toml | 2 +- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 51abed0116..69e9dd8e1b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,16 @@ and this project adheres to [Semantic Versioning](http://semver.org/). ## [Unreleased] +## [v0.5.5] - 2020-05-04 + +### Added + +- Added `HistoryBuffer` +- Added extra methods to `Vec`: `from_slice`, `starts_with`, `ends_with` +- Optional `ufmt` support for `String` and `Vec` +- Added `pool` support for bare-metal `armebv7r-` targets +- Added Sync to `pool` for `x86` + ## [v0.5.4] - 2020-04-06 ### Added @@ -294,7 +304,8 @@ architecture. 
- Initial release -[Unreleased]: https://github.com/japaric/heapless/compare/v0.5.4...HEAD +[Unreleased]: https://github.com/japaric/heapless/compare/v0.5.5...HEAD +[v0.5.5]: https://github.com/japaric/heapless/compare/v0.5.4...v0.5.5 [v0.5.4]: https://github.com/japaric/heapless/compare/v0.5.3...v0.5.4 [v0.5.3]: https://github.com/japaric/heapless/compare/v0.5.2...v0.5.3 [v0.5.2]: https://github.com/japaric/heapless/compare/v0.5.1...v0.5.2 diff --git a/Cargo.toml b/Cargo.toml index 73be0c7e7c..0b2bbdb690 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,7 +17,7 @@ keywords = [ license = "MIT OR Apache-2.0" name = "heapless" repository = "https://github.com/japaric/heapless" -version = "0.5.4" +version = "0.5.5" [features] default = ["cas"] From 87917e059b479063a80d4d8ba9449ba02fa3c9ab Mon Sep 17 00:00:00 2001 From: Bryan Kadzban Date: Sat, 11 Apr 2020 21:23:57 -0700 Subject: [PATCH 06/37] Make Producer<..., SingleCore> Send, like Consumer I assume there's no reason that only multi-core Producers are marked Send, while Consumers are not (they're pretty symmetrical). --- src/spsc/split.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/spsc/split.rs b/src/spsc/split.rs index 3614af1f62..6676ac5681 100644 --- a/src/spsc/split.rs +++ b/src/spsc/split.rs @@ -61,11 +61,12 @@ where _marker: PhantomData<&'a ()>, } -unsafe impl<'a, T, N, U> Send for Producer<'a, T, N, U> +unsafe impl<'a, T, N, U, C> Send for Producer<'a, T, N, U, C> where N: ArrayLength, T: Send, U: sealed::Uxx, + C: sealed::XCore, { } From 1790c4f30f6e5197d739dec8757faced7360ac6c Mon Sep 17 00:00:00 2001 From: Willem Date: Wed, 22 Apr 2020 09:44:26 +0200 Subject: [PATCH 07/37] indexmap: expose PowerOfTwo, Bucket and Pos This change allows the create custom structs with size arguments outside the heapless crate itself. 
--- src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 221e169c80..4d95fe2427 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -76,9 +76,9 @@ #![deny(warnings)] pub use binary_heap::BinaryHeap; -pub use generic_array::typenum::consts; +pub use generic_array::typenum::{consts, PowerOfTwo}; pub use generic_array::ArrayLength; -pub use indexmap::{FnvIndexMap, IndexMap}; +pub use indexmap::{Bucket, FnvIndexMap, IndexMap, Pos}; pub use indexset::{FnvIndexSet, IndexSet}; pub use linear_map::LinearMap; pub use string::String; From 0ab806d5cd79e9eefc7566afbfc12f65a7bd9837 Mon Sep 17 00:00:00 2001 From: Andres Vahter Date: Sat, 9 May 2020 19:46:06 +0300 Subject: [PATCH 08/37] readme: add instructions for tests --- README.md | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index a0f8592ef5..0fb6fef6a9 100644 --- a/README.md +++ b/README.md @@ -5,11 +5,20 @@ > `static` friendly data structures that don't require dynamic memory allocation -# [Documentation](https://japaric.github.io/heapless/heapless/index.html) +## [Documentation](https://japaric.github.io/heapless/heapless/index.html) -# [Change log](CHANGELOG.md) +## [Change log](CHANGELOG.md) -# License +## Tests + +```bash +# run all +cargo test --features 'serde','x86-sync-pool' +# run only for example histbuf tests +cargo test histbuf --features 'serde','x86-sync-pool' +``` + +## License Licensed under either of From 26dcfa3ae6688ccb8652e45146382caeb7550012 Mon Sep 17 00:00:00 2001 From: Andres Vahter Date: Sat, 9 May 2020 20:05:19 +0300 Subject: [PATCH 09/37] histbuf: replace slow modulo operatins on cortex m0 `%` is extremely costly --- src/histbuf.rs | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/src/histbuf.rs b/src/histbuf.rs index 31d36cd965..85f0650c15 100644 --- a/src/histbuf.rs +++ b/src/histbuf.rs @@ -122,7 +122,10 @@ where /// Writes an element to the buffer, 
overwriting the oldest value. pub fn write(&mut self, t: T) { self.data[self.write_at] = t; - self.write_at = (self.write_at + 1) % self.len(); + self.write_at = self.write_at + 1; + if self.write_at == self.len() { + self.write_at = 0; + } } /// Clones and writes all elements in a slice to the buffer. @@ -152,7 +155,11 @@ where /// assert_eq!(x.recent(), &10); /// ``` pub fn recent(&self) -> &T { - &self.data[(self.write_at + self.len() - 1) % self.len()] + if self.write_at == 0 { + &self.data[(self.len() - 1)] + } else { + &self.data[(self.write_at - 1)] + } } /// Returns the array slice backing the buffer, without keeping track From 9e08514d39650bc44e5601fd50b2bde1c19019b3 Mon Sep 17 00:00:00 2001 From: Andres Vahter Date: Sat, 9 May 2020 21:32:55 +0300 Subject: [PATCH 10/37] histbuf: fix style --- src/histbuf.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/histbuf.rs b/src/histbuf.rs index 85f0650c15..3239f83635 100644 --- a/src/histbuf.rs +++ b/src/histbuf.rs @@ -122,7 +122,7 @@ where /// Writes an element to the buffer, overwriting the oldest value. 
pub fn write(&mut self, t: T) { self.data[self.write_at] = t; - self.write_at = self.write_at + 1; + self.write_at += 1; if self.write_at == self.len() { self.write_at = 0; } @@ -156,9 +156,9 @@ where /// ``` pub fn recent(&self) -> &T { if self.write_at == 0 { - &self.data[(self.len() - 1)] + &self.data[self.len() - 1] } else { - &self.data[(self.write_at - 1)] + &self.data[self.write_at - 1] } } From 0364e0576607caf49d198c517801f278bba53bcd Mon Sep 17 00:00:00 2001 From: samlich <1349989+samlich@users.noreply.github.com> Date: Fri, 24 Jul 2020 12:25:54 +0000 Subject: [PATCH 11/37] Vec: add set_len --- src/vec.rs | 99 +++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 98 insertions(+), 1 deletion(-) diff --git a/src/vec.rs b/src/vec.rs index f3553fce86..70c1878d76 100644 --- a/src/vec.rs +++ b/src/vec.rs @@ -327,6 +327,103 @@ where self.resize(new_len, T::default()) } + /// Forces the length of the vector to `new_len`. + /// + /// This is a low-level operation that maintains none of the normal + /// invariants of the type. Normally changing the length of a vector + /// is done using one of the safe operations instead, such as + /// [`truncate`], [`resize`], [`extend`], or [`clear`]. + /// + /// [`truncate`]: #method.truncate + /// [`resize`]: #method.resize + /// [`extend`]: https://doc.rust-lang.org/stable/core/iter/trait.Extend.html#tymethod.extend + /// [`clear`]: #method.clear + /// + /// # Safety + /// + /// - `new_len` must be less than or equal to [`capacity()`]. + /// - The elements at `old_len..new_len` must be initialized. 
+ /// + /// [`capacity()`]: #method.capacity + /// + /// # Examples + /// + /// This method can be useful for situations in which the vector + /// is serving as a buffer for other code, particularly over FFI: + /// + /// ```no_run + /// # #![allow(dead_code)] + /// use heapless::Vec; + /// use heapless::consts::*; + /// + /// # // This is just a minimal skeleton for the doc example; + /// # // don't use this as a starting point for a real library. + /// # pub struct StreamWrapper { strm: *mut core::ffi::c_void } + /// # const Z_OK: i32 = 0; + /// # extern "C" { + /// # fn deflateGetDictionary( + /// # strm: *mut core::ffi::c_void, + /// # dictionary: *mut u8, + /// # dictLength: *mut usize, + /// # ) -> i32; + /// # } + /// # impl StreamWrapper { + /// pub fn get_dictionary(&self) -> Option> { + /// // Per the FFI method's docs, "32768 bytes is always enough". + /// let mut dict = Vec::new(); + /// let mut dict_length = 0; + /// // SAFETY: When `deflateGetDictionary` returns `Z_OK`, it holds that: + /// // 1. `dict_length` elements were initialized. + /// // 2. `dict_length` <= the capacity (32_768) + /// // which makes `set_len` safe to call. + /// unsafe { + /// // Make the FFI call... + /// let r = deflateGetDictionary(self.strm, dict.as_mut_ptr(), &mut dict_length); + /// if r == Z_OK { + /// // ...and update the length to what was initialized. 
+ /// dict.set_len(dict_length); + /// Some(dict) + /// } else { + /// None + /// } + /// } + /// } + /// # } + /// ``` + /// + /// While the following example is sound, there is a memory leak since + /// the inner vectors were not freed prior to the `set_len` call: + /// + /// ``` + /// use core::iter::FromIterator; + /// use heapless::Vec; + /// use heapless::consts::*; + /// + /// let mut vec = Vec::, U3>::from_iter( + /// [ + /// Vec::from_iter([1, 0, 0].iter().cloned()), + /// Vec::from_iter([0, 1, 0].iter().cloned()), + /// Vec::from_iter([0, 0, 1].iter().cloned()), + /// ] + /// .iter() + /// .cloned() + /// ); + /// // SAFETY: + /// // 1. `old_len..0` is empty so no elements need to be initialized. + /// // 2. `0 <= capacity` always holds whatever `capacity` is. + /// unsafe { + /// vec.set_len(0); + /// } + /// ``` + /// + /// Normally, here, one would use [`clear`] instead to correctly drop + /// the contents and thus not leak memory. + pub unsafe fn set_len(&mut self, new_len: usize) { + debug_assert!(new_len <= self.capacity()); + + self.0.len = new_len + } + /// Removes an element from the vector and returns it. /// /// The removed element is replaced by the last element of the vector. @@ -726,8 +823,8 @@ where #[cfg(test)] mod tests { - use as_slice::AsSlice; use crate::{consts::*, Vec}; + use as_slice::AsSlice; use core::fmt::Write; #[test] From 6ba462eb93185f7418feffa19320912cffa32687 Mon Sep 17 00:00:00 2001 From: samlich <1349989+samlich@users.noreply.github.com> Date: Fri, 24 Jul 2020 12:26:22 +0000 Subject: [PATCH 12/37] String: add as_mut_vec --- src/string.rs | 28 ++++++++++++++++++++++++++++ src/vec.rs | 2 ++ 2 files changed, 30 insertions(+) diff --git a/src/string.rs b/src/string.rs index 4c8222699d..90dc54e66d 100644 --- a/src/string.rs +++ b/src/string.rs @@ -178,6 +178,34 @@ where unsafe { str::from_utf8_unchecked_mut(self.0.vec.as_mut_slice()) } } + /// Returns a mutable reference to the contents of this `String`. 
+ /// + /// # Safety + /// + /// This function is unsafe because it does not check that the bytes passed + /// to it are valid UTF-8. If this constraint is violated, it may cause + /// memory unsafety issues with future users of the `String`, as the rest of + /// the library assumes that `String`s are valid UTF-8. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// let mut s = String::from("hello"); + /// + /// unsafe { + /// let vec = s.as_mut_vec(); + /// assert_eq!(&[104, 101, 108, 108, 111][..], &vec[..]); + /// + /// vec.reverse(); + /// } + /// assert_eq!(s, "olleh"); + /// ``` + pub unsafe fn as_mut_vec(&mut self) -> &mut Vec { + &mut *(&mut self.0.vec as *mut crate::i::Vec> as *mut Vec) + } + /// Appends a given string slice onto the end of this `String`. /// /// # Examples diff --git a/src/vec.rs b/src/vec.rs index 70c1878d76..65121224ef 100644 --- a/src/vec.rs +++ b/src/vec.rs @@ -160,6 +160,8 @@ where /// } /// assert_eq!(vec, [7, 1, 2, 3]); /// ``` +// repr(transparent) is needed for [`String::as_mut_vec`] +#[repr(transparent)] pub struct Vec(#[doc(hidden)] pub crate::i::Vec>) where N: ArrayLength; From f962e3a45fbccd01d002f4e07d57ba0127e39477 Mon Sep 17 00:00:00 2001 From: Per Lindgren Date: Thu, 20 Aug 2020 16:00:55 +0200 Subject: [PATCH 13/37] Initial conversion to const generics vec passes tests vec passes tests with docs as well (besides one with FromIter) vec passes tests with docs as well (besides one with FromIter) exposing full API passing all current tests starting with string string test passes mostly string test passes ufmt passes TODO removed binary_heap wip binary_heap passes tests sealed passes spsc wip spsc wip2 split wip3 spcs and split passes --lib tests spcs and split passes --lib tests spcs and split passes --lib tests spcs and split passes all tests (doc + lib) indexmap wip indexmap passes --lib test indexmap passes all tests (lib + doc) indexset passes all tests (lib + doc) indexset passes all tests (lib + doc) 
linear map wip linear map all test (lib + doc) passes, drop not tested, into_iter(mut self) not implemented history buffer all test pass (doc + lib), Copy instead of clone atm serde does not work pool works, serde still not serde wip serde wip serde wip serde wip --- Cargo.toml | 2 +- src/binary_heap.rs | 153 +++++------ src/de.rs | 63 ++--- src/histbuf.rs | 67 ++--- src/indexmap.rs | 138 ++++------ src/indexset.rs | 177 +++++------- src/lib.rs | 26 +- src/linear_map.rs | 189 ++++++------- src/sealed.rs | 1 - src/ser.rs | 24 +- src/spsc/mod.rs | 254 +++++++----------- src/spsc/split.rs | 77 +++--- src/string.rs | 370 +++++++++++++------------- src/ufmt.rs | 24 +- src/vec.rs | 650 +++++++++++++++++++++------------------------ tests/cpass.rs | 16 +- tests/tsan.rs | 472 ++++++++++++++++---------------- 17 files changed, 1207 insertions(+), 1496 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0b2bbdb690..38b3988175 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,7 @@ scoped_threadpool = "0.1.8" [dependencies] as-slice = "0.1.0" -generic-array = "0.13.0" +# generic-array = "0.13.0" hash32 = "0.1.0" [dependencies.serde] diff --git a/src/binary_heap.rs b/src/binary_heap.rs index aa56c57b72..15d16aa948 100644 --- a/src/binary_heap.rs +++ b/src/binary_heap.rs @@ -16,9 +16,8 @@ use core::{ ptr, slice, }; -use generic_array::{ArrayLength, GenericArray}; - use crate::sealed::binary_heap::Kind; +use crate::vec::Vec; /// Min-heap pub enum Min {} @@ -26,17 +25,6 @@ pub enum Min {} /// Max-heap pub enum Max {} -impl crate::i::BinaryHeap { - /// `BinaryHeap` `const` constructor; wrap the returned value in - /// [`BinaryHeap`](../struct.BinaryHeap.html) - pub const fn new() -> Self { - Self { - _kind: PhantomData, - data: crate::i::Vec::new(), - } - } -} - /// A priority queue implemented with a binary heap. /// /// This can be either a min-heap or a max-heap. 
@@ -47,9 +35,8 @@ impl crate::i::BinaryHeap { /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; -/// use heapless::consts::*; /// -/// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new(); +/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// /// // We can use peek to look at the next item in the heap. In this case, /// // there's no items in there yet so we get None. @@ -84,18 +71,19 @@ impl crate::i::BinaryHeap { /// // The heap should now be empty. /// assert!(heap.is_empty()) /// ``` -pub struct BinaryHeap( - #[doc(hidden)] pub crate::i::BinaryHeap, KIND>, -) + +pub struct BinaryHeap where T: Ord, - N: ArrayLength, - KIND: Kind; + K: Kind, +{ + pub(crate) _kind: PhantomData, + pub(crate) data: Vec, +} -impl BinaryHeap +impl BinaryHeap where T: Ord, - N: ArrayLength, K: Kind, { /* Constructors */ @@ -103,32 +91,33 @@ where /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; - /// use heapless::consts::*; /// /// // allocate the binary heap on the stack - /// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new(); + /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(4).unwrap(); /// /// // allocate the binary heap in a static variable - /// static mut HEAP: BinaryHeap = BinaryHeap(heapless::i::BinaryHeap::new()); + /// static mut HEAP: BinaryHeap = BinaryHeap::new(); /// ``` - pub fn new() -> Self { - BinaryHeap(crate::i::BinaryHeap::new()) + pub const fn new() -> Self { + Self { + _kind: PhantomData, + data: Vec::new(), + } } /* Public API */ /// Returns the capacity of the binary heap. pub fn capacity(&self) -> usize { - self.0.data.capacity() + self.data.capacity() } /// Drops all items from the binary heap. 
/// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; - /// use heapless::consts::*; /// - /// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new(); + /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(1).unwrap(); /// heap.push(3).unwrap(); /// @@ -139,32 +128,30 @@ where /// assert!(heap.is_empty()); /// ``` pub fn clear(&mut self) { - self.0.data.clear() + self.data.clear() } /// Returns the length of the binary heap. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; - /// use heapless::consts::*; /// - /// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new(); + /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(1).unwrap(); /// heap.push(3).unwrap(); /// /// assert_eq!(heap.len(), 2); /// ``` pub fn len(&self) -> usize { - self.0.data.len + self.data.len() } /// Checks if the binary heap is empty. /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; - /// use heapless::consts::*; /// - /// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new(); + /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// /// assert!(heap.is_empty()); /// @@ -182,9 +169,8 @@ where /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; - /// use heapless::consts::*; /// - /// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new(); + /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(1).unwrap(); /// heap.push(2).unwrap(); /// heap.push(3).unwrap(); @@ -197,7 +183,7 @@ where /// } /// ``` pub fn iter(&self) -> slice::Iter<'_, T> { - self.0.data.as_slice().iter() + self.data.as_slice().iter() } /// Returns a mutable iterator visiting all values in the underlying vector, in arbitrary order. @@ -205,7 +191,7 @@ where /// **WARNING** Mutating the items in the binary heap can leave the heap in an inconsistent /// state. 
pub fn iter_mut(&mut self) -> slice::IterMut<'_, T> { - self.0.data.as_mut_slice().iter_mut() + self.data.as_mut_slice().iter_mut() } /// Returns the *top* (greatest if max-heap, smallest if min-heap) item in the binary heap, or @@ -213,9 +199,8 @@ where /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; - /// use heapless::consts::*; /// - /// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new(); + /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// assert_eq!(heap.peek(), None); /// /// heap.push(1).unwrap(); @@ -224,7 +209,7 @@ where /// assert_eq!(heap.peek(), Some(&5)); /// ``` pub fn peek(&self) -> Option<&T> { - self.0.data.as_slice().get(0) + self.data.as_slice().get(0) } /// Returns a mutable reference to the greatest item in the binary heap, or @@ -239,9 +224,8 @@ where /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; - /// use heapless::consts::*; /// - /// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new(); + /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// assert!(heap.peek_mut().is_none()); /// /// heap.push(1); @@ -254,7 +238,7 @@ where /// /// assert_eq!(heap.peek(), Some(&2)); /// ``` - pub fn peek_mut(&mut self) -> Option> { + pub fn peek_mut(&mut self) -> Option> { if self.is_empty() { None } else { @@ -270,9 +254,8 @@ where /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; - /// use heapless::consts::*; /// - /// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new(); + /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(1).unwrap(); /// heap.push(3).unwrap(); /// @@ -291,10 +274,10 @@ where /// Removes the *top* (greatest if max-heap, smallest if min-heap) item from the binary heap and /// returns it, without checking if the binary heap is empty. 
pub unsafe fn pop_unchecked(&mut self) -> T { - let mut item = self.0.data.pop_unchecked(); + let mut item = self.data.pop_unchecked(); if !self.is_empty() { - mem::swap(&mut item, self.0.data.as_mut_slice().get_unchecked_mut(0)); + mem::swap(&mut item, self.data.as_mut_slice().get_unchecked_mut(0)); self.sift_down_to_bottom(0); } item @@ -304,9 +287,8 @@ where /// /// ``` /// use heapless::binary_heap::{BinaryHeap, Max}; - /// use heapless::consts::*; /// - /// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new(); + /// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new(); /// heap.push(3).unwrap(); /// heap.push(5).unwrap(); /// heap.push(1).unwrap(); @@ -315,7 +297,7 @@ where /// assert_eq!(heap.peek(), Some(&5)); /// ``` pub fn push(&mut self, item: T) -> Result<(), T> { - if self.0.data.is_full() { + if self.data.is_full() { return Err(item); } @@ -326,7 +308,7 @@ where /// Pushes an item onto the binary heap without first checking if it's full. pub unsafe fn push_unchecked(&mut self, item: T) { let old_len = self.len(); - self.0.data.push_unchecked(item); + self.data.push_unchecked(item); self.sift_up(0, old_len); } @@ -335,7 +317,7 @@ where let end = self.len(); let start = pos; unsafe { - let mut hole = Hole::new(self.0.data.as_mut_slice(), pos); + let mut hole = Hole::new(self.data.as_mut_slice(), pos); let mut child = 2 * pos + 1; while child < end { let right = child + 1; @@ -354,7 +336,7 @@ where fn sift_up(&mut self, start: usize, pos: usize) -> usize { unsafe { // Take out the value at `pos` and create a hole. 
- let mut hole = Hole::new(self.0.data.as_mut_slice(), pos); + let mut hole = Hole::new(self.data.as_mut_slice(), pos); while hole.pos() > start { let parent = (hole.pos() - 1) / 2; @@ -437,20 +419,18 @@ impl<'a, T> Hole<'a, T> { /// /// [`peek_mut`]: struct.BinaryHeap.html#method.peek_mut /// [`BinaryHeap`]: struct.BinaryHeap.html -pub struct PeekMut<'a, T, N, K> +pub struct PeekMut<'a, T, K, const N: usize> where T: Ord, - N: ArrayLength, K: Kind, { - heap: &'a mut BinaryHeap, + heap: &'a mut BinaryHeap, sift: bool, } -impl Drop for PeekMut<'_, T, N, K> +impl Drop for PeekMut<'_, T, K, N> where T: Ord, - N: ArrayLength, K: Kind, { fn drop(&mut self) { @@ -460,41 +440,38 @@ where } } -impl Deref for PeekMut<'_, T, N, K> +impl Deref for PeekMut<'_, T, K, N> where T: Ord, - N: ArrayLength, K: Kind, { type Target = T; fn deref(&self) -> &T { debug_assert!(!self.heap.is_empty()); // SAFE: PeekMut is only instantiated for non-empty heaps - unsafe { self.heap.0.data.as_slice().get_unchecked(0) } + unsafe { self.heap.data.as_slice().get_unchecked(0) } } } -impl DerefMut for PeekMut<'_, T, N, K> +impl DerefMut for PeekMut<'_, T, K, N> where T: Ord, - N: ArrayLength, K: Kind, { fn deref_mut(&mut self) -> &mut T { debug_assert!(!self.heap.is_empty()); // SAFE: PeekMut is only instantiated for non-empty heaps - unsafe { self.heap.0.data.as_mut_slice().get_unchecked_mut(0) } + unsafe { self.heap.data.as_mut_slice().get_unchecked_mut(0) } } } -impl<'a, T, N, K> PeekMut<'a, T, N, K> +impl<'a, T, K, const N: usize> PeekMut<'a, T, K, N> where T: Ord, - N: ArrayLength, K: Kind, { /// Removes the peeked value from the heap and returns it. 
- pub fn pop(mut this: PeekMut<'a, T, N, K>) -> T { + pub fn pop(mut this: PeekMut<'a, T, K, N>) -> T { let value = this.heap.pop().unwrap(); this.sift = false; value @@ -512,10 +489,9 @@ impl<'a, T> Drop for Hole<'a, T> { } } -impl Default for BinaryHeap +impl Default for BinaryHeap where T: Ord, - N: ArrayLength, K: Kind, { fn default() -> Self { @@ -523,34 +499,31 @@ where } } -impl Clone for BinaryHeap +impl Clone for BinaryHeap where - N: ArrayLength, K: Kind, T: Ord + Clone, { fn clone(&self) -> Self { - BinaryHeap(crate::i::BinaryHeap { - _kind: self.0._kind, - data: self.0.data.clone(), - }) + Self { + _kind: self._kind, + data: self.data.clone(), + } } } -impl Drop for BinaryHeap +impl Drop for BinaryHeap where - N: ArrayLength, K: Kind, T: Ord, { fn drop(&mut self) { - unsafe { ptr::drop_in_place(self.0.data.as_mut_slice()) } + unsafe { ptr::drop_in_place(self.data.as_mut_slice()) } } } -impl fmt::Debug for BinaryHeap +impl fmt::Debug for BinaryHeap where - N: ArrayLength, K: Kind, T: Ord + fmt::Debug, { @@ -559,9 +532,8 @@ where } } -impl<'a, T, N, K> IntoIterator for &'a BinaryHeap +impl<'a, T, K, const N: usize> IntoIterator for &'a BinaryHeap where - N: ArrayLength, K: Kind, T: Ord, { @@ -577,19 +549,16 @@ where mod tests { use std::vec::Vec; - use crate::{ - binary_heap::{self, BinaryHeap, Min}, - consts::*, - }; + use crate::binary_heap::{BinaryHeap, Max, Min}; #[test] fn static_new() { - static mut _B: BinaryHeap = BinaryHeap(crate::i::BinaryHeap::new()); + static mut _B: BinaryHeap = BinaryHeap::new(); } #[test] fn min() { - let mut heap = BinaryHeap::<_, U16, Min>::new(); + let mut heap = BinaryHeap::<_, Min, 16>::new(); heap.push(1).unwrap(); heap.push(2).unwrap(); heap.push(3).unwrap(); @@ -641,7 +610,7 @@ mod tests { #[test] fn max() { - let mut heap = BinaryHeap::<_, U16, binary_heap::Max>::new(); + let mut heap = BinaryHeap::<_, Max, 16>::new(); heap.push(1).unwrap(); heap.push(2).unwrap(); heap.push(3).unwrap(); diff --git a/src/de.rs 
b/src/de.rs index af74019357..7da79d5821 100644 --- a/src/de.rs +++ b/src/de.rs @@ -1,6 +1,8 @@ +//! missing doc + use core::{fmt, marker::PhantomData}; -use generic_array::{typenum::PowerOfTwo, ArrayLength}; +// use generic_array::{typenum::PowerOfTwo, ArrayLength}; use hash32::{BuildHasherDefault, Hash, Hasher}; use serde::de::{self, Deserialize, Deserializer, Error, MapAccess, SeqAccess}; @@ -12,25 +14,24 @@ use crate::{ // Sequential containers -impl<'de, T, N, KIND> Deserialize<'de> for BinaryHeap +impl<'de, T, KIND, const N: usize> Deserialize<'de> for BinaryHeap where T: Ord + Deserialize<'de>, - N: ArrayLength, + KIND: BinaryHeapKind, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { - struct ValueVisitor<'de, T, N, KIND>(PhantomData<(&'de (), T, N, KIND)>); + struct ValueVisitor<'de, T, KIND, const N: usize>(PhantomData<(&'de (), T, KIND)>); - impl<'de, T, N, KIND> de::Visitor<'de> for ValueVisitor<'de, T, N, KIND> + impl<'de, T, KIND, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, KIND, N> where T: Ord + Deserialize<'de>, - N: ArrayLength, KIND: BinaryHeapKind, { - type Value = BinaryHeap; + type Value = BinaryHeap; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a sequence") @@ -55,25 +56,23 @@ where } } -impl<'de, T, N, S> Deserialize<'de> for IndexSet> +impl<'de, T, S, const N: usize> Deserialize<'de> for IndexSet, N> where T: Eq + Hash + Deserialize<'de>, S: Hasher + Default, - N: ArrayLength> + ArrayLength> + PowerOfTwo, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { - struct ValueVisitor<'de, T, N, S>(PhantomData<(&'de (), T, N, S)>); + struct ValueVisitor<'de, T, S, const N: usize>(PhantomData<(&'de (), T, S)>); - impl<'de, T, N, S> de::Visitor<'de> for ValueVisitor<'de, T, N, S> + impl<'de, T, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, S, N> where T: Eq + Hash + Deserialize<'de>, S: Hasher + Default, - N: ArrayLength> + 
ArrayLength> + PowerOfTwo, { - type Value = IndexSet>; + type Value = IndexSet, N>; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a sequence") @@ -98,20 +97,18 @@ where } } -impl<'de, T, N> Deserialize<'de> for Vec +impl<'de, T, const N: usize> Deserialize<'de> for Vec where - N: ArrayLength, T: Deserialize<'de>, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { - struct ValueVisitor<'de, T, N>(PhantomData<(&'de (), T, N)>); + struct ValueVisitor<'de, T, const N: usize>(PhantomData<(&'de (), T)>); - impl<'de, T, N> de::Visitor<'de> for ValueVisitor<'de, T, N> + impl<'de, T, const N: usize> serde::de::Visitor<'de> for ValueVisitor<'de, T, N> where - N: ArrayLength, T: Deserialize<'de>, { type Value = Vec; @@ -141,27 +138,25 @@ where // Dictionaries -impl<'de, K, V, N, S> Deserialize<'de> for IndexMap> +impl<'de, K, V, S, const N: usize> Deserialize<'de> for IndexMap, N> where K: Eq + Hash + Deserialize<'de>, V: Deserialize<'de>, - N: ArrayLength> + ArrayLength> + PowerOfTwo, S: Default + Hasher, { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { - struct ValueVisitor<'de, K, V, N, S>(PhantomData<(&'de (), K, V, N, S)>); + struct ValueVisitor<'de, K, V, S, const N:usize>(PhantomData<(&'de (), K, V, S)>); - impl<'de, K, V, N, S> de::Visitor<'de> for ValueVisitor<'de, K, V, N, S> + impl<'de, K, V, S, const N:usize> de::Visitor<'de> for ValueVisitor<'de, K, V, S, N> where K: Eq + Hash + Deserialize<'de>, V: Deserialize<'de>, - N: ArrayLength> + ArrayLength> + PowerOfTwo, S: Default + Hasher, { - type Value = IndexMap>; + type Value = IndexMap, N>; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a map") @@ -186,23 +181,21 @@ where } } -impl<'de, K, V, N> Deserialize<'de> for LinearMap +impl<'de, K, V, const N:usize> Deserialize<'de> for LinearMap where K: Eq + Deserialize<'de>, V: Deserialize<'de>, - N: ArrayLength<(K, V)>, { fn 
deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { - struct ValueVisitor<'de, K, V, N>(PhantomData<(&'de (), K, V, N)>); + struct ValueVisitor<'de, K, V, const N:usize>(PhantomData<(&'de (), K, V)>); - impl<'de, K, V, N> de::Visitor<'de> for ValueVisitor<'de, K, V, N> + impl<'de, K, V, const N:usize> de::Visitor<'de> for ValueVisitor<'de, K, V, N> where K: Eq + Deserialize<'de>, V: Deserialize<'de>, - N: ArrayLength<(K, V)>, { type Value = LinearMap; @@ -231,19 +224,15 @@ where // String containers -impl<'de, N> Deserialize<'de> for String -where - N: ArrayLength, +impl<'de, const N:usize> Deserialize<'de> for String { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { - struct ValueVisitor<'de, N>(PhantomData<(&'de (), N)>); + struct ValueVisitor<'de, const N:usize>(PhantomData<(&'de ())>); - impl<'de, N> de::Visitor<'de> for ValueVisitor<'de, N> - where - N: ArrayLength, + impl<'de, const N:usize > de::Visitor<'de> for ValueVisitor<'de, N> { type Value = String; @@ -251,7 +240,7 @@ where write!( formatter, "a string no more than {} bytes long", - N::to_u64() + N as u64 ) } diff --git a/src/histbuf.rs b/src/histbuf.rs index 3239f83635..20a92e15f3 100644 --- a/src/histbuf.rs +++ b/src/histbuf.rs @@ -1,4 +1,4 @@ -use generic_array::{ArrayLength, GenericArray, sequence::GenericSequence}; +// use generic_array::{sequence::GenericSequence, ArrayLength, GenericArray}; /// A "history buffer", similar to a write-only ring buffer of fixed length. /// @@ -14,10 +14,9 @@ use generic_array::{ArrayLength, GenericArray, sequence::GenericSequence}; /// # Examples /// ``` /// use heapless::HistoryBuffer; -/// use heapless::consts::*; /// /// // Initialize a new buffer with 8 elements, all initially zero. 
-/// let mut buf = HistoryBuffer::<_, U8>::new(); +/// let mut buf = HistoryBuffer::<_, 8>::new(); /// /// buf.write(3); /// buf.write(5); @@ -34,19 +33,14 @@ use generic_array::{ArrayLength, GenericArray, sequence::GenericSequence}; /// assert_eq!(avg, 2); /// ``` #[derive(Clone)] -pub struct HistoryBuffer -where - N: ArrayLength, -{ - data: GenericArray, +pub struct HistoryBuffer { + data: [T; N], write_at: usize, } - -impl HistoryBuffer +impl HistoryBuffer where - N: ArrayLength, - T: Default, + T: Default + Copy, { /// Constructs a new history buffer, where every element is filled with the /// default value of the type `T`. @@ -57,16 +51,17 @@ where /// /// ``` /// use heapless::HistoryBuffer; - /// use heapless::consts::*; /// /// // Allocate a 16-element buffer on the stack - /// let mut x: HistoryBuffer = HistoryBuffer::new(); + /// let mut x: HistoryBuffer = HistoryBuffer::new(); /// // All elements are zero /// assert_eq!(x.as_slice(), [0; 16]); /// ``` pub fn new() -> Self { Self { - data: Default::default(), + // seems not yet implemented + // data: Default::default(), + data: [T::default(); N], write_at: 0, } } @@ -78,10 +73,9 @@ where } } -impl HistoryBuffer +impl HistoryBuffer where - N: ArrayLength, - T: Clone, + T: Copy + Clone, { /// Constructs a new history buffer, where every element is the given value. 
/// @@ -89,16 +83,15 @@ where /// /// ``` /// use heapless::HistoryBuffer; - /// use heapless::consts::*; /// /// // Allocate a 16-element buffer on the stack - /// let mut x: HistoryBuffer = HistoryBuffer::new_with(4); + /// let mut x: HistoryBuffer = HistoryBuffer::new_with(4); /// // All elements are four /// assert_eq!(x.as_slice(), [4; 16]); /// ``` pub fn new_with(t: T) -> Self { Self { - data: GenericArray::generate(|_| t.clone()), + data: [t; N], write_at: 0, } } @@ -109,10 +102,7 @@ where } } -impl HistoryBuffer -where - N: ArrayLength, -{ +impl HistoryBuffer { /// Returns the capacity of the buffer, which is the length of the /// underlying backing array. pub fn len(&self) -> usize { @@ -147,9 +137,8 @@ where /// /// ``` /// use heapless::HistoryBuffer; - /// use heapless::consts::*; /// - /// let mut x: HistoryBuffer = HistoryBuffer::new(); + /// let mut x: HistoryBuffer = HistoryBuffer::new(); /// x.write(4); /// x.write(10); /// assert_eq!(x.recent(), &10); @@ -169,10 +158,7 @@ where } } -impl Extend for HistoryBuffer -where - N: ArrayLength, -{ +impl Extend for HistoryBuffer { fn extend(&mut self, iter: I) where I: IntoIterator, @@ -183,10 +169,9 @@ where } } -impl<'a, T, N> Extend<&'a T> for HistoryBuffer +impl<'a, T, const N: usize> Extend<&'a T> for HistoryBuffer where T: 'a + Clone, - N: ArrayLength, { fn extend(&mut self, iter: I) where @@ -198,21 +183,21 @@ where #[cfg(test)] mod tests { - use crate::{consts::*, HistoryBuffer}; + use crate::HistoryBuffer; #[test] fn new() { - let x: HistoryBuffer = HistoryBuffer::new_with(1); + let x: HistoryBuffer = HistoryBuffer::new_with(1); assert_eq!(x.len(), 4); assert_eq!(x.as_slice(), [1; 4]); - let x: HistoryBuffer = HistoryBuffer::new(); + let x: HistoryBuffer = HistoryBuffer::new(); assert_eq!(x.as_slice(), [0; 4]); } #[test] fn write() { - let mut x: HistoryBuffer = HistoryBuffer::new(); + let mut x: HistoryBuffer = HistoryBuffer::new(); x.write(1); x.write(4); assert_eq!(x.as_slice(), [1, 4, 0, 0]); 
@@ -228,18 +213,18 @@ mod tests { #[test] fn clear() { - let mut x: HistoryBuffer = HistoryBuffer::new_with(1); + let mut x: HistoryBuffer = HistoryBuffer::new_with(1); x.clear(); assert_eq!(x.as_slice(), [0; 4]); - let mut x: HistoryBuffer = HistoryBuffer::new(); + let mut x: HistoryBuffer = HistoryBuffer::new(); x.clear_with(1); assert_eq!(x.as_slice(), [1; 4]); } #[test] fn recent() { - let mut x: HistoryBuffer = HistoryBuffer::new(); + let mut x: HistoryBuffer = HistoryBuffer::new(); assert_eq!(x.recent(), &0); x.write(1); @@ -254,7 +239,7 @@ mod tests { #[test] fn as_slice() { - let mut x: HistoryBuffer = HistoryBuffer::new(); + let mut x: HistoryBuffer = HistoryBuffer::new(); x.extend([1, 2, 3, 4, 5].iter()); diff --git a/src/indexmap.rs b/src/indexmap.rs index d12ea18de4..11cc1294cd 100644 --- a/src/indexmap.rs +++ b/src/indexmap.rs @@ -7,13 +7,13 @@ use core::{ ops, slice, }; -use generic_array::{typenum::PowerOfTwo, ArrayLength, GenericArray}; +// use generic_array::{typenum::PowerOfTwo, ArrayLength, GenericArray}; use hash32::{BuildHasher, BuildHasherDefault, FnvHasher, Hash, Hasher}; use crate::Vec; /// An `IndexMap` using the default FNV hasher -pub type FnvIndexMap = IndexMap>; +pub type FnvIndexMap = IndexMap, N>; #[derive(Clone, Copy, Eq, PartialEq)] struct HashValue(u16); @@ -85,19 +85,17 @@ macro_rules! 
probe_loop { } } -struct CoreMap +struct CoreMap where K: Eq + Hash, - N: ArrayLength> + ArrayLength>, { entries: Vec, N>, - indices: GenericArray, N>, + indices: [Option; N], } -impl CoreMap +impl CoreMap where K: Eq + Hash, - N: ArrayLength> + ArrayLength>, { // TODO turn into a `const fn`; needs `mem::zeroed` to be a `const fn` fn new() -> Self { @@ -108,7 +106,7 @@ where } fn capacity() -> usize { - N::to_usize() + N } fn mask() -> usize { @@ -270,11 +268,10 @@ where } } -impl Clone for CoreMap +impl Clone for CoreMap where K: Eq + Hash + Clone, V: Clone, - N: ArrayLength> + ArrayLength>, { fn clone(&self) -> Self { Self { @@ -292,10 +289,9 @@ where /// /// ``` /// use heapless::FnvIndexMap; -/// use heapless::consts::*; /// /// // A hash map with a capacity of 16 key-value pairs allocated on the stack -/// let mut book_reviews = FnvIndexMap::<_, _, U16>::new(); +/// let mut book_reviews = FnvIndexMap::<_, _, 16>::new(); /// /// // review some books. /// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.").unwrap(); @@ -326,20 +322,18 @@ where /// println!("{}: \"{}\"", book, review); /// } /// ``` -pub struct IndexMap +pub struct IndexMap where K: Eq + Hash, - N: ArrayLength> + ArrayLength>, { core: CoreMap, build_hasher: S, } -impl IndexMap> +impl IndexMap, N> where K: Eq + Hash, S: Default + Hasher, - N: ArrayLength> + ArrayLength> + PowerOfTwo, { // TODO turn into a `const fn`; needs `mem::zeroed` to be a `const fn` /// Creates an empty `IndexMap`. 
@@ -353,25 +347,23 @@ where } } -impl IndexMap +impl IndexMap where K: Eq + Hash, S: BuildHasher, - N: ArrayLength> + ArrayLength>, { /* Public API */ /// Returns the number of elements the map can hold pub fn capacity(&self) -> usize { - N::to_usize() + N } /// Return an iterator over the keys of the map, in their order /// /// ``` /// use heapless::FnvIndexMap; - /// use heapless::consts::*; /// - /// let mut map = FnvIndexMap::<_, _, U16>::new(); + /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); @@ -388,9 +380,8 @@ where /// /// ``` /// use heapless::FnvIndexMap; - /// use heapless::consts::*; /// - /// let mut map = FnvIndexMap::<_, _, U16>::new(); + /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); @@ -407,9 +398,8 @@ where /// /// ``` /// use heapless::FnvIndexMap; - /// use heapless::consts::*; /// - /// let mut map = FnvIndexMap::<_, _, U16>::new(); + /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); @@ -430,9 +420,8 @@ where /// /// ``` /// use heapless::FnvIndexMap; - /// use heapless::consts::*; /// - /// let mut map = FnvIndexMap::<_, _, U16>::new(); + /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); @@ -451,9 +440,8 @@ where /// /// ``` /// use heapless::FnvIndexMap; - /// use heapless::consts::*; /// - /// let mut map = FnvIndexMap::<_, _, U16>::new(); + /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); @@ -481,9 +469,8 @@ where /// /// ``` /// use heapless::FnvIndexMap; - /// use heapless::consts::*; /// - /// let mut a = FnvIndexMap::<_, _, U16>::new(); + /// 
let mut a = FnvIndexMap::<_, _, 16>::new(); /// assert_eq!(a.len(), 0); /// a.insert(1, "a").unwrap(); /// assert_eq!(a.len(), 1); @@ -498,9 +485,8 @@ where /// /// ``` /// use heapless::FnvIndexMap; - /// use heapless::consts::*; /// - /// let mut a = FnvIndexMap::<_, _, U16>::new(); + /// let mut a = FnvIndexMap::<_, _, 16>::new(); /// assert!(a.is_empty()); /// a.insert(1, "a"); /// assert!(!a.is_empty()); @@ -515,9 +501,8 @@ where /// /// ``` /// use heapless::FnvIndexMap; - /// use heapless::consts::*; /// - /// let mut a = FnvIndexMap::<_, _, U16>::new(); + /// let mut a = FnvIndexMap::<_, _, 16>::new(); /// a.insert(1, "a"); /// a.clear(); /// assert!(a.is_empty()); @@ -538,9 +523,8 @@ where /// /// ``` /// use heapless::FnvIndexMap; - /// use heapless::consts::*; /// - /// let mut map = FnvIndexMap::<_, _, U16>::new(); + /// let mut map = FnvIndexMap::<_, _, 16>::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.get(&1), Some(&"a")); /// assert_eq!(map.get(&2), None); @@ -565,9 +549,8 @@ where /// /// ``` /// use heapless::FnvIndexMap; - /// use heapless::consts::*; /// - /// let mut map = FnvIndexMap::<_, _, U8>::new(); + /// let mut map = FnvIndexMap::<_, _, 8>::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.contains_key(&1), true); /// assert_eq!(map.contains_key(&2), false); @@ -591,9 +574,8 @@ where /// /// ``` /// use heapless::FnvIndexMap; - /// use heapless::consts::*; /// - /// let mut map = FnvIndexMap::<_, _, U8>::new(); + /// let mut map = FnvIndexMap::<_, _, 8>::new(); /// map.insert(1, "a").unwrap(); /// if let Some(x) = map.get_mut(&1) { /// *x = "b"; @@ -630,9 +612,8 @@ where /// /// ``` /// use heapless::FnvIndexMap; - /// use heapless::consts::*; /// - /// let mut map = FnvIndexMap::<_, _, U8>::new(); + /// let mut map = FnvIndexMap::<_, _, 8>::new(); /// assert_eq!(map.insert(37, "a"), Ok(None)); /// assert_eq!(map.is_empty(), false); /// @@ -663,9 +644,8 @@ where /// /// ``` /// use heapless::FnvIndexMap; - /// use 
heapless::consts::*; /// - /// let mut map = FnvIndexMap::<_, _, U8>::new(); + /// let mut map = FnvIndexMap::<_, _, 8>::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.remove(&1), Some("a")); /// assert_eq!(map.remove(&1), None); @@ -715,12 +695,12 @@ where } } -impl<'a, K, Q, V, N, S> ops::Index<&'a Q> for IndexMap +impl<'a, K, Q, V, S, const N: usize> ops::Index<&'a Q> for IndexMap where K: Eq + Hash + Borrow, Q: ?Sized + Eq + Hash, S: BuildHasher, - N: ArrayLength> + ArrayLength>, + // N: ArrayLength> + ArrayLength>, { type Output = V; @@ -729,24 +709,24 @@ where } } -impl<'a, K, Q, V, N, S> ops::IndexMut<&'a Q> for IndexMap +impl<'a, K, Q, V, S, const N: usize> ops::IndexMut<&'a Q> for IndexMap where K: Eq + Hash + Borrow, Q: ?Sized + Eq + Hash, S: BuildHasher, - N: ArrayLength> + ArrayLength>, + // N: ArrayLength> + ArrayLength>, { fn index_mut(&mut self, key: &Q) -> &mut V { self.get_mut(key).expect("key not found") } } -impl Clone for IndexMap +impl Clone for IndexMap where K: Eq + Hash + Clone, V: Clone, S: Clone, - N: ArrayLength> + ArrayLength>, + // N: ArrayLength> + ArrayLength>, { fn clone(&self) -> Self { Self { @@ -756,23 +736,23 @@ where } } -impl fmt::Debug for IndexMap +impl fmt::Debug for IndexMap where K: Eq + Hash + fmt::Debug, V: fmt::Debug, S: BuildHasher, - N: ArrayLength> + ArrayLength>, + // N: ArrayLength> + ArrayLength>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() } } -impl Default for IndexMap +impl Default for IndexMap where K: Eq + Hash, S: BuildHasher + Default, - N: ArrayLength> + ArrayLength>, + // N: ArrayLength> + ArrayLength>, { fn default() -> Self { IndexMap { @@ -782,16 +762,15 @@ where } } -impl PartialEq> for IndexMap +impl PartialEq> + for IndexMap where K: Eq + Hash, V: Eq, S: BuildHasher, - N: ArrayLength> + ArrayLength>, S2: BuildHasher, - N2: ArrayLength> + ArrayLength>, { - fn eq(&self, other: &IndexMap) -> bool { + fn eq(&self, other: &IndexMap) 
-> bool { self.len() == other.len() && self .iter() @@ -799,20 +778,18 @@ where } } -impl Eq for IndexMap +impl Eq for IndexMap where K: Eq + Hash, V: Eq, S: BuildHasher, - N: ArrayLength> + ArrayLength>, { } -impl Extend<(K, V)> for IndexMap +impl Extend<(K, V)> for IndexMap where K: Eq + Hash, S: BuildHasher, - N: ArrayLength> + ArrayLength>, { fn extend(&mut self, iterable: I) where @@ -824,12 +801,11 @@ where } } -impl<'a, K, V, N, S> Extend<(&'a K, &'a V)> for IndexMap +impl<'a, K, V, S, const N: usize> Extend<(&'a K, &'a V)> for IndexMap where K: Eq + Hash + Copy, V: Copy, S: BuildHasher, - N: ArrayLength> + ArrayLength>, { fn extend(&mut self, iterable: I) where @@ -839,11 +815,10 @@ where } } -impl FromIterator<(K, V)> for IndexMap +impl FromIterator<(K, V)> for IndexMap where K: Eq + Hash, S: BuildHasher + Default, - N: ArrayLength> + ArrayLength>, { fn from_iter(iterable: I) -> Self where @@ -855,11 +830,10 @@ where } } -impl<'a, K, V, N, S> IntoIterator for &'a IndexMap +impl<'a, K, V, S, const N: usize> IntoIterator for &'a IndexMap where K: Eq + Hash, S: BuildHasher, - N: ArrayLength> + ArrayLength>, { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; @@ -869,11 +843,10 @@ where } } -impl<'a, K, V, N, S> IntoIterator for &'a mut IndexMap +impl<'a, K, V, S, const N: usize> IntoIterator for &'a mut IndexMap where K: Eq + Hash, S: BuildHasher, - N: ArrayLength> + ArrayLength>, { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; @@ -929,21 +902,16 @@ where #[cfg(test)] mod tests { + use crate::FnvIndexMap; use core::mem; - use generic_array::typenum::Unsigned; - - use crate::{consts::*, FnvIndexMap}; - #[test] fn size() { - type Cap = U4; - - let cap = Cap::to_usize(); + const CAP: usize = 4; assert_eq!( - mem::size_of::>(), - cap * mem::size_of::() + // indices - cap * (mem::size_of::() + // key + mem::size_of::>(), + CAP * mem::size_of::() + // indices + CAP * (mem::size_of::() + // key mem::size_of::() + // value 
mem::size_of::() // hash ) + // buckets @@ -954,10 +922,10 @@ mod tests { #[test] fn partial_eq() { { - let mut a: FnvIndexMap<_, _, U4> = FnvIndexMap::new(); + let mut a: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); a.insert("k1", "v1").unwrap(); - let mut b: FnvIndexMap<_, _, U4> = FnvIndexMap::new(); + let mut b: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); b.insert("k1", "v1").unwrap(); assert!(a == b); @@ -968,11 +936,11 @@ mod tests { } { - let mut a: FnvIndexMap<_, _, U4> = FnvIndexMap::new(); + let mut a: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); a.insert("k1", "v1").unwrap(); a.insert("k2", "v2").unwrap(); - let mut b: FnvIndexMap<_, _, U4> = FnvIndexMap::new(); + let mut b: FnvIndexMap<_, _, 4> = FnvIndexMap::new(); b.insert("k2", "v2").unwrap(); b.insert("k1", "v1").unwrap(); diff --git a/src/indexset.rs b/src/indexset.rs index 4c80fa1795..1b799ffd16 100644 --- a/src/indexset.rs +++ b/src/indexset.rs @@ -1,12 +1,13 @@ use core::{borrow::Borrow, fmt, iter::FromIterator}; -use generic_array::{typenum::PowerOfTwo, ArrayLength}; use hash32::{BuildHasher, BuildHasherDefault, FnvHasher, Hash, Hasher}; use crate::indexmap::{self, Bucket, IndexMap, Pos}; /// An `IndexSet` using the default FNV hasher -pub type FnvIndexSet = IndexSet>; +pub type FnvIndexSet = IndexSet, N>; + +// TODO: We don't enforce the power of 2 currently (part of generic array bounds) /// Fixed capacity [`IndexSet`](https://docs.rs/indexmap/1/indexmap/set/struct.IndexSet.html) /// @@ -16,10 +17,9 @@ pub type FnvIndexSet = IndexSet>; /// /// ``` /// use heapless::FnvIndexSet; -/// use heapless::consts::*; /// /// // A hash set with a capacity of 16 elements allocated on the stack -/// let mut books = FnvIndexSet::<_, U16>::new(); +/// let mut books = FnvIndexSet::<_, 16>::new(); /// /// // Add some books. 
/// books.insert("A Dance With Dragons").unwrap(); @@ -41,19 +41,17 @@ pub type FnvIndexSet = IndexSet>; /// println!("{}", book); /// } /// ``` -pub struct IndexSet +pub struct IndexSet where T: Eq + Hash, - N: ArrayLength> + ArrayLength>, { - map: IndexMap, + map: IndexMap, } -impl IndexSet> +impl IndexSet, N> where T: Eq + Hash, S: Default + Hasher, - N: ArrayLength> + ArrayLength> + PowerOfTwo, { /// Creates an empty `IndexSet` pub fn new() -> Self { @@ -63,11 +61,10 @@ where } } -impl IndexSet +impl IndexSet where T: Eq + Hash, S: BuildHasher, - N: ArrayLength> + ArrayLength>, { /// Returns the number of elements the set can hold /// @@ -75,9 +72,8 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let set = FnvIndexSet::::new(); + /// let set = FnvIndexSet::::new(); /// assert_eq!(set.capacity(), 16); /// ``` pub fn capacity(&self) -> usize { @@ -90,9 +86,8 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let mut set = FnvIndexSet::<_, U16>::new(); + /// let mut set = FnvIndexSet::<_, 16>::new(); /// set.insert("a").unwrap(); /// set.insert("b").unwrap(); /// @@ -114,30 +109,28 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let mut a: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect(); - /// let mut b: FnvIndexSet<_, U16> = [4, 2, 3, 4].iter().cloned().collect(); + /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); + /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); /// /// // Can be seen as `a - b`. 
/// for x in a.difference(&b) { /// println!("{}", x); // Print 1 /// } /// - /// let diff: FnvIndexSet<_, U16> = a.difference(&b).collect(); - /// assert_eq!(diff, [1].iter().collect::>()); + /// let diff: FnvIndexSet<_, 16> = a.difference(&b).collect(); + /// assert_eq!(diff, [1].iter().collect::>()); /// /// // Note that difference is not symmetric, /// // and `b - a` means something else: - /// let diff: FnvIndexSet<_, U16> = b.difference(&a).collect(); - /// assert_eq!(diff, [4].iter().collect::>()); + /// let diff: FnvIndexSet<_, 16> = b.difference(&a).collect(); + /// assert_eq!(diff, [4].iter().collect::>()); /// ``` - pub fn difference<'a, N2, S2>( + pub fn difference<'a, S2, const N2: usize>( &'a self, - other: &'a IndexSet, - ) -> Difference<'a, T, N2, S2> + other: &'a IndexSet, + ) -> Difference<'a, T, S2, N2> where - N2: ArrayLength> + ArrayLength>, S2: BuildHasher, { Difference { @@ -153,28 +146,26 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let mut a: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect(); - /// let mut b: FnvIndexSet<_, U16> = [4, 2, 3, 4].iter().cloned().collect(); + /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); + /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); /// /// // Print 1, 4 in that order order. 
/// for x in a.symmetric_difference(&b) { /// println!("{}", x); /// } /// - /// let diff1: FnvIndexSet<_, U16> = a.symmetric_difference(&b).collect(); - /// let diff2: FnvIndexSet<_, U16> = b.symmetric_difference(&a).collect(); + /// let diff1: FnvIndexSet<_, 16> = a.symmetric_difference(&b).collect(); + /// let diff2: FnvIndexSet<_, 16> = b.symmetric_difference(&a).collect(); /// /// assert_eq!(diff1, diff2); - /// assert_eq!(diff1, [1, 4].iter().collect::>()); + /// assert_eq!(diff1, [1, 4].iter().collect::>()); /// ``` - pub fn symmetric_difference<'a, N2, S2>( + pub fn symmetric_difference<'a, S2, const N2: usize>( &'a self, - other: &'a IndexSet, + other: &'a IndexSet, ) -> impl Iterator where - N2: ArrayLength> + ArrayLength>, S2: BuildHasher, { self.difference(other).chain(other.difference(self)) @@ -187,25 +178,23 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let mut a: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect(); - /// let mut b: FnvIndexSet<_, U16> = [4, 2, 3, 4].iter().cloned().collect(); + /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); + /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); /// /// // Print 2, 3 in that order. 
/// for x in a.intersection(&b) { /// println!("{}", x); /// } /// - /// let intersection: FnvIndexSet<_, U16> = a.intersection(&b).collect(); - /// assert_eq!(intersection, [2, 3].iter().collect::>()); + /// let intersection: FnvIndexSet<_, 16> = a.intersection(&b).collect(); + /// assert_eq!(intersection, [2, 3].iter().collect::>()); /// ``` - pub fn intersection<'a, N2, S2>( + pub fn intersection<'a, S2, const N2: usize>( &'a self, - other: &'a IndexSet, - ) -> Intersection<'a, T, N2, S2> + other: &'a IndexSet, + ) -> Intersection<'a, T, S2, N2> where - N2: ArrayLength> + ArrayLength>, S2: BuildHasher, { Intersection { @@ -221,25 +210,23 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let mut a: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect(); - /// let mut b: FnvIndexSet<_, U16> = [4, 2, 3, 4].iter().cloned().collect(); + /// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); + /// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect(); /// /// // Print 1, 2, 3, 4 in that order. 
/// for x in a.union(&b) { /// println!("{}", x); /// } /// - /// let union: FnvIndexSet<_, U16> = a.union(&b).collect(); - /// assert_eq!(union, [1, 2, 3, 4].iter().collect::>()); + /// let union: FnvIndexSet<_, 16> = a.union(&b).collect(); + /// assert_eq!(union, [1, 2, 3, 4].iter().collect::>()); /// ``` - pub fn union<'a, N2, S2>( + pub fn union<'a, S2, const N2: usize>( &'a self, - other: &'a IndexSet, + other: &'a IndexSet, ) -> impl Iterator where - N2: ArrayLength> + ArrayLength>, S2: BuildHasher, { self.iter().chain(other.difference(self)) @@ -251,9 +238,8 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let mut v: FnvIndexSet<_, U16> = FnvIndexSet::new(); + /// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new(); /// assert_eq!(v.len(), 0); /// v.insert(1).unwrap(); /// assert_eq!(v.len(), 1); @@ -268,9 +254,8 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let mut v: FnvIndexSet<_, U16> = FnvIndexSet::new(); + /// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new(); /// assert!(v.is_empty()); /// v.insert(1).unwrap(); /// assert!(!v.is_empty()); @@ -285,9 +270,8 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let mut v: FnvIndexSet<_, U16> = FnvIndexSet::new(); + /// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new(); /// v.insert(1).unwrap(); /// v.clear(); /// assert!(v.is_empty()); @@ -305,9 +289,8 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let set: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect(); + /// let set: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); /// assert_eq!(set.contains(&1), true); /// assert_eq!(set.contains(&4), false); /// ``` @@ -326,10 +309,9 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let a: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect(); - /// let mut b = 
FnvIndexSet::<_, U16>::new(); + /// let a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); + /// let mut b = FnvIndexSet::<_, 16>::new(); /// /// assert_eq!(a.is_disjoint(&b), true); /// b.insert(4).unwrap(); @@ -337,9 +319,8 @@ where /// b.insert(1).unwrap(); /// assert_eq!(a.is_disjoint(&b), false); /// ``` - pub fn is_disjoint(&self, other: &IndexSet) -> bool + pub fn is_disjoint(&self, other: &IndexSet) -> bool where - N2: ArrayLength> + ArrayLength>, S2: BuildHasher, { self.iter().all(|v| !other.contains(v)) @@ -352,10 +333,9 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let sup: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect(); - /// let mut set = FnvIndexSet::<_, U16>::new(); + /// let sup: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect(); + /// let mut set = FnvIndexSet::<_, 16>::new(); /// /// assert_eq!(set.is_subset(&sup), true); /// set.insert(2).unwrap(); @@ -363,9 +343,8 @@ where /// set.insert(4).unwrap(); /// assert_eq!(set.is_subset(&sup), false); /// ``` - pub fn is_subset(&self, other: &IndexSet) -> bool + pub fn is_subset(&self, other: &IndexSet) -> bool where - N2: ArrayLength> + ArrayLength>, S2: BuildHasher, { self.iter().all(|v| other.contains(v)) @@ -378,10 +357,9 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let sub: FnvIndexSet<_, U16> = [1, 2].iter().cloned().collect(); - /// let mut set = FnvIndexSet::<_, U16>::new(); + /// let sub: FnvIndexSet<_, 16> = [1, 2].iter().cloned().collect(); + /// let mut set = FnvIndexSet::<_, 16>::new(); /// /// assert_eq!(set.is_superset(&sub), false); /// @@ -392,9 +370,8 @@ where /// set.insert(2).unwrap(); /// assert_eq!(set.is_superset(&sub), true); /// ``` - pub fn is_superset(&self, other: &IndexSet) -> bool + pub fn is_superset(&self, other: &IndexSet) -> bool where - N2: ArrayLength> + ArrayLength>, S2: BuildHasher, { other.is_subset(self) @@ -410,9 +387,8 @@ where /// /// ``` 
/// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let mut set = FnvIndexSet::<_, U16>::new(); + /// let mut set = FnvIndexSet::<_, 16>::new(); /// /// assert_eq!(set.insert(2).unwrap(), true); /// assert_eq!(set.insert(2).unwrap(), false); @@ -434,9 +410,8 @@ where /// /// ``` /// use heapless::FnvIndexSet; - /// use heapless::consts::*; /// - /// let mut set = FnvIndexSet::<_, U16>::new(); + /// let mut set = FnvIndexSet::<_, 16>::new(); /// /// set.insert(2).unwrap(); /// assert_eq!(set.remove(&2), true); @@ -451,11 +426,10 @@ where } } -impl Clone for IndexSet +impl Clone for IndexSet where T: Eq + Hash + Clone, S: Clone, - N: ArrayLength> + ArrayLength>, { fn clone(&self) -> Self { Self { @@ -464,22 +438,20 @@ where } } -impl fmt::Debug for IndexSet +impl fmt::Debug for IndexSet where T: Eq + Hash + fmt::Debug, S: BuildHasher, - N: ArrayLength> + ArrayLength>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_set().entries(self.iter()).finish() } } -impl Default for IndexSet +impl Default for IndexSet where T: Eq + Hash, S: BuildHasher + Default, - N: ArrayLength> + ArrayLength>, { fn default() -> Self { IndexSet { @@ -488,24 +460,22 @@ where } } -impl PartialEq> for IndexSet +impl PartialEq> + for IndexSet where T: Eq + Hash, S1: BuildHasher, S2: BuildHasher, - N1: ArrayLength> + ArrayLength>, - N2: ArrayLength> + ArrayLength>, { - fn eq(&self, other: &IndexSet) -> bool { + fn eq(&self, other: &IndexSet) -> bool { self.len() == other.len() && self.is_subset(other) } } -impl Extend for IndexSet +impl Extend for IndexSet where T: Eq + Hash, S: BuildHasher, - N: ArrayLength> + ArrayLength>, { fn extend(&mut self, iterable: I) where @@ -515,11 +485,10 @@ where } } -impl<'a, T, N, S> Extend<&'a T> for IndexSet +impl<'a, T, S, const N: usize> Extend<&'a T> for IndexSet where T: 'a + Eq + Hash + Copy, S: BuildHasher, - N: ArrayLength> + ArrayLength>, { fn extend(&mut self, iterable: I) where @@ -529,11 +498,10 @@ where } } 
-impl FromIterator for IndexSet +impl FromIterator for IndexSet where T: Eq + Hash, S: BuildHasher + Default, - N: ArrayLength> + ArrayLength>, { fn from_iter(iter: I) -> Self where @@ -545,11 +513,10 @@ where } } -impl<'a, T, N, S> IntoIterator for &'a IndexSet +impl<'a, T, S, const N: usize> IntoIterator for &'a IndexSet where T: Eq + Hash, S: BuildHasher, - N: ArrayLength> + ArrayLength>, { type Item = &'a T; type IntoIter = Iter<'a, T>; @@ -579,21 +546,19 @@ impl<'a, T> Clone for Iter<'a, T> { } } -pub struct Difference<'a, T, N, S> +pub struct Difference<'a, T, S, const N: usize> where S: BuildHasher, T: Eq + Hash, - N: ArrayLength> + ArrayLength>, { iter: Iter<'a, T>, - other: &'a IndexSet, + other: &'a IndexSet, } -impl<'a, T, N, S> Iterator for Difference<'a, T, N, S> +impl<'a, T, S, const N: usize> Iterator for Difference<'a, T, S, N> where S: BuildHasher, T: Eq + Hash, - N: ArrayLength> + ArrayLength>, { type Item = &'a T; @@ -607,21 +572,19 @@ where } } -pub struct Intersection<'a, T, N, S> +pub struct Intersection<'a, T, S, const N: usize> where S: BuildHasher, T: Eq + Hash, - N: ArrayLength> + ArrayLength>, { iter: Iter<'a, T>, - other: &'a IndexSet, + other: &'a IndexSet, } -impl<'a, T, N, S> Iterator for Intersection<'a, T, N, S> +impl<'a, T, S, const N: usize> Iterator for Intersection<'a, T, S, N> where S: BuildHasher, T: Eq + Hash, - N: ArrayLength> + ArrayLength>, { type Item = &'a T; diff --git a/src/lib.rs b/src/lib.rs index 4d95fe2427..612ecc0210 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -10,17 +10,14 @@ //! //! ``` //! use heapless::Vec; // fixed capacity `std::Vec` -//! use heapless::consts::U8; // type level integer used to specify capacity //! //! // on the stack -//! let mut xs: Vec = Vec::new(); // can hold up to 8 elements +//! let mut xs: Vec = Vec::new(); // can hold up to 8 elements //! xs.push(42).unwrap(); //! assert_eq!(xs.pop(), Some(42)); //! //! // in a `static` variable -//! 
// (because `const-fn` has not been fully stabilized you need to use the helper structs in -//! // the `i` module, which must be wrapped in a tuple struct) -//! static mut XS: Vec = Vec(heapless::i::Vec::new()); +//! static mut XS: Vec = Vec::new(); //! //! let xs = unsafe { &mut XS }; //! @@ -28,7 +25,7 @@ //! assert_eq!(xs.pop(), Some(42)); //! //! // in the heap (though kind of pointless because no reallocation) -//! let mut ys: Box> = Box::new(Vec::new()); +//! let mut ys: Box> = Box::new(Vec::new()); //! ys.push(42).unwrap(); //! assert_eq!(ys.pop(), Some(42)); //! ``` @@ -69,29 +66,32 @@ //! This crate is guaranteed to compile on stable Rust 1.36 and up with its default set of features. //! It *might* compile on older versions but that may change in any new patch release. +// experimental usage of const generics, requires nightly 2020-08-18 (or newer) +#![feature(min_const_generics)] +#![feature(const_fn)] #![cfg_attr(not(test), no_std)] #![deny(missing_docs)] #![deny(rust_2018_compatibility)] #![deny(rust_2018_idioms)] -#![deny(warnings)] +// #![deny(warnings)] pub use binary_heap::BinaryHeap; -pub use generic_array::typenum::{consts, PowerOfTwo}; -pub use generic_array::ArrayLength; +// pub use generic_array::typenum::{consts, PowerOfTwo}; +// pub use generic_array::ArrayLength; +pub use histbuf::HistoryBuffer; pub use indexmap::{Bucket, FnvIndexMap, IndexMap, Pos}; pub use indexset::{FnvIndexSet, IndexSet}; pub use linear_map::LinearMap; pub use string::String; pub use vec::Vec; -pub use histbuf::HistoryBuffer; // NOTE this code was last ported from v0.4.1 of the indexmap crate +mod histbuf; mod indexmap; mod indexset; mod linear_map; mod string; mod vec; -mod histbuf; #[cfg(feature = "serde")] mod de; @@ -99,10 +99,10 @@ mod de; mod ser; pub mod binary_heap; -pub mod i; +// pub mod i; #[cfg(all(has_cas, feature = "cas"))] pub mod mpmc; -#[cfg(all(has_cas, feature = "cas"))] +// #[cfg(all(has_cas, feature = "cas"))] pub mod pool; #[cfg(has_atomics)] pub 
mod spsc; diff --git a/src/linear_map.rs b/src/linear_map.rs index a1b8d9515b..35cfdd8a4b 100644 --- a/src/linear_map.rs +++ b/src/linear_map.rs @@ -6,31 +6,21 @@ use core::{ ops, ptr, slice, }; -use generic_array::{ArrayLength, GenericArray}; - use crate::Vec; /// A fixed capacity map / dictionary that performs lookups via linear search /// /// Note that as this map doesn't use hashing so most operations are **O(N)** instead of O(1) -pub struct LinearMap(#[doc(hidden)] pub crate::i::LinearMap>) -where - N: ArrayLength<(K, V)>, - K: Eq; -impl crate::i::LinearMap { - /// `LinearMap` `const` constructor; wrap the returned value in - /// [`LinearMap`](../struct.LinearMap.html) - pub const fn new() -> Self { - Self { - buffer: crate::i::Vec::new(), - } - } +pub struct LinearMap +where + K: Eq, +{ + pub(crate) buffer: Vec<(K, V), N>, } -impl LinearMap +impl LinearMap where - N: ArrayLength<(K, V)>, K: Eq, { /// Creates an empty `LinearMap` @@ -39,16 +29,15 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// /// // allocate the map on the stack - /// let mut map: LinearMap<&str, isize, U8> = LinearMap::new(); + /// let mut map: LinearMap<&str, isize, 8> = LinearMap::new(); /// /// // allocate the map in a static variable - /// static mut MAP: LinearMap<&str, isize, U8> = LinearMap(heapless::i::LinearMap::new()); + /// static mut MAP: LinearMap<&str, isize, 8> = LinearMap::new(); /// ``` - pub fn new() -> Self { - LinearMap(crate::i::LinearMap::new()) + pub const fn new() -> Self { + Self { buffer: Vec::new() } } /// Returns the number of elements that the map can hold @@ -59,13 +48,12 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let map: LinearMap<&str, isize, U8> = LinearMap::new(); + /// let map: LinearMap<&str, isize, 8> = LinearMap::new(); /// assert_eq!(map.capacity(), 8); /// ``` pub fn capacity(&self) -> usize { - N::to_usize() + N } /// Clears the map, removing all key-value pairs @@ 
-76,15 +64,14 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let mut map: LinearMap<_, _, U8> = LinearMap::new(); + /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// map.clear(); /// assert!(map.is_empty()); /// ``` pub fn clear(&mut self) { - self.0.buffer.clear() + self.buffer.clear() } /// Returns true if the map contains a value for the specified key. @@ -95,9 +82,8 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let mut map: LinearMap<_, _, U8> = LinearMap::new(); + /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.contains_key(&1), true); /// assert_eq!(map.contains_key(&2), false); @@ -114,9 +100,8 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let mut map: LinearMap<_, _, U8> = LinearMap::new(); + /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.get(&1), Some(&"a")); /// assert_eq!(map.get(&2), None); @@ -139,9 +124,8 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let mut map: LinearMap<_, _, U8> = LinearMap::new(); + /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// if let Some(x) = map.get_mut(&1) { /// *x = "b"; @@ -166,15 +150,14 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let mut a: LinearMap<_, _, U8> = LinearMap::new(); + /// let mut a: LinearMap<_, _, 8> = LinearMap::new(); /// assert_eq!(a.len(), 0); /// a.insert(1, "a").unwrap(); /// assert_eq!(a.len(), 1); /// ``` pub fn len(&self) -> usize { - self.0.buffer.len + self.buffer.len() } /// Inserts a key-value pair into the map. 
@@ -189,9 +172,8 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let mut map: LinearMap<_, _, U8> = LinearMap::new(); + /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// assert_eq!(map.insert(37, "a").unwrap(), None); /// assert_eq!(map.is_empty(), false); /// @@ -205,7 +187,7 @@ where return Ok(Some(value)); } - self.0.buffer.push((key, value))?; + self.buffer.push((key, value))?; Ok(None) } @@ -217,9 +199,8 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let mut a: LinearMap<_, _, U8> = LinearMap::new(); + /// let mut a: LinearMap<_, _, 8> = LinearMap::new(); /// assert!(a.is_empty()); /// a.insert(1, "a").unwrap(); /// assert!(!a.is_empty()); @@ -234,9 +215,8 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let mut map: LinearMap<_, _, U8> = LinearMap::new(); + /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); @@ -247,7 +227,7 @@ where /// ``` pub fn iter(&self) -> Iter<'_, K, V> { Iter { - iter: self.0.buffer.as_slice().iter(), + iter: self.buffer.as_slice().iter(), } } @@ -258,9 +238,8 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let mut map: LinearMap<_, _, U8> = LinearMap::new(); + /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); @@ -276,7 +255,7 @@ where /// ``` pub fn iter_mut(&mut self) -> IterMut<'_, K, V> { IterMut { - iter: self.0.buffer.as_mut_slice().iter_mut(), + iter: self.buffer.as_mut_slice().iter_mut(), } } @@ -286,9 +265,8 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let mut map: LinearMap<_, _, U8> = LinearMap::new(); + /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// 
map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); @@ -310,9 +288,8 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let mut map: LinearMap<_, _, U8> = LinearMap::new(); + /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert(1, "a").unwrap(); /// assert_eq!(map.remove(&1), Some("a")); /// assert_eq!(map.remove(&1), None); @@ -328,7 +305,7 @@ where .find(|&(_, k)| k.borrow() == key) .map(|(idx, _)| idx); - idx.map(|idx| self.0.buffer.swap_remove(idx).1) + idx.map(|idx| self.buffer.swap_remove(idx).1) } /// An iterator visiting all values in arbitrary order @@ -337,9 +314,8 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let mut map: LinearMap<_, _, U8> = LinearMap::new(); + /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); @@ -358,9 +334,8 @@ where /// /// ``` /// use heapless::LinearMap; - /// use heapless::consts::*; /// - /// let mut map: LinearMap<_, _, U8> = LinearMap::new(); + /// let mut map: LinearMap<_, _, 8> = LinearMap::new(); /// map.insert("a", 1).unwrap(); /// map.insert("b", 2).unwrap(); /// map.insert("c", 3).unwrap(); @@ -378,9 +353,8 @@ where } } -impl<'a, K, V, N, Q> ops::Index<&'a Q> for LinearMap +impl<'a, K, V, Q, const N: usize> ops::Index<&'a Q> for LinearMap where - N: ArrayLength<(K, V)>, K: Borrow + Eq, Q: Eq + ?Sized, { @@ -391,9 +365,8 @@ where } } -impl<'a, K, V, N, Q> ops::IndexMut<&'a Q> for LinearMap +impl<'a, K, V, Q, const N: usize> ops::IndexMut<&'a Q> for LinearMap where - N: ArrayLength<(K, V)>, K: Borrow + Eq, Q: Eq + ?Sized, { @@ -402,9 +375,8 @@ where } } -impl Default for LinearMap +impl Default for LinearMap where - N: ArrayLength<(K, V)>, K: Eq, { fn default() -> Self { @@ -412,22 +384,20 @@ where } } -impl Clone for LinearMap +impl Clone for LinearMap where - N: ArrayLength<(K, V)>, K: Eq + Clone, V: Clone, 
{ fn clone(&self) -> Self { - Self(crate::i::LinearMap { - buffer: self.0.buffer.clone(), - }) + Self { + buffer: self.buffer.clone(), + } } } -impl fmt::Debug for LinearMap +impl fmt::Debug for LinearMap where - N: ArrayLength<(K, V)>, K: Eq + fmt::Debug, V: fmt::Debug, { @@ -436,9 +406,8 @@ where } } -impl FromIterator<(K, V)> for LinearMap +impl FromIterator<(K, V)> for LinearMap where - N: ArrayLength<(K, V)>, K: Eq, { fn from_iter(iter: I) -> Self @@ -446,22 +415,20 @@ where I: IntoIterator, { let mut out = Self::new(); - out.0.buffer.extend(iter); + out.buffer.extend(iter); out } } -pub struct IntoIter +pub struct IntoIter where - N: ArrayLength<(K, V)>, K: Eq, { inner: as IntoIterator>::IntoIter, } -impl Iterator for IntoIter +impl Iterator for IntoIter where - N: ArrayLength<(K, V)>, K: Eq, { type Item = (K, V); @@ -470,28 +437,27 @@ where } } -impl IntoIterator for LinearMap +// TODO: Why is this needed at all, no example, no test... I don't get it +// impl IntoIterator for LinearMap +// where +// K: Eq, +// { +// type Item = (K, V); +// type IntoIter = IntoIter; + +// fn into_iter(mut self) -> Self::IntoIter { +// // FIXME this may result in a memcpy at runtime +// let lm = mem::replace(&mut self, unsafe { MaybeUninit::uninit().assume_init() }); +// mem::forget(self); + +// Self::IntoIter { +// inner: lm.buffer.into_iter(), +// } +// } +// } + +impl<'a, K, V, const N: usize> IntoIterator for &'a LinearMap where - N: ArrayLength<(K, V)>, - K: Eq, -{ - type Item = (K, V); - type IntoIter = IntoIter; - - fn into_iter(mut self) -> Self::IntoIter { - // FIXME this may result in a memcpy at runtime - let lm = mem::replace(&mut self.0, unsafe { MaybeUninit::uninit().assume_init() }); - mem::forget(self); - - Self::IntoIter { - inner: crate::Vec(lm.buffer).into_iter(), - } - } -} - -impl<'a, K, V, N> IntoIterator for &'a LinearMap -where - N: ArrayLength<(K, V)>, K: Eq, { type Item = (&'a K, &'a V); @@ -522,13 +488,15 @@ impl<'a, K, V> Clone for Iter<'a, K, V> { 
} } -impl Drop for LinearMap +impl Drop for LinearMap where - N: ArrayLength<(K, V)>, K: Eq, { fn drop(&mut self) { - unsafe { ptr::drop_in_place(self.0.buffer.as_mut_slice()) } + // heapless::Vec implements drop right? + drop(&self.buffer); + // original code below + // unsafe { ptr::drop_in_place(self.buffer.as_mut_slice()) } } } @@ -544,12 +512,10 @@ impl<'a, K, V> Iterator for IterMut<'a, K, V> { } } -impl PartialEq> for LinearMap +impl PartialEq> for LinearMap where K: Eq, V: PartialEq, - N: ArrayLength<(K, V)>, - N2: ArrayLength<(K, V)>, { fn eq(&self, other: &LinearMap) -> bool { self.len() == other.len() @@ -559,30 +525,29 @@ where } } -impl Eq for LinearMap +impl Eq for LinearMap where K: Eq, V: PartialEq, - N: ArrayLength<(K, V)>, { } #[cfg(test)] mod test { - use crate::{consts::*, LinearMap}; + use crate::LinearMap; #[test] fn static_new() { - static mut _L: LinearMap = LinearMap(crate::i::LinearMap::new()); + static mut _L: LinearMap = LinearMap::new(); } #[test] fn partial_eq() { { - let mut a = LinearMap::<_, _, U1>::new(); + let mut a = LinearMap::<_, _, 1>::new(); a.insert("k1", "v1").unwrap(); - let mut b = LinearMap::<_, _, U2>::new(); + let mut b = LinearMap::<_, _, 2>::new(); b.insert("k1", "v1").unwrap(); assert!(a == b); @@ -593,15 +558,17 @@ mod test { } { - let mut a = LinearMap::<_, _, U2>::new(); + let mut a = LinearMap::<_, _, 2>::new(); a.insert("k1", "v1").unwrap(); a.insert("k2", "v2").unwrap(); - let mut b = LinearMap::<_, _, U2>::new(); + let mut b = LinearMap::<_, _, 2>::new(); b.insert("k2", "v2").unwrap(); b.insert("k1", "v1").unwrap(); assert!(a == b); } } + + // TODO: drop test } diff --git a/src/sealed.rs b/src/sealed.rs index 3f68564b55..092485d187 100644 --- a/src/sealed.rs +++ b/src/sealed.rs @@ -2,7 +2,6 @@ pub mod spsc { #[cfg(has_atomics)] use crate::spsc::{MultiCore, SingleCore}; - #[cfg(has_atomics)] use core::sync::atomic::{self, AtomicU16, AtomicU8, AtomicUsize, Ordering}; diff --git a/src/ser.rs b/src/ser.rs index 
b050346462..32bc980c76 100644 --- a/src/ser.rs +++ b/src/ser.rs @@ -1,4 +1,6 @@ -use generic_array::{typenum::PowerOfTwo, ArrayLength}; +//! missing doc + +// use generic_array::{typenum::PowerOfTwo, ArrayLength}; use hash32::{BuildHasher, Hash}; use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer}; @@ -10,10 +12,9 @@ use crate::{ // Sequential containers -impl Serialize for BinaryHeap +impl Serialize for BinaryHeap where T: Ord + Serialize, - N: ArrayLength, KIND: BinaryHeapKind, { fn serialize(&self, serializer: S) -> Result @@ -28,11 +29,10 @@ where } } -impl Serialize for IndexSet +impl Serialize for IndexSet where T: Eq + Hash + Serialize, S: BuildHasher, - N: ArrayLength> + ArrayLength> + PowerOfTwo, { fn serialize(&self, serializer: SER) -> Result where @@ -46,10 +46,9 @@ where } } -impl Serialize for Vec +impl Serialize for Vec where T: Serialize, - N: ArrayLength, { fn serialize(&self, serializer: S) -> Result where @@ -65,10 +64,9 @@ where // Dictionaries -impl Serialize for IndexMap +impl Serialize for IndexMap where K: Eq + Hash + Serialize, - N: ArrayLength> + ArrayLength>, S: BuildHasher, V: Serialize, { @@ -84,9 +82,8 @@ where } } -impl Serialize for LinearMap +impl Serialize for LinearMap where - N: ArrayLength<(K, V)>, K: Eq + Serialize, V: Serialize, { @@ -104,10 +101,7 @@ where // String containers -impl Serialize for String -where - N: ArrayLength, -{ +impl Serialize for String { fn serialize(&self, serializer: S) -> Result where S: Serializer, diff --git a/src/spsc/mod.rs b/src/spsc/mod.rs index 2ec3d72f76..0aea97b0c0 100644 --- a/src/spsc/mod.rs +++ b/src/spsc/mod.rs @@ -9,9 +9,8 @@ //! //! ``` //! use heapless::spsc::Queue; -//! use heapless::consts::*; //! -//! let mut rb: Queue = Queue::new(); +//! let mut rb: Queue = Queue::new(); //! //! assert!(rb.enqueue(0).is_ok()); //! assert!(rb.enqueue(1).is_ok()); @@ -25,10 +24,11 @@ //! - `Queue` can be `split` and then be used in Single Producer Single Consumer mode //! //! 
``` -//! use heapless::spsc::Queue; -//! use heapless::consts::*; +//! use heapless::spsc::{Queue, MultiCore}; //! -//! static mut Q: Queue = Queue(heapless::i::Queue::new()); +//! // Notice, type signature needs to be explicit for now. +//! // (min_const_eval, does not allow for default type assignments) +//! static mut Q: Queue = Queue::new(); //! //! enum Event { A, B } //! @@ -85,7 +85,6 @@ use core::{cell::UnsafeCell, fmt, hash, marker::PhantomData, mem::MaybeUninit, ptr}; -use generic_array::{ArrayLength, GenericArray}; use hash32; use crate::sealed::spsc as sealed; @@ -161,23 +160,30 @@ where /// following constructors: `u8_sc`, `u16_sc`, `usize_sc` and `new_sc`. This variant is `unsafe` to /// create because the programmer must make sure that the queue's consumer and producer endpoints /// (if split) are kept on a single core for their entire lifetime. -pub struct Queue( - #[doc(hidden)] pub crate::i::Queue, U, C>, -) + +#[cfg(has_atomics)] +pub struct Queue where - N: ArrayLength, U: sealed::Uxx, - C: sealed::XCore; + C: sealed::XCore, +{ + // this is from where we dequeue items + pub(crate) head: Atomic, + + // this is where we enqueue new items + pub(crate) tail: Atomic, -impl Queue + pub(crate) buffer: MaybeUninit<[T; N]>, +} + +impl Queue where - N: ArrayLength, U: sealed::Uxx, C: sealed::XCore, { /// Returns the maximum number of elements the queue can hold pub fn capacity(&self) -> U { - U::saturate(N::to_usize()) + U::saturate(N) } /// Returns `true` if the queue has a length of 0 @@ -186,7 +192,7 @@ where } /// Iterates from the front of the queue to the back - pub fn iter(&self) -> Iter<'_, T, N, U, C> { + pub fn iter(&self) -> Iter<'_, T, U, C, N> { Iter { rb: self, index: 0, @@ -195,7 +201,7 @@ where } /// Returns an iterator that allows modifying each value. 
- pub fn iter_mut(&mut self) -> IterMut<'_, T, N, U, C> { + pub fn iter_mut(&mut self) -> IterMut<'_, T, U, C, N> { let len = self.len_usize(); IterMut { rb: self, @@ -205,16 +211,15 @@ where } fn len_usize(&self) -> usize { - let head = self.0.head.load_relaxed().into(); - let tail = self.0.tail.load_relaxed().into(); + let head = self.head.load_relaxed().into(); + let tail = self.tail.load_relaxed().into(); U::truncate(tail.wrapping_sub(head)).into() } } -impl Drop for Queue +impl Drop for Queue where - N: ArrayLength, U: sealed::Uxx, C: sealed::XCore, { @@ -227,9 +232,8 @@ where } } -impl fmt::Debug for Queue +impl fmt::Debug for Queue where - N: ArrayLength, T: fmt::Debug, U: sealed::Uxx, C: sealed::XCore, @@ -239,9 +243,8 @@ where } } -impl hash::Hash for Queue +impl hash::Hash for Queue where - N: ArrayLength, T: hash::Hash, U: sealed::Uxx, C: sealed::XCore, @@ -254,9 +257,8 @@ where } } -impl hash32::Hash for Queue +impl hash32::Hash for Queue where - N: ArrayLength, T: hash32::Hash, U: sealed::Uxx, C: sealed::XCore, @@ -269,28 +271,26 @@ where } } -impl<'a, T, N, U, C> IntoIterator for &'a Queue +impl<'a, T, U, C, const N: usize> IntoIterator for &'a Queue where - N: ArrayLength, U: sealed::Uxx, C: sealed::XCore, { type Item = &'a T; - type IntoIter = Iter<'a, T, N, U, C>; + type IntoIter = Iter<'a, T, U, C, N>; fn into_iter(self) -> Self::IntoIter { self.iter() } } -impl<'a, T, N, U, C> IntoIterator for &'a mut Queue +impl<'a, T, U, C, const N: usize> IntoIterator for &'a mut Queue where - N: ArrayLength, U: sealed::Uxx, C: sealed::XCore, { type Item = &'a mut T; - type IntoIter = IterMut<'a, T, N, U, C>; + type IntoIter = IterMut<'a, T, U, C, N>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() @@ -299,43 +299,21 @@ where macro_rules! 
impl_ { ($uxx:ident, $uxx_sc:ident) => { - impl Queue - where - N: ArrayLength, - { + impl Queue { /// Creates an empty queue with a fixed capacity of `N` - pub fn $uxx() -> Self { - Queue(crate::i::Queue::$uxx()) - } - } - - impl crate::i::Queue { - /// `spsc::Queue` `const` constructor; wrap the returned value in - /// [`spsc::Queue`](struct.Queue.html) pub const fn $uxx() -> Self { - crate::i::Queue { - buffer: MaybeUninit::uninit(), + Self { head: Atomic::new(0), tail: Atomic::new(0), + buffer: MaybeUninit::uninit(), } } } - impl Queue - where - N: ArrayLength, - { + impl Queue { /// Creates an empty queue with a fixed capacity of `N` (single core variant) - pub unsafe fn $uxx_sc() -> Self { - Queue(crate::i::Queue::$uxx_sc()) - } - } - - impl crate::i::Queue { - /// `spsc::Queue` `const` constructor; wrap the returned value in - /// [`spsc::Queue`](struct.Queue.html) pub const unsafe fn $uxx_sc() -> Self { - crate::i::Queue { + Self { buffer: MaybeUninit::uninit(), head: Atomic::new(0), tail: Atomic::new(0), @@ -343,9 +321,8 @@ macro_rules! impl_ { } } - impl Queue + impl Queue where - N: ArrayLength, C: sealed::XCore, { /// Returns a reference to the item in the front of the queue without dequeuing, or @@ -354,9 +331,8 @@ macro_rules! impl_ { /// # Examples /// ``` /// use heapless::spsc::Queue; - /// use heapless::consts::*; /// - /// let mut queue: Queue = Queue::u8(); + /// let mut queue: Queue = Queue::u8(); /// let (mut producer, mut consumer) = queue.split(); /// assert_eq!(None, consumer.peek()); /// producer.enqueue(1); @@ -367,10 +343,10 @@ macro_rules! impl_ { pub fn peek(&self) -> Option<&T> { let cap = self.capacity(); - let head = self.0.head.get(); - let tail = self.0.tail.get(); + let head = self.head.get(); + let tail = self.tail.get(); - let p = self.0.buffer.as_ptr(); + let p = self.buffer.as_ptr(); if *head != *tail { let item = unsafe { &*(p as *const T).add(usize::from(*head % cap)) }; @@ -384,10 +360,10 @@ macro_rules! 
impl_ { pub fn dequeue(&mut self) -> Option { let cap = self.capacity(); - let head = self.0.head.get_mut(); - let tail = self.0.tail.get_mut(); + let head = self.head.get_mut(); + let tail = self.tail.get_mut(); - let p = self.0.buffer.as_ptr(); + let p = self.buffer.as_ptr(); if *head != *tail { let item = unsafe { (p as *const T).add(usize::from(*head % cap)).read() }; @@ -403,8 +379,8 @@ macro_rules! impl_ { /// Returns back the `item` if the queue is full pub fn enqueue(&mut self, item: T) -> Result<(), T> { let cap = self.capacity(); - let head = *self.0.head.get_mut(); - let tail = *self.0.tail.get_mut(); + let head = *self.head.get_mut(); + let tail = *self.tail.get_mut(); if tail.wrapping_sub(head) > cap - 1 { Err(item) @@ -424,12 +400,12 @@ macro_rules! impl_ { /// twice. pub unsafe fn enqueue_unchecked(&mut self, item: T) { let cap = self.capacity(); - let tail = self.0.tail.get_mut(); + let tail = self.tail.get_mut(); // NOTE(ptr::write) the memory slot that we are about to write to is // uninitialized. We use `ptr::write` to avoid running `T`'s destructor on the // uninitialized memory - (self.0.buffer.as_mut_ptr() as *mut T) + (self.buffer.as_mut_ptr() as *mut T) .add(usize::from(*tail % cap)) .write(item); *tail = tail.wrapping_add(1); @@ -437,24 +413,23 @@ macro_rules! impl_ { /// Returns the number of elements in the queue pub fn len(&self) -> $uxx { - let head = self.0.head.load_relaxed(); - let tail = self.0.tail.load_relaxed(); + let head = self.head.load_relaxed(); + let tail = self.tail.load_relaxed(); tail.wrapping_sub(head) } } - impl Clone for Queue + impl Clone for Queue where T: Clone, - N: ArrayLength, C: sealed::XCore, { fn clone(&self) -> Self { - let mut new: Queue = Queue(crate::i::Queue { + let mut new: Queue = Queue { buffer: MaybeUninit::uninit(), head: Atomic::new(0), tail: Atomic::new(0), - }); + }; for s in self.iter() { unsafe { @@ -469,39 +444,17 @@ macro_rules! 
impl_ { }; } -impl crate::i::Queue { - /// `spsc::Queue` `const` constructor; wrap the returned value in - /// [`spsc::Queue`](struct.Queue.html) - pub const fn new() -> Self { - crate::i::Queue::usize() - } -} - -impl Queue -where - N: ArrayLength, -{ +impl Queue { /// Alias for [`spsc::Queue::usize`](struct.Queue.html#method.usize) - pub fn new() -> Self { - Queue(crate::i::Queue::new()) - } -} - -impl crate::i::Queue { - /// `spsc::Queue` `const` constructor; wrap the returned value in - /// [`spsc::Queue`](struct.Queue.html) - pub const unsafe fn new_sc() -> Self { - crate::i::Queue::usize_sc() + pub const fn new() -> Self { + Queue::usize() } } -impl Queue -where - N: ArrayLength, -{ +impl Queue { /// Alias for [`spsc::Queue::usize_sc`](struct.Queue.html#method.usize_sc) pub unsafe fn new_sc() -> Self { - Queue(crate::i::Queue::new_sc()) + Queue::usize_sc() } } @@ -509,46 +462,42 @@ impl_!(u8, u8_sc); impl_!(u16, u16_sc); impl_!(usize, usize_sc); -impl PartialEq> for Queue +impl PartialEq> + for Queue where T: PartialEq, - N: ArrayLength, U: sealed::Uxx, C: sealed::XCore, - N2: ArrayLength, U2: sealed::Uxx, C2: sealed::XCore, { - fn eq(&self, other: &Queue) -> bool { + fn eq(&self, other: &Queue) -> bool { self.len_usize() == other.len_usize() && self.iter().zip(other.iter()).all(|(v1, v2)| v1 == v2) } } -impl Eq for Queue +impl Eq for Queue where T: Eq, - N: ArrayLength, U: sealed::Uxx, C: sealed::XCore, { } /// An iterator over the items of a queue -pub struct Iter<'a, T, N, U, C> +pub struct Iter<'a, T, U, C, const N: usize> where - N: ArrayLength, U: sealed::Uxx, C: sealed::XCore, { - rb: &'a Queue, + rb: &'a Queue, index: usize, len: usize, } -impl<'a, T, N, U, C> Clone for Iter<'a, T, N, U, C> +impl<'a, T, U, C, const N: usize> Clone for Iter<'a, T, U, C, N> where - N: ArrayLength, U: sealed::Uxx, C: sealed::XCore, { @@ -562,22 +511,20 @@ where } /// A mutable iterator over the items of a queue -pub struct IterMut<'a, T, N, U, C> +pub struct IterMut<'a, 
T, U, C, const N: usize> where - N: ArrayLength, U: sealed::Uxx, C: sealed::XCore, { - rb: &'a mut Queue, + rb: &'a mut Queue, index: usize, len: usize, } macro_rules! iterator { (struct $name:ident -> $elem:ty, $ptr:ty, $asptr:ident, $mkref:ident) => { - impl<'a, T, N, U, C> Iterator for $name<'a, T, N, U, C> + impl<'a, T, U, C, const N: usize> Iterator for $name<'a, T, U, C, N> where - N: ArrayLength, U: sealed::Uxx, C: sealed::XCore, { @@ -585,10 +532,10 @@ macro_rules! iterator { fn next(&mut self) -> Option<$elem> { if self.index < self.len { - let head = self.rb.0.head.load_relaxed().into(); + let head = self.rb.head.load_relaxed().into(); let cap = self.rb.capacity().into(); - let ptr = self.rb.0.buffer.$asptr() as $ptr; + let ptr = self.rb.buffer.$asptr() as $ptr; let i = (head + self.index) % cap; self.index += 1; Some(unsafe { $mkref!(*ptr.offset(i as isize)) }) @@ -598,18 +545,17 @@ macro_rules! iterator { } } - impl<'a, T, N, U, C> DoubleEndedIterator for $name<'a, T, N, U, C> + impl<'a, T, U, C, const N: usize> DoubleEndedIterator for $name<'a, T, U, C, N> where - N: ArrayLength, U: sealed::Uxx, C: sealed::XCore, { fn next_back(&mut self) -> Option<$elem> { if self.index < self.len { - let head = self.rb.0.head.load_relaxed().into(); + let head = self.rb.head.load_relaxed().into(); let cap = self.rb.capacity().into(); - let ptr = self.rb.0.buffer.$asptr() as $ptr; + let ptr = self.rb.buffer.$asptr() as $ptr; // self.len > 0, since it's larger than self.index > 0 let i = (head + self.len - 1) % cap; self.len -= 1; @@ -641,11 +587,21 @@ iterator!(struct IterMut -> &'a mut T, *mut T, as_mut_ptr, make_ref_mut); mod tests { use hash32::Hasher; - use crate::{consts::*, spsc::Queue}; + use crate::spsc::{MultiCore, Queue, SingleCore}; + + #[test] + fn static_usize_sc() { + static mut _Q: Queue = unsafe { Queue::usize_sc() }; + } + + #[test] + fn static_usize() { + static mut _Q: Queue = Queue::usize(); + } #[test] fn static_new() { - static mut _Q: Queue = 
Queue(crate::i::Queue::new()); + static mut _Q: Queue = Queue::new(); } #[test] @@ -671,7 +627,7 @@ mod tests { static mut COUNT: i32 = 0; { - let mut v: Queue = Queue::new(); + let mut v: Queue = unsafe { Queue::usize_sc() }; v.enqueue(Droppable::new()).ok().unwrap(); v.enqueue(Droppable::new()).ok().unwrap(); v.dequeue().unwrap(); @@ -680,7 +636,7 @@ mod tests { assert_eq!(unsafe { COUNT }, 0); { - let mut v: Queue = Queue::new(); + let mut v: Queue = Queue::usize(); v.enqueue(Droppable::new()).ok().unwrap(); v.enqueue(Droppable::new()).ok().unwrap(); } @@ -690,7 +646,7 @@ mod tests { #[test] fn full() { - let mut rb: Queue = Queue::new(); + let mut rb: Queue = Queue::u8(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -702,7 +658,7 @@ mod tests { #[test] fn iter() { - let mut rb: Queue = Queue::new(); + let mut rb: Queue = Queue::u16(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -718,7 +674,7 @@ mod tests { #[test] fn iter_double_ended() { - let mut rb: Queue = Queue::new(); + let mut rb: Queue = Queue::u8(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -735,7 +691,7 @@ mod tests { #[test] fn iter_overflow() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::u8(); rb.enqueue(0).unwrap(); for _ in 0..300 { @@ -749,7 +705,7 @@ mod tests { #[test] fn iter_mut() { - let mut rb: Queue = Queue::new(); + let mut rb: Queue = Queue::u8(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -765,7 +721,7 @@ mod tests { #[test] fn iter_mut_double_ended() { - let mut rb: Queue = Queue::new(); + let mut rb: Queue = Queue::u8(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -782,21 +738,17 @@ mod tests { #[test] fn sanity() { - let mut rb: Queue = Queue::new(); - + let mut rb: Queue = Queue::u8(); assert_eq!(rb.dequeue(), None); - rb.enqueue(0).unwrap(); - assert_eq!(rb.dequeue(), Some(0)); - assert_eq!(rb.dequeue(), None); } #[test] #[cfg(feature = "smaller-atomics")] fn u8() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = 
Queue::u8(); for _ in 0..255 { rb.enqueue(0).unwrap(); @@ -807,7 +759,7 @@ mod tests { #[test] fn wrap_around() { - let mut rb: Queue = Queue::new(); + let mut rb: Queue = Queue::u8(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -823,7 +775,7 @@ mod tests { #[test] fn ready_flag() { - let mut rb: Queue = Queue::new(); + let mut rb: Queue = Queue::u8(); let (mut p, mut c) = rb.split(); assert_eq!(c.ready(), false); assert_eq!(p.ready(), true); @@ -851,7 +803,7 @@ mod tests { #[test] fn clone() { - let mut rb1: Queue = Queue::new(); + let mut rb1: Queue = Queue::u8(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); @@ -866,12 +818,12 @@ mod tests { fn eq() { // generate two queues with same content // but different buffer alignment - let mut rb1: Queue = Queue::new(); + let mut rb1: Queue = Queue::u8(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); rb1.enqueue(0).unwrap(); - let mut rb2: Queue = Queue::new(); + let mut rb2: Queue = Queue::u8(); rb2.enqueue(0).unwrap(); rb2.enqueue(0).unwrap(); assert!(rb1 == rb2); @@ -892,7 +844,7 @@ mod tests { // generate two queues with same content // but different buffer alignment let rb1 = { - let mut rb1: Queue = Queue::new(); + let mut rb1: Queue = Queue::u8(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); @@ -900,7 +852,7 @@ mod tests { rb1 }; let rb2 = { - let mut rb2: Queue = Queue::new(); + let mut rb2: Queue = Queue::u8(); rb2.enqueue(0).unwrap(); rb2.enqueue(0).unwrap(); rb2 diff --git a/src/spsc/split.rs b/src/spsc/split.rs index 6676ac5681..abcb0ed812 100644 --- a/src/spsc/split.rs +++ b/src/spsc/split.rs @@ -1,20 +1,18 @@ use core::{marker::PhantomData, ptr::NonNull}; -use generic_array::ArrayLength; - use crate::{ sealed::spsc as sealed, - spsc::{MultiCore, Queue}, + spsc::Queue, + // spsc::{MultiCore, Queue}, // we cannot currently default to MultiCore }; -impl Queue +impl Queue where - N: ArrayLength, U: sealed::Uxx, C: 
sealed::XCore, { /// Splits a statically allocated queue into producer and consumer end points - pub fn split<'rb>(&'rb mut self) -> (Producer<'rb, T, N, U, C>, Consumer<'rb, T, N, U, C>) { + pub fn split<'rb>(&'rb mut self) -> (Producer<'rb, T, U, C, N>, Consumer<'rb, T, U, C, N>) { ( Producer { rb: unsafe { NonNull::new_unchecked(self) }, @@ -30,19 +28,17 @@ where /// A queue "consumer"; it can dequeue items from the queue // NOTE the consumer semantically owns the `head` pointer of the queue -pub struct Consumer<'a, T, N, U = usize, C = MultiCore> +pub struct Consumer<'a, T, U, C, const N: usize> where - N: ArrayLength, U: sealed::Uxx, C: sealed::XCore, { - rb: NonNull>, + rb: NonNull>, _marker: PhantomData<&'a ()>, } -unsafe impl<'a, T, N, U, C> Send for Consumer<'a, T, N, U, C> +unsafe impl<'a, T, U, C, const N: usize> Send for Consumer<'a, T, U, C, N> where - N: ArrayLength, T: Send, U: sealed::Uxx, C: sealed::XCore, @@ -51,19 +47,17 @@ where /// A queue "producer"; it can enqueue items into the queue // NOTE the producer semantically owns the `tail` pointer of the queue -pub struct Producer<'a, T, N, U = usize, C = MultiCore> +pub struct Producer<'a, T, U, C, const N: usize> where - N: ArrayLength, U: sealed::Uxx, C: sealed::XCore, { - rb: NonNull>, + rb: NonNull>, _marker: PhantomData<&'a ()>, } -unsafe impl<'a, T, N, U, C> Send for Producer<'a, T, N, U, C> +unsafe impl<'a, T, U, C, const N: usize> Send for Producer<'a, T, U, C, N> where - N: ArrayLength, T: Send, U: sealed::Uxx, C: sealed::XCore, @@ -72,16 +66,15 @@ where macro_rules! impl_ { ($uxx:ident) => { - impl<'a, T, N, C> Consumer<'a, T, N, $uxx, C> + impl<'a, T, C, const N: usize> Consumer<'a, T, $uxx, C, N> where - N: ArrayLength, C: sealed::XCore, { /// Returns if there are any items to dequeue. When this returns true, at least the /// first subsequent dequeue will succeed. 
pub fn ready(&self) -> bool { - let head = unsafe { self.rb.as_ref().0.head.load_relaxed() }; - let tail = unsafe { self.rb.as_ref().0.tail.load_acquire() }; // ▼ + let head = unsafe { self.rb.as_ref().head.load_relaxed() }; + let tail = unsafe { self.rb.as_ref().tail.load_acquire() }; // ▼ return head != tail; } @@ -90,9 +83,8 @@ macro_rules! impl_ { /// # Examples /// ``` /// use heapless::spsc::Queue; - /// use heapless::consts::*; /// - /// let mut queue: Queue = Queue::u8(); + /// let mut queue: Queue = Queue::u8(); /// let (mut producer, mut consumer) = queue.split(); /// assert_eq!(None, consumer.peek()); /// producer.enqueue(1); @@ -101,8 +93,8 @@ macro_rules! impl_ { /// assert_eq!(None, consumer.peek()); /// ``` pub fn peek(&self) -> Option<&T> { - let head = unsafe { self.rb.as_ref().0.head.load_relaxed() }; - let tail = unsafe { self.rb.as_ref().0.tail.load_acquire() }; + let head = unsafe { self.rb.as_ref().head.load_relaxed() }; + let tail = unsafe { self.rb.as_ref().tail.load_acquire() }; if head != tail { Some(unsafe { self._peek(head) }) @@ -113,8 +105,8 @@ macro_rules! impl_ { /// Returns the item in the front of the queue, or `None` if the queue is empty pub fn dequeue(&mut self) -> Option { - let head = unsafe { self.rb.as_ref().0.head.load_relaxed() }; - let tail = unsafe { self.rb.as_ref().0.tail.load_acquire() }; // ▼ + let head = unsafe { self.rb.as_ref().head.load_relaxed() }; + let tail = unsafe { self.rb.as_ref().tail.load_acquire() }; // ▼ if head != tail { Some(unsafe { self._dequeue(head) }) // ▲ @@ -129,8 +121,8 @@ macro_rules! 
impl_ { /// /// If the queue is empty this is equivalent to calling `mem::uninitialized` pub unsafe fn dequeue_unchecked(&mut self) -> T { - let head = self.rb.as_ref().0.head.load_relaxed(); - debug_assert_ne!(head, self.rb.as_ref().0.tail.load_acquire()); + let head = self.rb.as_ref().head.load_relaxed(); + debug_assert_ne!(head, self.rb.as_ref().tail.load_acquire()); self._dequeue(head) // ▲ } @@ -139,7 +131,7 @@ macro_rules! impl_ { let cap = rb.capacity(); - let item = (rb.0.buffer.as_ptr() as *const T).add(usize::from(head % cap)); + let item = (rb.buffer.as_ptr() as *const T).add(usize::from(head % cap)); &*item } @@ -148,17 +140,16 @@ macro_rules! impl_ { let cap = rb.capacity(); - let item = (rb.0.buffer.as_ptr() as *const T) + let item = (rb.buffer.as_ptr() as *const T) .add(usize::from(head % cap)) .read(); - rb.0.head.store_release(head.wrapping_add(1)); // ▲ + rb.head.store_release(head.wrapping_add(1)); // ▲ item } } - impl<'a, T, N, C> Producer<'a, T, N, $uxx, C> + impl<'a, T, C, const N: usize> Producer<'a, T, $uxx, C, N> where - N: ArrayLength, C: sealed::XCore, { /// Returns if there is any space to enqueue a new item. When this returns true, at @@ -166,13 +157,13 @@ macro_rules! impl_ { pub fn ready(&self) -> bool { let cap = unsafe { self.rb.as_ref().capacity() }; - let tail = unsafe { self.rb.as_ref().0.tail.load_relaxed() }; + let tail = unsafe { self.rb.as_ref().tail.load_relaxed() }; // NOTE we could replace this `load_acquire` with a `load_relaxed` and this method // would be sound on most architectures but that change would result in UB according // to the C++ memory model, which is what Rust currently uses, so we err on the side // of caution and stick to `load_acquire`. Check issue google#sanitizers#882 for // more details. - let head = unsafe { self.rb.as_ref().0.head.load_acquire() }; + let head = unsafe { self.rb.as_ref().head.load_acquire() }; return head.wrapping_add(cap) != tail; } @@ -181,13 +172,13 @@ macro_rules! 
impl_ { /// Returns back the `item` if the queue is full pub fn enqueue(&mut self, item: T) -> Result<(), T> { let cap = unsafe { self.rb.as_ref().capacity() }; - let tail = unsafe { self.rb.as_ref().0.tail.load_relaxed() }; + let tail = unsafe { self.rb.as_ref().tail.load_relaxed() }; // NOTE we could replace this `load_acquire` with a `load_relaxed` and this method // would be sound on most architectures but that change would result in UB according // to the C++ memory model, which is what Rust currently uses, so we err on the side // of caution and stick to `load_acquire`. Check issue google#sanitizers#882 for // more details. - let head = unsafe { self.rb.as_ref().0.head.load_acquire() }; // ▼ + let head = unsafe { self.rb.as_ref().head.load_acquire() }; // ▼ if tail.wrapping_sub(head) > cap - 1 { Err(item) @@ -206,8 +197,8 @@ macro_rules! impl_ { /// to create a copy of `item`, which could result in `T`'s destructor running on `item` /// twice. pub unsafe fn enqueue_unchecked(&mut self, item: T) { - let tail = self.rb.as_ref().0.tail.load_relaxed(); - debug_assert_ne!(tail.wrapping_add(1), self.rb.as_ref().0.head.load_acquire()); + let tail = self.rb.as_ref().tail.load_relaxed(); + debug_assert_ne!(tail.wrapping_add(1), self.rb.as_ref().head.load_acquire()); self._enqueue(tail, item); // ▲ } @@ -219,10 +210,10 @@ macro_rules! impl_ { // NOTE(ptr::write) the memory slot that we are about to write to is // uninitialized. 
We use `ptr::write` to avoid running `T`'s destructor on the // uninitialized memory - (rb.0.buffer.as_mut_ptr() as *mut T) + (rb.buffer.as_mut_ptr() as *mut T) .add(usize::from(tail % cap)) .write(item); - rb.0.tail.store_release(tail.wrapping_add(1)); // ▲ + rb.tail.store_release(tail.wrapping_add(1)); // ▲ } } }; @@ -234,11 +225,11 @@ impl_!(usize); #[cfg(test)] mod tests { - use crate::{consts::*, spsc::Queue}; + use crate::spsc::{MultiCore, Queue}; #[test] fn sanity() { - let mut rb: Queue = Queue::new(); + let mut rb: Queue = Queue::u8(); let (mut p, mut c) = rb.split(); diff --git a/src/string.rs b/src/string.rs index 90dc54e66d..d0076ee67f 100644 --- a/src/string.rs +++ b/src/string.rs @@ -7,32 +7,23 @@ use core::{ str::Utf8Error, }; -use generic_array::{ - typenum::{consts::*, IsGreaterOrEqual}, - ArrayLength, GenericArray, -}; use hash32; use crate::Vec; /// A fixed capacity [`String`](https://doc.rust-lang.org/std/string/struct.String.html) -pub struct String(#[doc(hidden)] pub crate::i::String>) -where - N: ArrayLength; - -impl crate::i::String { - /// `String` `const` constructor; wrap the returned value in [`String`](../struct.String.html) - pub const fn new() -> Self { - Self { - vec: crate::i::Vec::new(), - } - } +pub struct String { + vec: Vec, } -impl String -where - N: ArrayLength, -{ +// impl String { +// /// `String` `const` constructor; wrap the returned value in [`String`](../struct.String.html) +// pub const fn new() -> Self { +// Self { vec: Vec::new() } +// } +// } + +impl String { /// Constructs a new, empty `String` with a fixed capacity of `N` /// /// # Examples @@ -41,17 +32,16 @@ where /// /// ``` /// use heapless::String; - /// use heapless::consts::*; /// /// // allocate the string on the stack - /// let mut s: String = String::new(); + /// let mut s: String<4> = String::new(); /// /// // allocate the string in a static variable - /// static mut S: String = String(heapless::i::String::new()); + /// static mut S: String<4> = 
String::new(); /// ``` #[inline] - pub fn new() -> Self { - String(crate::i::String::new()) + pub const fn new() -> Self { + Self { vec: Vec::new() } } /// Converts a vector of bytes into a `String`. @@ -70,9 +60,8 @@ where /// /// ``` /// use heapless::{String, Vec}; - /// use heapless::consts::*; /// - /// let mut v: Vec = Vec::new(); + /// let mut v: Vec = Vec::new(); /// v.push('a' as u8).unwrap(); /// v.push('b' as u8).unwrap(); /// @@ -84,11 +73,10 @@ where /// /// ``` /// use heapless::{String, Vec}; - /// use heapless::consts::*; /// /// // some invalid bytes, in a vector /// - /// let mut v: Vec = Vec::new(); + /// let mut v: Vec = Vec::new(); /// v.push(0).unwrap(); /// v.push(159).unwrap(); /// v.push(146).unwrap(); @@ -110,9 +98,9 @@ where #[inline] pub unsafe fn from_utf8_unchecked(mut vec: Vec) -> String { // FIXME this may result in a memcpy at runtime - let vec_ = mem::replace(&mut vec.0, MaybeUninit::uninit().assume_init()); + let vec_ = mem::replace(&mut vec, MaybeUninit::uninit().assume_init()); mem::forget(vec); - String(crate::i::String { vec: vec_ }) + String { vec: vec_ } } /// Converts a `String` into a byte vector. @@ -125,9 +113,8 @@ where /// /// ``` /// use heapless::String; - /// use heapless::consts::*; /// - /// let s: String = String::from("ab"); + /// let s: String<4> = String::from("ab"); /// let b = s.into_bytes(); /// assert!(b.len() == 2); /// @@ -135,7 +122,7 @@ where /// ``` #[inline] pub fn into_bytes(self) -> Vec { - Vec(self.0.vec) + self.vec } /// Extracts a string slice containing the entire string. 
@@ -146,9 +133,8 @@ where /// /// ``` /// use heapless::String; - /// use heapless::consts::*; /// - /// let mut s: String = String::from("ab"); + /// let mut s: String<4> = String::from("ab"); /// assert!(s.as_str() == "ab"); /// /// let _s = s.as_str(); @@ -156,7 +142,7 @@ where /// ``` #[inline] pub fn as_str(&self) -> &str { - unsafe { str::from_utf8_unchecked(self.0.vec.as_slice()) } + unsafe { str::from_utf8_unchecked(self.vec.as_slice()) } } /// Converts a `String` into a mutable string slice. @@ -167,15 +153,14 @@ where /// /// ``` /// use heapless::String; - /// use heapless::consts::*; /// - /// let mut s: String = String::from("ab"); + /// let mut s: String<4> = String::from("ab"); /// let s = s.as_mut_str(); /// s.make_ascii_uppercase(); /// ``` #[inline] pub fn as_mut_str(&mut self) -> &mut str { - unsafe { str::from_utf8_unchecked_mut(self.0.vec.as_mut_slice()) } + unsafe { str::from_utf8_unchecked_mut(self.vec.as_mut_slice()) } } /// Returns a mutable reference to the contents of this `String`. @@ -203,7 +188,7 @@ where /// assert_eq!(s, "olleh"); /// ``` pub unsafe fn as_mut_vec(&mut self) -> &mut Vec { - &mut *(&mut self.0.vec as *mut crate::i::Vec> as *mut Vec) + &mut self.vec } /// Appends a given string slice onto the end of this `String`. 
@@ -214,9 +199,8 @@ where /// /// ``` /// use heapless::String; - /// use heapless::consts::*; /// - /// let mut s: String = String::from("foo"); + /// let mut s: String<8> = String::from("foo"); /// /// assert!(s.push_str("bar").is_ok()); /// @@ -226,7 +210,7 @@ where /// ``` #[inline] pub fn push_str(&mut self, string: &str) -> Result<(), ()> { - self.0.vec.extend_from_slice(string.as_bytes()) + self.vec.extend_from_slice(string.as_bytes()) } /// Returns the maximum number of elements the String can hold @@ -237,14 +221,13 @@ where /// /// ``` /// use heapless::String; - /// use heapless::consts::*; /// - /// let mut s: String = String::new(); + /// let mut s: String<4> = String::new(); /// assert!(s.capacity() == 4); /// ``` #[inline] pub fn capacity(&self) -> usize { - self.0.vec.capacity() + self.vec.capacity() } /// Appends the given [`char`] to the end of this `String`. @@ -257,9 +240,8 @@ where /// /// ``` /// use heapless::String; - /// use heapless::consts::*; /// - /// let mut s: String = String::from("abc"); + /// let mut s: String<8> = String::from("abc"); /// /// s.push('1').unwrap(); /// s.push('2').unwrap(); @@ -272,9 +254,8 @@ where #[inline] pub fn push(&mut self, c: char) -> Result<(), ()> { match c.len_utf8() { - 1 => self.0.vec.push(c as u8).map_err(|_| {}), + 1 => self.vec.push(c as u8).map_err(|_| {}), _ => self - .0 .vec .extend_from_slice(c.encode_utf8(&mut [0; 4]).as_bytes()), } @@ -300,9 +281,8 @@ where /// /// ``` /// use heapless::String; - /// use heapless::consts::*; /// - /// let mut s: String = String::from("hello"); + /// let mut s: String<8> = String::from("hello"); /// /// s.truncate(2); /// @@ -312,7 +292,7 @@ where pub fn truncate(&mut self, new_len: usize) { if new_len <= self.len() { assert!(self.is_char_boundary(new_len)); - self.0.vec.truncate(new_len) + self.vec.truncate(new_len) } } @@ -328,9 +308,8 @@ where /// /// ``` /// use heapless::String; - /// use heapless::consts::*; /// - /// let mut s: String = 
String::from("foo"); + /// let mut s: String<8> = String::from("foo"); /// /// assert_eq!(s.pop(), Some('o')); /// assert_eq!(s.pop(), Some('o')); @@ -344,7 +323,7 @@ where // pop bytes that correspond to `ch` for _ in 0..ch.len_utf8() { unsafe { - self.0.vec.pop_unchecked(); + self.vec.pop_unchecked(); } } @@ -362,9 +341,8 @@ where /// /// ``` /// use heapless::String; - /// use heapless::consts::*; /// - /// let mut s: String = String::from("foo"); + /// let mut s: String<8> = String::from("foo"); /// /// s.clear(); /// @@ -374,23 +352,17 @@ where /// ``` #[inline] pub fn clear(&mut self) { - self.0.vec.clear() + self.vec.clear() } } -impl Default for String -where - N: ArrayLength, -{ +impl Default for String { fn default() -> Self { Self::new() } } -impl<'a, N> From<&'a str> for String -where - N: ArrayLength, -{ +impl<'a, const N: usize> From<&'a str> for String { fn from(s: &'a str) -> Self { let mut new = String::new(); new.push_str(s).unwrap(); @@ -398,10 +370,7 @@ where } } -impl str::FromStr for String -where - N: ArrayLength, -{ +impl str::FromStr for String { type Err = (); fn from_str(s: &str) -> Result { @@ -411,59 +380,41 @@ where } } -impl Clone for String -where - N: ArrayLength, -{ +impl Clone for String { fn clone(&self) -> Self { - Self(crate::i::String { - vec: self.0.vec.clone(), - }) + Self { + vec: self.vec.clone(), + } } } -impl fmt::Debug for String -where - N: ArrayLength, -{ +impl fmt::Debug for String { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ::fmt(self, f) } } -impl fmt::Display for String -where - N: ArrayLength, -{ +impl fmt::Display for String { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ::fmt(self, f) } } -impl hash::Hash for String -where - N: ArrayLength, -{ +impl hash::Hash for String { #[inline] fn hash(&self, hasher: &mut H) { ::hash(self, hasher) } } -impl hash32::Hash for String -where - N: ArrayLength, -{ +impl hash32::Hash for String { #[inline] fn hash(&self, hasher: &mut H) { 
::hash(self, hasher) } } -impl fmt::Write for String -where - N: ArrayLength, -{ +impl fmt::Write for String { fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { self.push_str(s).map_err(|_| fmt::Error) } @@ -473,10 +424,7 @@ where } } -impl ops::Deref for String -where - N: ArrayLength, -{ +impl ops::Deref for String { type Target = str; fn deref(&self) -> &str { @@ -484,40 +432,27 @@ where } } -impl ops::DerefMut for String -where - N: ArrayLength, -{ +impl ops::DerefMut for String { fn deref_mut(&mut self) -> &mut str { self.as_mut_str() } } -impl AsRef for String -where - N: ArrayLength, -{ +impl AsRef for String { #[inline] fn as_ref(&self) -> &str { self } } -impl AsRef<[u8]> for String -where - N: ArrayLength, -{ +impl AsRef<[u8]> for String { #[inline] fn as_ref(&self) -> &[u8] { self.as_bytes() } } -impl PartialEq> for String -where - N1: ArrayLength, - N2: ArrayLength, -{ +impl PartialEq> for String { fn eq(&self, rhs: &String) -> bool { str::eq(&**self, &**rhs) } @@ -527,49 +462,99 @@ where } } -macro_rules! impl_eq { - ($lhs:ty, $rhs:ty) => { - impl<'a, 'b, N> PartialEq<$rhs> for $lhs - where - N: ArrayLength, - { - #[inline] - fn eq(&self, other: &$rhs) -> bool { - str::eq(&self[..], &other[..]) - } - #[inline] - fn ne(&self, other: &$rhs) -> bool { - str::ne(&self[..], &other[..]) - } - } +// macro_rules! 
impl_eq { +// ($lhs:ty, $rhs:ty) => { +// impl<'a, 'b, N> PartialEq<$rhs> for $lhs +// where +// N: ArrayLength, +// { +// #[inline] +// fn eq(&self, other: &$rhs) -> bool { +// str::eq(&self[..], &other[..]) +// } +// #[inline] +// fn ne(&self, other: &$rhs) -> bool { +// str::ne(&self[..], &other[..]) +// } +// } + +// impl<'a, 'b, N> PartialEq<$lhs> for $rhs +// where +// N: ArrayLength, +// { +// #[inline] +// fn eq(&self, other: &$lhs) -> bool { +// str::eq(&self[..], &other[..]) +// } +// #[inline] +// fn ne(&self, other: &$lhs) -> bool { +// str::ne(&self[..], &other[..]) +// } +// } +// }; +// } + +// String == str +impl PartialEq for String { + #[inline] + fn eq(&self, other: &str) -> bool { + str::eq(&self[..], &other[..]) + } + #[inline] + fn ne(&self, other: &str) -> bool { + str::ne(&self[..], &other[..]) + } +} - impl<'a, 'b, N> PartialEq<$lhs> for $rhs - where - N: ArrayLength, - { - #[inline] - fn eq(&self, other: &$lhs) -> bool { - str::eq(&self[..], &other[..]) - } - #[inline] - fn ne(&self, other: &$lhs) -> bool { - str::ne(&self[..], &other[..]) - } - } - }; +// String == &'str +impl PartialEq<&str> for String { + #[inline] + fn eq(&self, other: &&str) -> bool { + str::eq(&self[..], &other[..]) + } + #[inline] + fn ne(&self, other: &&str) -> bool { + str::ne(&self[..], &other[..]) + } } -impl_eq! { String, str } -impl_eq! 
{ String, &'a str } +// str == String +impl PartialEq> for str { + #[inline] + fn eq(&self, other: &String) -> bool { + str::eq(&self[..], &other[..]) + } + #[inline] + fn ne(&self, other: &String) -> bool { + str::ne(&self[..], &other[..]) + } +} -impl Eq for String where N: ArrayLength {} +// &'str == String +impl PartialEq> for &str { + #[inline] + fn eq(&self, other: &String) -> bool { + str::eq(&self[..], &other[..]) + } + #[inline] + fn ne(&self, other: &String) -> bool { + str::ne(&self[..], &other[..]) + } +} + +impl Eq for String {} + +// impl From for String { +// fn from(s: D) -> Self { +// let mut new = String::new(); +// write!(&mut new, "{}", s).unwrap(); +// new +// } +// } macro_rules! impl_from_num { - ($num:ty, $size:ty) => { - impl From<$num> for String - where - N: ArrayLength + IsGreaterOrEqual<$size, Output = True>, - { + ($num:ty, $size:expr) => { + impl From<$num> for String { fn from(s: $num) -> Self { let mut new = String::new(); write!(&mut new, "{}", s).unwrap(); @@ -579,28 +564,28 @@ macro_rules! 
impl_from_num { }; } -impl_from_num!(i8, U4); -impl_from_num!(i16, U6); -impl_from_num!(i32, U11); -impl_from_num!(i64, U20); +impl_from_num!(i8, 4); +impl_from_num!(i16, 6); +impl_from_num!(i32, 11); +impl_from_num!(i64, 20); -impl_from_num!(u8, U3); -impl_from_num!(u16, U5); -impl_from_num!(u32, U10); -impl_from_num!(u64, U20); +impl_from_num!(u8, 3); +impl_from_num!(u16, 5); +impl_from_num!(u32, 10); +impl_from_num!(u64, 20); #[cfg(test)] mod tests { - use crate::{consts::*, String, Vec}; + use crate::{String, Vec}; #[test] fn static_new() { - static mut _S: String = String(crate::i::String::new()); + static mut _S: String<8> = String::new(); } #[test] fn clone() { - let s1: String = String::from("abcd"); + let s1: String<20> = String::from("abcd"); let mut s2 = s1.clone(); s2.push_str(" efgh").unwrap(); @@ -612,7 +597,7 @@ mod tests { fn debug() { use core::fmt::Write; - let s: String = String::from("abcd"); + let s: String<8> = String::from("abcd"); let mut std_s = std::string::String::new(); write!(std_s, "{:?}", s).unwrap(); assert_eq!("\"abcd\"", std_s); @@ -622,7 +607,7 @@ mod tests { fn display() { use core::fmt::Write; - let s: String = String::from("abcd"); + let s: String<8> = String::from("abcd"); let mut std_s = std::string::String::new(); write!(std_s, "{}", s).unwrap(); assert_eq!("abcd", std_s); @@ -630,7 +615,7 @@ mod tests { #[test] fn empty() { - let s: String = String::new(); + let s: String<4> = String::new(); assert!(s.capacity() == 4); assert_eq!(s, ""); assert_eq!(s.len(), 0); @@ -639,7 +624,7 @@ mod tests { #[test] fn from() { - let s: String = String::from("123"); + let s: String<4> = String::from("123"); assert!(s.len() == 3); assert_eq!(s, "123"); } @@ -648,23 +633,23 @@ mod tests { fn from_str() { use core::str::FromStr; - let s: String = String::::from_str("123").unwrap(); + let s: String<4> = String::<4>::from_str("123").unwrap(); assert!(s.len() == 3); assert_eq!(s, "123"); - let e: () = String::::from_str("123").unwrap_err(); + 
let e: () = String::<2>::from_str("123").unwrap_err(); assert_eq!(e, ()); } #[test] #[should_panic] fn from_panic() { - let _: String = String::from("12345"); + let _: String<4> = String::from("12345"); } #[test] fn from_utf8() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); v.push('a' as u8).unwrap(); v.push('b' as u8).unwrap(); @@ -674,7 +659,7 @@ mod tests { #[test] fn from_utf8_uenc() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); v.push(240).unwrap(); v.push(159).unwrap(); v.push(146).unwrap(); @@ -685,7 +670,7 @@ mod tests { #[test] fn from_utf8_uenc_err() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); v.push(0).unwrap(); v.push(159).unwrap(); v.push(146).unwrap(); @@ -696,7 +681,7 @@ mod tests { #[test] fn from_utf8_unchecked() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); v.push(104).unwrap(); v.push(101).unwrap(); v.push(108).unwrap(); @@ -710,22 +695,21 @@ mod tests { #[test] fn from_num() { - let v = String::::from(18446744073709551615 as u64); - + let v: String<20> = String::from(18446744073709551615 as u64); assert_eq!(v, "18446744073709551615"); } #[test] fn into_bytes() { - let s: String = String::from("ab"); - let b: Vec = s.into_bytes(); + let s: String<4> = String::from("ab"); + let b: Vec = s.into_bytes(); assert_eq!(b.len(), 2); assert_eq!(&['a' as u8, 'b' as u8], &b[..]); } #[test] fn as_str() { - let s: String = String::from("ab"); + let s: String<4> = String::from("ab"); assert_eq!(s.as_str(), "ab"); // should be moved to fail test @@ -735,7 +719,7 @@ mod tests { #[test] fn as_mut_str() { - let mut s: String = String::from("ab"); + let mut s: String<4> = String::from("ab"); let s = s.as_mut_str(); s.make_ascii_uppercase(); assert_eq!(s, "AB"); @@ -743,16 +727,18 @@ mod tests { #[test] fn push_str() { - let mut s: String = String::from("foo"); + let mut s: String<8> = String::from("foo"); assert!(s.push_str("bar").is_ok()); assert_eq!("foobar", s); + assert_eq!(s, "foobar"); 
assert!(s.push_str("tender").is_err()); assert_eq!("foobar", s); + assert_eq!(s, "foobar"); } #[test] fn push() { - let mut s: String = String::from("abc"); + let mut s: String<6> = String::from("abc"); assert!(s.push('1').is_ok()); assert!(s.push('2').is_ok()); assert!(s.push('3').is_ok()); @@ -762,13 +748,13 @@ mod tests { #[test] fn as_bytes() { - let s: String = String::from("hello"); + let s: String<8> = String::from("hello"); assert_eq!(&[104, 101, 108, 108, 111], s.as_bytes()); } #[test] fn truncate() { - let mut s: String = String::from("hello"); + let mut s: String<8> = String::from("hello"); s.truncate(6); assert_eq!(s.len(), 5); s.truncate(2); @@ -779,7 +765,7 @@ mod tests { #[test] fn pop() { - let mut s: String = String::from("foo"); + let mut s: String<8> = String::from("foo"); assert_eq!(s.pop(), Some('o')); assert_eq!(s.pop(), Some('o')); assert_eq!(s.pop(), Some('f')); @@ -788,7 +774,7 @@ mod tests { #[test] fn pop_uenc() { - let mut s: String = String::from("é"); + let mut s: String<8> = String::from("é"); assert_eq!(s.len(), 3); match s.pop() { Some(c) => { @@ -802,7 +788,7 @@ mod tests { #[test] fn is_empty() { - let mut v: String = String::new(); + let mut v: String<8> = String::new(); assert!(v.is_empty()); let _ = v.push('a'); assert!(!v.is_empty()); @@ -810,7 +796,7 @@ mod tests { #[test] fn clear() { - let mut s: String = String::from("foo"); + let mut s: String<8> = String::from("foo"); s.clear(); assert!(s.is_empty()); assert_eq!(0, s.len()); diff --git a/src/ufmt.rs b/src/ufmt.rs index b3f30e6349..68cbcf7f99 100644 --- a/src/ufmt.rs +++ b/src/ufmt.rs @@ -1,25 +1,15 @@ use ufmt_write::uWrite; -use crate::{ - ArrayLength, - string::String, - vec::Vec, -}; +use crate::{string::String, vec::Vec}; -impl uWrite for String -where - N: ArrayLength, -{ +impl uWrite for String { type Error = (); fn write_str(&mut self, s: &str) -> Result<(), Self::Error> { self.push_str(s) } } -impl uWrite for Vec -where - N: ArrayLength, -{ +impl uWrite for Vec { 
type Error = (); fn write_str(&mut self, s: &str) -> Result<(), Self::Error> { self.extend_from_slice(s.as_bytes()) @@ -32,8 +22,6 @@ mod tests { use ufmt::{derive::uDebug, uwrite}; - use crate::consts::*; - #[derive(uDebug)] struct Pair { x: u32, @@ -45,7 +33,7 @@ mod tests { let a = 123; let b = Pair { x: 0, y: 1234 }; - let mut s = String::::new(); + let mut s = String::<32>::new(); uwrite!(s, "{} -> {:?}", a, b).unwrap(); assert_eq!(s, "123 -> Pair { x: 0, y: 1234 }"); @@ -54,7 +42,7 @@ mod tests { #[test] fn test_string_err() { let p = Pair { x: 0, y: 1234 }; - let mut s = String::::new(); + let mut s = String::<4>::new(); assert!(uwrite!(s, "{:?}", p).is_err()); } @@ -63,7 +51,7 @@ mod tests { let a = 123; let b = Pair { x: 0, y: 1234 }; - let mut v = Vec::::new(); + let mut v = Vec::::new(); uwrite!(v, "{} -> {:?}", a, b).unwrap(); assert_eq!(v, b"123 -> Pair { x: 0, y: 1234 }"); diff --git a/src/vec.rs b/src/vec.rs index 65121224ef..789be35f77 100644 --- a/src/vec.rs +++ b/src/vec.rs @@ -1,146 +1,16 @@ use core::{fmt, hash, iter::FromIterator, mem::MaybeUninit, ops, ptr, slice}; - -use generic_array::{ArrayLength, GenericArray}; use hash32; -impl crate::i::Vec { - /// `Vec` `const` constructor; wrap the returned value in [`Vec`](../struct.Vec.html) - pub const fn new() -> Self { - Self { - buffer: MaybeUninit::uninit(), - len: 0, - } - } -} - -impl crate::i::Vec> -where - N: ArrayLength, -{ - pub(crate) fn as_slice(&self) -> &[T] { - // NOTE(unsafe) avoid bound checks in the slicing operation - // &buffer[..self.len] - unsafe { slice::from_raw_parts(self.buffer.as_ptr() as *const T, self.len) } - } - - pub(crate) fn as_mut_slice(&mut self) -> &mut [T] { - // NOTE(unsafe) avoid bound checks in the slicing operation - // &mut buffer[..len] - unsafe { slice::from_raw_parts_mut(self.buffer.as_mut_ptr() as *mut T, self.len) } - } - - pub(crate) fn capacity(&self) -> usize { - N::to_usize() - } - - pub(crate) fn clear(&mut self) { - self.truncate(0); - } - - 
pub(crate) fn clone(&self) -> Self - where - T: Clone, - { - let mut new = Self::new(); - new.extend_from_slice(self.as_slice()).unwrap(); - new - } - - pub(crate) fn extend(&mut self, iter: I) - where - I: IntoIterator, - { - for elem in iter { - self.push(elem).ok().unwrap() - } - } - - pub(crate) fn extend_from_slice(&mut self, other: &[T]) -> Result<(), ()> - where - T: Clone, - { - if self.len + other.len() > self.capacity() { - // won't fit in the `Vec`; don't modify anything and return an error - Err(()) - } else { - for elem in other { - unsafe { - self.push_unchecked(elem.clone()); - } - } - Ok(()) - } - } - - pub(crate) fn is_full(&self) -> bool { - self.len == self.capacity() - } - - pub(crate) unsafe fn pop_unchecked(&mut self) -> T { - debug_assert!(!self.as_slice().is_empty()); - - self.len -= 1; - (self.buffer.as_ptr() as *const T).add(self.len).read() - } - - pub(crate) fn push(&mut self, item: T) -> Result<(), T> { - if self.len < self.capacity() { - unsafe { self.push_unchecked(item) } - Ok(()) - } else { - Err(item) - } - } - - pub(crate) unsafe fn push_unchecked(&mut self, item: T) { - // NOTE(ptr::write) the memory slot that we are about to write to is uninitialized. 
We - // use `ptr::write` to avoid running `T`'s destructor on the uninitialized memory - (self.buffer.as_mut_ptr() as *mut T) - .add(self.len) - .write(item); - - self.len += 1; - } - - unsafe fn swap_remove_unchecked(&mut self, index: usize) -> T { - let length = self.len; - debug_assert!(index < length); - ptr::swap( - self.as_mut_slice().get_unchecked_mut(index), - self.as_mut_slice().get_unchecked_mut(length - 1), - ); - self.pop_unchecked() - } - - pub(crate) fn swap_remove(&mut self, index: usize) -> T { - assert!(index < self.len); - unsafe { self.swap_remove_unchecked(index) } - } - - pub(crate) fn truncate(&mut self, len: usize) { - unsafe { - // drop any extra elements - while len < self.len { - // decrement len before the drop_in_place(), so a panic on Drop - // doesn't re-drop the just-failed value. - self.len -= 1; - let len = self.len; - ptr::drop_in_place(self.as_mut_slice().get_unchecked_mut(len)); - } - } - } -} - /// A fixed capacity [`Vec`](https://doc.rust-lang.org/std/vec/struct.Vec.html) /// /// # Examples /// /// ``` /// use heapless::Vec; -/// use heapless::consts::*; +/// /// /// // A vector with a fixed capacity of 8 elements allocated on the stack -/// let mut vec = Vec::<_, U8>::new(); +/// let mut vec = Vec::<_, 8>::new(); /// vec.push(1); /// vec.push(2); /// @@ -158,45 +28,33 @@ where /// for x in &vec { /// println!("{}", x); /// } -/// assert_eq!(vec, [7, 1, 2, 3]); +/// assert_eq!(*vec, [7, 1, 2, 3]); /// ``` -// repr(transparent) is needed for [`String::as_mut_vec`] -#[repr(transparent)] -pub struct Vec(#[doc(hidden)] pub crate::i::Vec>) -where - N: ArrayLength; - -impl Clone for Vec -where - N: ArrayLength, - T: Clone, -{ - fn clone(&self) -> Self { - Vec(self.0.clone()) - } +pub struct Vec { + buffer: MaybeUninit<[T; N]>, + len: usize, } -impl Vec -where - N: ArrayLength, -{ - /* Constructors */ +impl Vec { /// Constructs a new, empty vector with a fixed capacity of `N` /// /// # Examples /// /// ``` /// use heapless::Vec; - /// 
use heapless::consts::*; /// /// // allocate the vector on the stack - /// let mut x: Vec = Vec::new(); + /// let mut x: Vec = Vec::new(); /// /// // allocate the vector in a static variable - /// static mut X: Vec = Vec(heapless::i::Vec::new()); + /// static mut X: Vec = Vec::new(); /// ``` - pub fn new() -> Self { - Vec(crate::i::Vec::new()) + /// `Vec` `const` constructor; wrap the returned value in [`Vec`](../struct.Vec.html) + pub const fn new() -> Self { + Self { + buffer: MaybeUninit::uninit(), + len: 0, + } } /// Constructs a new vector with a fixed capacity of `N` and fills it @@ -206,9 +64,8 @@ where /// /// ``` /// use heapless::Vec; - /// use heapless::consts::*; /// - /// let mut v: Vec = Vec::new(); + /// let mut v: Vec = Vec::new(); /// v.extend_from_slice(&[1, 2, 3]).unwrap(); /// ``` #[inline] @@ -221,15 +78,74 @@ where Ok(v) } - /* Public API */ - /// Returns the maximum number of elements the vector can hold - pub fn capacity(&self) -> usize { - self.0.capacity() + /// Clones a vec into a new vec + pub(crate) fn clone(&self) -> Self + where + T: Clone, + { + let mut new = Self::new(); + new.extend_from_slice(self.as_slice()).unwrap(); + new + } + + /// Extracts a slice containing the entire vector. + /// + /// Equivalent to `&s[..]`. + /// + /// # Examples + /// + /// ``` + /// use heapless::Vec; + /// let buffer: Vec = Vec::from_slice(&[1, 2, 3, 5, 8]).unwrap(); + /// assert_eq!(buffer.as_slice(), &[1, 2, 3, 5, 8]); + /// ``` + pub fn as_slice(&self) -> &[T] { + // NOTE(unsafe) avoid bound checks in the slicing operation + // &buffer[..self.len] + unsafe { slice::from_raw_parts(self.buffer.as_ptr() as *const T, self.len) } + } + + /// Extracts a mutable slice containing the entire vector. + /// + /// Equivalent to `&s[..]`. 
+ /// + /// # Examples + /// + /// ``` + /// use heapless::Vec; + /// let mut buffer: Vec = Vec::from_slice(&[1, 2, 3, 5, 8]).unwrap(); + /// buffer[0] = 9; + /// assert_eq!(buffer.as_slice(), &[9, 2, 3, 5, 8]); + /// ``` + pub(crate) fn as_mut_slice(&mut self) -> &mut [T] { + // NOTE(unsafe) avoid bound checks in the slicing operation + // &mut buffer[..self.len] + unsafe { slice::from_raw_parts_mut(self.buffer.as_mut_ptr() as *mut T, self.len) } + } + + /// Returns the maximum number of elements the vector can hold. + pub const fn capacity(&self) -> usize { + N } /// Clears the vector, removing all values. + // PER: Check if non drop types correctly optimized. pub fn clear(&mut self) { - self.0.clear() + self.truncate(0); + } + + /// Extends the vec from an iterator. + /// + /// # Panic + /// + /// Panics if the vec cannot hold all elements of the iterator. + pub fn extend(&mut self, iter: I) + where + I: IntoIterator, + { + for elem in iter { + self.push(elem).ok().unwrap() + } } /// Clones and appends all elements in a slice to the `Vec`. 
@@ -241,9 +157,8 @@ where /// /// ``` /// use heapless::Vec; - /// use heapless::consts::*; /// - /// let mut vec = Vec::::new(); + /// let mut vec = Vec::::new(); /// vec.push(1).unwrap(); /// vec.extend_from_slice(&[2, 3, 4]).unwrap(); /// assert_eq!(*vec, [1, 2, 3, 4]); @@ -252,13 +167,23 @@ where where T: Clone, { - self.0.extend_from_slice(other) + if self.len + other.len() > self.capacity() { + // won't fit in the `Vec`; don't modify anything and return an error + Err(()) + } else { + for elem in other { + unsafe { + self.push_unchecked(elem.clone()); + } + } + Ok(()) + } } - /// Removes the last element from a vector and return it, or `None` if it's empty + /// Removes the last element from a vector and returns it, or `None` if it's empty pub fn pop(&mut self) -> Option { - if self.0.len != 0 { - Some(unsafe { self.0.pop_unchecked() }) + if self.len != 0 { + Some(unsafe { self.pop_unchecked() }) } else { None } @@ -268,23 +193,53 @@ where /// /// Returns back the `item` if the vector is full pub fn push(&mut self, item: T) -> Result<(), T> { - self.0.push(item) + if self.len < self.capacity() { + unsafe { self.push_unchecked(item) } + Ok(()) + } else { + Err(item) + } } - pub(crate) unsafe fn push_unchecked(&mut self, item: T) { - self.0.push_unchecked(item) + /// Removes the last element from a vector and returns it + /// + /// # Safety + /// + /// This assumes the vec to have at least one element. + pub(crate) unsafe fn pop_unchecked(&mut self) -> T { + debug_assert!(!self.as_slice().is_empty()); + + self.len -= 1; + (self.buffer.as_ptr() as *const T).add(self.len).read() + } + + /// Appends an `item` to the back of the collection + /// + /// # Safety + /// + /// This assumes the vec is not full. + pub unsafe fn push_unchecked(&mut self, item: T) { + // NOTE(ptr::write) the memory slot that we are about to write to is uninitialized. 
We + // use `ptr::write` to avoid running `T`'s destructor on the uninitialized memory + debug_assert!(!self.is_full()); + (self.buffer.as_mut_ptr() as *mut T) + .add(self.len) + .write(item); + + self.len += 1; } /// Shortens the vector, keeping the first `len` elements and dropping the rest. + // PER: Check that non drop types are correctly optimized pub fn truncate(&mut self, len: usize) { unsafe { // drop any extra elements - while len < self.len() { + while len < self.len { // decrement len before the drop_in_place(), so a panic on Drop // doesn't re-drop the just-failed value. - self.0.len -= 1; - let len = self.len(); - ptr::drop_in_place(self.get_unchecked_mut(len)); + self.len -= 1; + let len = self.len; + ptr::drop_in_place(self.as_mut_slice().get_unchecked_mut(len)); } } } @@ -304,8 +259,8 @@ where return Err(()); } - if new_len > self.len() { - while self.len() < new_len { + if new_len > self.len { + while self.len < new_len { self.push(value.clone()).ok(); } } else { @@ -356,7 +311,6 @@ where /// ```no_run /// # #![allow(dead_code)] /// use heapless::Vec; - /// use heapless::consts::*; /// /// # // This is just a minimal skeleton for the doc example; /// # // don't use this as a starting point for a real library. @@ -370,7 +324,7 @@ where /// # ) -> i32; /// # } /// # impl StreamWrapper { - /// pub fn get_dictionary(&self) -> Option> { + /// pub fn get_dictionary(&self) -> Option> { /// // Per the FFI method's docs, "32768 bytes is always enough". 
/// let mut dict = Vec::new(); /// let mut dict_length = 0; @@ -399,9 +353,9 @@ where /// ``` /// use core::iter::FromIterator; /// use heapless::Vec; - /// use heapless::consts::*; /// - /// let mut vec = Vec::, U3>::from_iter( + /// + /// let mut vec = Vec::, 3>::from_iter( /// [ /// Vec::from_iter([1, 0, 0].iter().cloned()), /// Vec::from_iter([0, 1, 0].iter().cloned()), @@ -423,7 +377,7 @@ where pub unsafe fn set_len(&mut self, new_len: usize) { debug_assert!(new_len <= self.capacity()); - self.0.len = new_len + self.len = new_len } /// Removes an element from the vector and returns it. @@ -440,9 +394,9 @@ where /// /// ``` /// use heapless::Vec; - /// use heapless::consts::*; + ///// use heapless::consts::*; /// - /// let mut v: Vec<_, U8> = Vec::new(); + /// let mut v: Vec<_, 8> = Vec::new(); /// v.push("foo").unwrap(); /// v.push("bar").unwrap(); /// v.push("baz").unwrap(); @@ -455,15 +409,51 @@ where /// assert_eq!(&*v, ["baz", "qux"]); /// ``` pub fn swap_remove(&mut self, index: usize) -> T { - self.0.swap_remove(index) + assert!(index < self.len); + unsafe { self.swap_remove_unchecked(index) } } - pub(crate) unsafe fn swap_remove_unchecked(&mut self, index: usize) -> T { - self.0.swap_remove_unchecked(index) + /// Removes an element from the vector and returns it. + /// + /// The removed element is replaced by the last element of the vector. + /// + /// This does not preserve ordering, but is O(1). + /// + /// # Safety + /// + /// Assumes `index` within bounds. 
+ /// + /// # Examples + /// + /// ``` + /// use heapless::Vec; + /// + /// let mut v: Vec<_, 8> = Vec::new(); + /// v.push("foo").unwrap(); + /// v.push("bar").unwrap(); + /// v.push("baz").unwrap(); + /// v.push("qux").unwrap(); + /// + /// assert_eq!(unsafe { v.swap_remove_unchecked(1) }, "bar"); + /// assert_eq!(&*v, ["foo", "qux", "baz"]); + /// + /// assert_eq!(unsafe { v.swap_remove_unchecked(0) }, "foo"); + /// assert_eq!(&*v, ["baz", "qux"]); + /// ``` + pub unsafe fn swap_remove_unchecked(&mut self, index: usize) -> T { + let length = self.len(); + debug_assert!(index < length); + ptr::swap( + self.as_mut_slice().get_unchecked_mut(index), + self.as_mut_slice().get_unchecked_mut(length - 1), + ); + self.pop_unchecked() } - pub(crate) fn is_full(&self) -> bool { - self.0.is_full() + /// Returns true if the vec is full + #[inline] + pub fn is_full(&self) -> bool { + self.len == self.capacity() } /// Returns `true` if `needle` is a prefix of the Vec. @@ -474,9 +464,8 @@ where /// /// ``` /// use heapless::Vec; - /// use heapless::consts::*; /// - /// let v: Vec<_, U8> = Vec::from_slice(b"abc").unwrap(); + /// let v: Vec<_, 8> = Vec::from_slice(b"abc").unwrap(); /// assert_eq!(v.starts_with(b""), true); /// assert_eq!(v.starts_with(b"ab"), true); /// assert_eq!(v.starts_with(b"bc"), false); @@ -487,7 +476,7 @@ where T: PartialEq, { let n = needle.len(); - self.len() >= n && needle == &self[..n] + self.len >= n && needle == &self[..n] } /// Returns `true` if `needle` is a suffix of the Vec. 
@@ -498,9 +487,8 @@ where /// /// ``` /// use heapless::Vec; - /// use heapless::consts::*; /// - /// let v: Vec<_, U8> = Vec::from_slice(b"abc").unwrap(); + /// let v: Vec<_, 8> = Vec::from_slice(b"abc").unwrap(); /// assert_eq!(v.ends_with(b""), true); /// assert_eq!(v.ends_with(b"ab"), false); /// assert_eq!(v.ends_with(b"bc"), true); @@ -515,29 +503,24 @@ where } } -impl Default for Vec -where - N: ArrayLength, -{ +// Trait implementations + +impl Default for Vec { fn default() -> Self { Self::new() } } -impl fmt::Debug for Vec +impl fmt::Debug for Vec where T: fmt::Debug, - N: ArrayLength, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { <[T] as fmt::Debug>::fmt(self, f) } } -impl fmt::Write for Vec -where - N: ArrayLength, -{ +impl fmt::Write for Vec { fn write_str(&mut self, s: &str) -> fmt::Result { match self.extend_from_slice(s.as_bytes()) { Ok(()) => Ok(()), @@ -546,31 +529,28 @@ where } } -impl Drop for Vec -where - N: ArrayLength, -{ +// PER: Please check if non drop types are correctly optimized +impl Drop for Vec { fn drop(&mut self) { - unsafe { ptr::drop_in_place(&mut self[..]) } + // We drop each element used in the vector by turning into a &mut[T] + unsafe { + ptr::drop_in_place(self.as_mut_slice()); + } } } -impl Extend for Vec -where - N: ArrayLength, -{ +impl Extend for Vec { fn extend(&mut self, iter: I) where I: IntoIterator, { - self.0.extend(iter) + self.extend(iter) } } -impl<'a, T, N> Extend<&'a T> for Vec +impl<'a, T, const N: usize> Extend<&'a T> for Vec where T: 'a + Copy, - N: ArrayLength, { fn extend(&mut self, iter: I) where @@ -580,30 +560,25 @@ where } } -impl hash::Hash for Vec +impl hash::Hash for Vec where T: core::hash::Hash, - N: ArrayLength, { fn hash(&self, state: &mut H) { <[T] as hash::Hash>::hash(self, state) } } -impl hash32::Hash for Vec +impl hash32::Hash for Vec where T: hash32::Hash, - N: ArrayLength, { fn hash(&self, state: &mut H) { <[T] as hash32::Hash>::hash(self, state) } } -impl<'a, T, N> 
IntoIterator for &'a Vec -where - N: ArrayLength, -{ +impl<'a, T, const N: usize> IntoIterator for &'a Vec { type Item = &'a T; type IntoIter = slice::Iter<'a, T>; @@ -612,10 +587,7 @@ where } } -impl<'a, T, N> IntoIterator for &'a mut Vec -where - N: ArrayLength, -{ +impl<'a, T, const N: usize> IntoIterator for &'a mut Vec { type Item = &'a mut T; type IntoIter = slice::IterMut<'a, T>; @@ -624,10 +596,7 @@ where } } -impl FromIterator for Vec -where - N: ArrayLength, -{ +impl FromIterator for Vec { fn from_iter(iter: I) -> Self where I: IntoIterator, @@ -646,26 +615,16 @@ where /// /// [`Vec`]: (https://doc.rust-lang.org/std/vec/struct.Vec.html) /// -pub struct IntoIter -where - N: ArrayLength, -{ +pub struct IntoIter { vec: Vec, next: usize, } -impl Iterator for IntoIter -where - N: ArrayLength, -{ +impl Iterator for IntoIter { type Item = T; fn next(&mut self) -> Option { if self.next < self.vec.len() { - let item = unsafe { - (self.vec.0.buffer.as_ptr() as *const T) - .add(self.next) - .read() - }; + let item = unsafe { (self.vec.buffer.as_ptr() as *const T).add(self.next).read() }; self.next += 1; Some(item) } else { @@ -674,10 +633,9 @@ where } } -impl Clone for IntoIter +impl Clone for IntoIter where T: Clone, - N: ArrayLength, { fn clone(&self) -> Self { Self { @@ -687,24 +645,19 @@ where } } -impl Drop for IntoIter -where - N: ArrayLength, -{ +// PER: is this correct +impl Drop for IntoIter { fn drop(&mut self) { unsafe { // Drop all the elements that have not been moved out of vec - ptr::drop_in_place(&mut self.vec[self.next..]); + ptr::drop_in_place(&mut self.vec.as_mut_slice()[self.next..]); // Prevent dropping of other elements - self.vec.0.len = 0; + self.vec.len = 0; } } } -impl IntoIterator for Vec -where - N: ArrayLength, -{ +impl IntoIterator for Vec { type Item = T; type IntoIter = IntoIter; @@ -713,10 +666,8 @@ where } } -impl PartialEq> for Vec +impl PartialEq> for Vec where - N1: ArrayLength, - N2: ArrayLength, A: PartialEq, { fn eq(&self, 
other: &Vec) -> bool { @@ -724,114 +675,126 @@ where } } -macro_rules! eq { - ($Lhs:ty, $Rhs:ty) => { - impl<'a, 'b, A, B, N> PartialEq<$Rhs> for $Lhs - where - A: PartialEq, - N: ArrayLength, - { - fn eq(&self, other: &$Rhs) -> bool { - <[A]>::eq(self, &other[..]) - } - } - }; +// Vec == [B] +impl PartialEq<[B]> for Vec +where + A: PartialEq, +{ + fn eq(&self, other: &[B]) -> bool { + <[A]>::eq(self, &other[..]) + } } -eq!(Vec, [B]); -eq!(Vec, &'a [B]); -eq!(Vec, &'a mut [B]); - -macro_rules! array { - ($($N:expr),+) => { - $( - eq!(Vec, [B; $N]); - eq!(Vec, &'a [B; $N]); - )+ +// Vec == &[B] +impl PartialEq<&[B]> for Vec +where + A: PartialEq, +{ + fn eq(&self, other: &&[B]) -> bool { + <[A]>::eq(self, &other[..]) } } -array!( - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, - 26, 27, 28, 29, 30, 31, 32 -); +// Vec == &mut [B] +impl PartialEq<&mut [B]> for Vec +where + A: PartialEq, +{ + fn eq(&self, other: &&mut [B]) -> bool { + <[A]>::eq(self, &other[..]) + } +} -impl Eq for Vec +// Vec == [B; M] +// Equality does not require equal capacity +impl PartialEq<[B; M]> for Vec where - N: ArrayLength, - T: Eq, + A: PartialEq, { + fn eq(&self, other: &[B; M]) -> bool { + <[A]>::eq(self, &other[..]) + } } -impl ops::Deref for Vec +// Vec == &[B; M] +// Equality does not require equal capacity +impl PartialEq<&[B; M]> for Vec where - N: ArrayLength, + A: PartialEq, { + fn eq(&self, other: &&[B; M]) -> bool { + <[A]>::eq(self, &other[..]) + } +} + +// Implements Eq if underlying data is Eq +impl Eq for Vec where T: Eq {} + +impl ops::Deref for Vec { type Target = [T]; fn deref(&self) -> &[T] { - self.0.as_slice() + self.as_slice() } } -impl ops::DerefMut for Vec -where - N: ArrayLength, -{ +impl ops::DerefMut for Vec { fn deref_mut(&mut self) -> &mut [T] { - self.0.as_mut_slice() + self.as_mut_slice() } } -impl AsRef> for Vec -where - N: ArrayLength, -{ +impl AsRef> for Vec { #[inline] fn as_ref(&self) -> &Self { self } } 
-impl AsMut> for Vec -where - N: ArrayLength, -{ +impl AsMut> for Vec { #[inline] fn as_mut(&mut self) -> &mut Self { self } } -impl AsRef<[T]> for Vec -where - N: ArrayLength, -{ +impl AsRef<[T]> for Vec { #[inline] fn as_ref(&self) -> &[T] { self } } -impl AsMut<[T]> for Vec -where - N: ArrayLength, -{ +impl AsMut<[T]> for Vec { #[inline] fn as_mut(&mut self) -> &mut [T] { self } } +impl Clone for Vec +where + T: Clone, +{ + fn clone(&self) -> Self { + Self::from_slice(self.as_slice()).unwrap() + } +} + #[cfg(test)] mod tests { - use crate::{consts::*, Vec}; - use as_slice::AsSlice; + use crate::Vec; + // use as_slice::AsSlice; use core::fmt::Write; #[test] fn static_new() { - static mut _V: Vec = Vec(crate::i::Vec::new()); + static mut _V: Vec = Vec::new(); + } + + #[test] + fn stack_new() { + static mut _V: Vec = Vec::new(); } macro_rules! droppable { @@ -862,7 +825,7 @@ mod tests { droppable!(); { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); v.pop().unwrap(); @@ -871,7 +834,7 @@ mod tests { assert_eq!(unsafe { COUNT }, 0); { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); v.push(Droppable::new()).ok().unwrap(); v.push(Droppable::new()).ok().unwrap(); } @@ -881,8 +844,8 @@ mod tests { #[test] fn eq() { - let mut xs: Vec = Vec::new(); - let mut ys: Vec = Vec::new(); + let mut xs: Vec = Vec::new(); + let mut ys: Vec = Vec::new(); assert_eq!(xs, ys); @@ -894,7 +857,7 @@ mod tests { #[test] fn full() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); v.push(0).unwrap(); v.push(1).unwrap(); @@ -906,7 +869,7 @@ mod tests { #[test] fn iter() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); v.push(0).unwrap(); v.push(1).unwrap(); @@ -924,7 +887,7 @@ mod tests { #[test] fn iter_mut() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); v.push(0).unwrap(); v.push(1).unwrap(); @@ -943,20 +906,21 @@ mod tests { #[test] fn collect_from_iter() { let slice = 
&[1, 2, 3]; - let vec = slice.iter().cloned().collect::>(); - assert_eq!(vec, slice); + let vec: Vec = slice.iter().cloned().collect(); + // PER: Auto deref did not work + assert_eq!(vec.as_slice(), slice); } #[test] #[should_panic] fn collect_from_iter_overfull() { let slice = &[1, 2, 3]; - let _vec = slice.iter().cloned().collect::>(); + let _vec = slice.iter().cloned().collect::>(); } #[test] fn iter_move() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); v.push(0).unwrap(); v.push(1).unwrap(); v.push(2).unwrap(); @@ -976,7 +940,7 @@ mod tests { droppable!(); { - let mut vec: Vec = Vec::new(); + let mut vec: Vec = Vec::new(); vec.push(Droppable::new()).ok().unwrap(); vec.push(Droppable::new()).ok().unwrap(); let mut items = vec.into_iter(); @@ -988,7 +952,7 @@ mod tests { assert_eq!(unsafe { COUNT }, 0); { - let mut vec: Vec = Vec::new(); + let mut vec: Vec = Vec::new(); vec.push(Droppable::new()).ok().unwrap(); vec.push(Droppable::new()).ok().unwrap(); let _items = vec.into_iter(); @@ -998,7 +962,7 @@ mod tests { assert_eq!(unsafe { COUNT }, 0); { - let mut vec: Vec = Vec::new(); + let mut vec: Vec = Vec::new(); vec.push(Droppable::new()).ok().unwrap(); vec.push(Droppable::new()).ok().unwrap(); let mut items = vec.into_iter(); @@ -1010,7 +974,7 @@ mod tests { #[test] fn push_and_pop() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); assert_eq!(v.len(), 0); assert_eq!(v.pop(), None); @@ -1028,7 +992,7 @@ mod tests { #[test] fn resize_size_limit() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); v.resize(0, 0).unwrap(); v.resize(4, 0).unwrap(); @@ -1037,7 +1001,7 @@ mod tests { #[test] fn resize_length_cases() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); assert_eq!(v.len(), 0); @@ -1064,7 +1028,7 @@ mod tests { #[test] fn resize_contents() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); // New entries take supplied value when growing v.resize(1, 17).unwrap(); @@ -1087,7 +1051,7 @@ mod 
tests { #[test] fn resize_default() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); // resize_default is implemented using resize, so just check the // correct value is being written. @@ -1097,14 +1061,14 @@ mod tests { #[test] fn write() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); write!(v, "{:x}", 1234).unwrap(); assert_eq!(&v[..], b"4d2"); } #[test] fn extend_from_slice() { - let mut v: Vec = Vec::new(); + let mut v: Vec = Vec::new(); assert_eq!(v.len(), 0); v.extend_from_slice(&[1, 2]).unwrap(); assert_eq!(v.len(), 2); @@ -1120,17 +1084,17 @@ mod tests { #[test] fn from_slice() { // Successful construction - let v: Vec = Vec::from_slice(&[1, 2, 3]).unwrap(); + let v: Vec = Vec::from_slice(&[1, 2, 3]).unwrap(); assert_eq!(v.len(), 3); assert_eq!(v.as_slice(), &[1, 2, 3]); // Slice too large - assert!(Vec::::from_slice(&[1, 2, 3]).is_err()); + assert!(Vec::::from_slice(&[1, 2, 3]).is_err()); } #[test] fn starts_with() { - let v: Vec<_, U8> = Vec::from_slice(b"ab").unwrap(); + let v: Vec<_, 8> = Vec::from_slice(b"ab").unwrap(); assert!(v.starts_with(&[])); assert!(v.starts_with(b"")); assert!(v.starts_with(b"a")); @@ -1142,7 +1106,7 @@ mod tests { #[test] fn ends_with() { - let v: Vec<_, U8> = Vec::from_slice(b"ab").unwrap(); + let v: Vec<_, 8> = Vec::from_slice(b"ab").unwrap(); assert!(v.ends_with(&[])); assert!(v.ends_with(b"")); assert!(v.ends_with(b"b")); diff --git a/tests/cpass.rs b/tests/cpass.rs index f834e9d1cb..775ffb3566 100644 --- a/tests/cpass.rs +++ b/tests/cpass.rs @@ -1,10 +1,6 @@ //! 
Collections of `Send`-able things are `Send` -use heapless::{ - consts, - spsc::{Consumer, Producer, Queue}, - Vec, HistoryBuffer, -}; +use heapless::Vec; #[test] fn send() { @@ -18,9 +14,9 @@ fn send() { { } - is_send::>(); - is_send::>(); - is_send::>(); - is_send::>(); - is_send::>(); + // is_send::>(); + // is_send::>(); + // is_send::>(); + is_send::>(); + // is_send::>(); } diff --git a/tests/tsan.rs b/tests/tsan.rs index cdef4df2e2..692b9ccd11 100644 --- a/tests/tsan.rs +++ b/tests/tsan.rs @@ -1,265 +1,265 @@ -#![deny(rust_2018_compatibility)] -#![deny(rust_2018_idioms)] -#![deny(warnings)] +// #![deny(rust_2018_compatibility)] +// #![deny(rust_2018_idioms)] +// #![deny(warnings)] -use std::{sync::mpsc, thread}; +// use std::{sync::mpsc, thread}; -use generic_array::typenum::Unsigned; -use heapless::{consts::*, mpmc::Q64, spsc}; -use scoped_threadpool::Pool; +// use generic_array::typenum::Unsigned; +// use heapless::{consts::*, mpmc::Q64, spsc}; +// use scoped_threadpool::Pool; -#[test] -fn once() { - static mut RB: spsc::Queue = spsc::Queue(heapless::i::Queue::new()); +// #[test] +// fn once() { +// static mut RB: spsc::Queue = spsc::Queue(heapless::i::Queue::new()); - let rb = unsafe { &mut RB }; +// let rb = unsafe { &mut RB }; - rb.enqueue(0).unwrap(); +// rb.enqueue(0).unwrap(); - let (mut p, mut c) = rb.split(); +// let (mut p, mut c) = rb.split(); - p.enqueue(1).unwrap(); +// p.enqueue(1).unwrap(); - thread::spawn(move || { - p.enqueue(1).unwrap(); - }); +// thread::spawn(move || { +// p.enqueue(1).unwrap(); +// }); - thread::spawn(move || { - c.dequeue().unwrap(); - }); -} +// thread::spawn(move || { +// c.dequeue().unwrap(); +// }); +// } -#[test] -fn twice() { - static mut RB: spsc::Queue = spsc::Queue(heapless::i::Queue::new()); +// #[test] +// fn twice() { +// static mut RB: spsc::Queue = spsc::Queue(heapless::i::Queue::new()); - let rb = unsafe { &mut RB }; +// let rb = unsafe { &mut RB }; - rb.enqueue(0).unwrap(); - rb.enqueue(1).unwrap(); +// 
rb.enqueue(0).unwrap(); +// rb.enqueue(1).unwrap(); - let (mut p, mut c) = rb.split(); +// let (mut p, mut c) = rb.split(); - thread::spawn(move || { - p.enqueue(2).unwrap(); - p.enqueue(3).unwrap(); - }); +// thread::spawn(move || { +// p.enqueue(2).unwrap(); +// p.enqueue(3).unwrap(); +// }); - thread::spawn(move || { - c.dequeue().unwrap(); - c.dequeue().unwrap(); - }); -} +// thread::spawn(move || { +// c.dequeue().unwrap(); +// c.dequeue().unwrap(); +// }); +// } -#[test] -fn scoped() { - let mut rb: spsc::Queue = spsc::Queue::new(); +// #[test] +// fn scoped() { +// let mut rb: spsc::Queue = spsc::Queue::new(); - rb.enqueue(0).unwrap(); +// rb.enqueue(0).unwrap(); - { - let (mut p, mut c) = rb.split(); +// { +// let (mut p, mut c) = rb.split(); - Pool::new(2).scoped(move |scope| { - scope.execute(move || { - p.enqueue(1).unwrap(); - }); +// Pool::new(2).scoped(move |scope| { +// scope.execute(move || { +// p.enqueue(1).unwrap(); +// }); - scope.execute(move || { - c.dequeue().unwrap(); - }); - }); - } +// scope.execute(move || { +// c.dequeue().unwrap(); +// }); +// }); +// } - rb.dequeue().unwrap(); -} +// rb.dequeue().unwrap(); +// } -#[test] -fn contention() { - type N = U1024; +// #[test] +// fn contention() { +// type N = U1024; - let mut rb: spsc::Queue = spsc::Queue::new(); +// let mut rb: spsc::Queue = spsc::Queue::new(); - { - let (mut p, mut c) = rb.split(); +// { +// let (mut p, mut c) = rb.split(); - Pool::new(2).scoped(move |scope| { - scope.execute(move || { - let mut sum: u32 = 0; +// Pool::new(2).scoped(move |scope| { +// scope.execute(move || { +// let mut sum: u32 = 0; - for i in 0..(2 * N::to_u32()) { - sum = sum.wrapping_add(i); - while let Err(_) = p.enqueue(i as u8) {} - } +// for i in 0..(2 * N::to_u32()) { +// sum = sum.wrapping_add(i); +// while let Err(_) = p.enqueue(i as u8) {} +// } - println!("producer: {}", sum); - }); +// println!("producer: {}", sum); +// }); - scope.execute(move || { - let mut sum: u32 = 0; +// 
scope.execute(move || { +// let mut sum: u32 = 0; - for _ in 0..(2 * N::to_u32()) { - loop { - match c.dequeue() { - Some(v) => { - sum = sum.wrapping_add(v as u32); - break; - } - _ => {} - } - } - } - - println!("consumer: {}", sum); - }); - }); - } - - assert!(rb.is_empty()); -} - -#[test] -fn mpmc_contention() { - const N: u32 = 64; - - static Q: Q64 = Q64::new(); - - let (s, r) = mpsc::channel(); - Pool::new(2).scoped(|scope| { - let s1 = s.clone(); - scope.execute(move || { - let mut sum: u32 = 0; - - for i in 0..(16 * N) { - sum = sum.wrapping_add(i); - while let Err(_) = Q.enqueue(i) {} - } - - s1.send(sum).unwrap(); - }); - - let s2 = s.clone(); - scope.execute(move || { - let mut sum: u32 = 0; - - for _ in 0..(16 * N) { - loop { - match Q.dequeue() { - Some(v) => { - sum = sum.wrapping_add(v); - break; - } - _ => {} - } - } - } - - s2.send(sum).unwrap(); - }); - }); - - assert_eq!(r.recv().unwrap(), r.recv().unwrap()); -} - -#[test] -fn unchecked() { - type N = U1024; - - let mut rb: spsc::Queue = spsc::Queue::new(); - - for _ in 0..N::to_usize() / 2 { - rb.enqueue(1).unwrap(); - } - - { - let (mut p, mut c) = rb.split(); - - Pool::new(2).scoped(move |scope| { - scope.execute(move || { - for _ in 0..N::to_usize() / 2 { - unsafe { - p.enqueue_unchecked(2); - } - } - }); - - scope.execute(move || { - let mut sum: usize = 0; - - for _ in 0..N::to_usize() / 2 { - sum = sum.wrapping_add(usize::from(unsafe { c.dequeue_unchecked() })); - } - - assert_eq!(sum, N::to_usize() / 2); - }); - }); - } - - assert_eq!(rb.len(), N::to_usize() / 2); -} - -#[test] -fn len_properly_wraps() { - type N = U3; - let mut rb: spsc::Queue = spsc::Queue::new(); - - rb.enqueue(1).unwrap(); - assert_eq!(rb.len(), 1); - rb.dequeue(); - assert_eq!(rb.len(), 0); - rb.enqueue(2).unwrap(); - assert_eq!(rb.len(), 1); - rb.enqueue(3).unwrap(); - assert_eq!(rb.len(), 2); - rb.enqueue(4).unwrap(); - assert_eq!(rb.len(), 3); -} - -#[test] -fn iterator_properly_wraps() { - type N = U3; - let mut 
rb: spsc::Queue = spsc::Queue::new(); - - rb.enqueue(1).unwrap(); - rb.dequeue(); - rb.enqueue(2).unwrap(); - rb.enqueue(3).unwrap(); - rb.enqueue(4).unwrap(); - let expected = [2, 3, 4]; - let mut actual = [0, 0, 0]; - for (idx, el) in rb.iter().enumerate() { - actual[idx] = *el; - } - assert_eq!(expected, actual) -} - -#[test] -fn pool() { - use heapless::pool::singleton::Pool as _; - - static mut M: [u8; (N + 1) * 8] = [0; (N + 1) * 8]; - const N: usize = 16 * 1024; - heapless::pool!(A: [u8; 8]); - - A::grow(unsafe { &mut M }); - - Pool::new(2).scoped(move |scope| { - scope.execute(move || { - for _ in 0..N / 4 { - let a = A::alloc().unwrap(); - let b = A::alloc().unwrap(); - drop(a); - let b = b.init([1; 8]); - drop(b); - } - }); - - scope.execute(move || { - for _ in 0..N / 2 { - let a = A::alloc().unwrap(); - let a = a.init([2; 8]); - drop(a); - } - }); - }); -} +// for _ in 0..(2 * N::to_u32()) { +// loop { +// match c.dequeue() { +// Some(v) => { +// sum = sum.wrapping_add(v as u32); +// break; +// } +// _ => {} +// } +// } +// } + +// println!("consumer: {}", sum); +// }); +// }); +// } + +// assert!(rb.is_empty()); +// } + +// #[test] +// fn mpmc_contention() { +// const N: u32 = 64; + +// static Q: Q64 = Q64::new(); + +// let (s, r) = mpsc::channel(); +// Pool::new(2).scoped(|scope| { +// let s1 = s.clone(); +// scope.execute(move || { +// let mut sum: u32 = 0; + +// for i in 0..(16 * N) { +// sum = sum.wrapping_add(i); +// while let Err(_) = Q.enqueue(i) {} +// } + +// s1.send(sum).unwrap(); +// }); + +// let s2 = s.clone(); +// scope.execute(move || { +// let mut sum: u32 = 0; + +// for _ in 0..(16 * N) { +// loop { +// match Q.dequeue() { +// Some(v) => { +// sum = sum.wrapping_add(v); +// break; +// } +// _ => {} +// } +// } +// } + +// s2.send(sum).unwrap(); +// }); +// }); + +// assert_eq!(r.recv().unwrap(), r.recv().unwrap()); +// } + +// #[test] +// fn unchecked() { +// type N = U1024; + +// let mut rb: spsc::Queue = spsc::Queue::new(); + +// for 
_ in 0..N::to_usize() / 2 { +// rb.enqueue(1).unwrap(); +// } + +// { +// let (mut p, mut c) = rb.split(); + +// Pool::new(2).scoped(move |scope| { +// scope.execute(move || { +// for _ in 0..N::to_usize() / 2 { +// unsafe { +// p.enqueue_unchecked(2); +// } +// } +// }); + +// scope.execute(move || { +// let mut sum: usize = 0; + +// for _ in 0..N::to_usize() / 2 { +// sum = sum.wrapping_add(usize::from(unsafe { c.dequeue_unchecked() })); +// } + +// assert_eq!(sum, N::to_usize() / 2); +// }); +// }); +// } + +// assert_eq!(rb.len(), N::to_usize() / 2); +// } + +// #[test] +// fn len_properly_wraps() { +// type N = U3; +// let mut rb: spsc::Queue = spsc::Queue::new(); + +// rb.enqueue(1).unwrap(); +// assert_eq!(rb.len(), 1); +// rb.dequeue(); +// assert_eq!(rb.len(), 0); +// rb.enqueue(2).unwrap(); +// assert_eq!(rb.len(), 1); +// rb.enqueue(3).unwrap(); +// assert_eq!(rb.len(), 2); +// rb.enqueue(4).unwrap(); +// assert_eq!(rb.len(), 3); +// } + +// #[test] +// fn iterator_properly_wraps() { +// type N = U3; +// let mut rb: spsc::Queue = spsc::Queue::new(); + +// rb.enqueue(1).unwrap(); +// rb.dequeue(); +// rb.enqueue(2).unwrap(); +// rb.enqueue(3).unwrap(); +// rb.enqueue(4).unwrap(); +// let expected = [2, 3, 4]; +// let mut actual = [0, 0, 0]; +// for (idx, el) in rb.iter().enumerate() { +// actual[idx] = *el; +// } +// assert_eq!(expected, actual) +// } + +// #[test] +// fn pool() { +// use heapless::pool::singleton::Pool as _; + +// static mut M: [u8; (N + 1) * 8] = [0; (N + 1) * 8]; +// const N: usize = 16 * 1024; +// heapless::pool!(A: [u8; 8]); + +// A::grow(unsafe { &mut M }); + +// Pool::new(2).scoped(move |scope| { +// scope.execute(move || { +// for _ in 0..N / 4 { +// let a = A::alloc().unwrap(); +// let b = A::alloc().unwrap(); +// drop(a); +// let b = b.init([1; 8]); +// drop(b); +// } +// }); + +// scope.execute(move || { +// for _ in 0..N / 2 { +// let a = A::alloc().unwrap(); +// let a = a.init([2; 8]); +// drop(a); +// } +// }); +// }); +// } 
From 6dcedb89f3e8c9f745a97b27e38104b6ff63079e Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Thu, 25 Mar 2021 16:51:38 +0100 Subject: [PATCH 14/37] Fixes for min const generics --- Cargo.toml | 3 +-- src/binary_heap.rs | 24 +++++++++--------------- src/i.rs | 40 ---------------------------------------- src/lib.rs | 7 +------ src/linear_map.rs | 39 ++++++++------------------------------- 5 files changed, 19 insertions(+), 94 deletions(-) delete mode 100644 src/i.rs diff --git a/Cargo.toml b/Cargo.toml index 38b3988175..47a508bd09 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,8 +32,7 @@ __trybuild = [] scoped_threadpool = "0.1.8" [dependencies] -as-slice = "0.1.0" -# generic-array = "0.13.0" +as-slice = "0.2.0" hash32 = "0.1.0" [dependencies.serde] diff --git a/src/binary_heap.rs b/src/binary_heap.rs index 15d16aa948..caddf46d08 100644 --- a/src/binary_heap.rs +++ b/src/binary_heap.rs @@ -72,20 +72,12 @@ pub enum Max {} /// assert!(heap.is_empty()) /// ``` -pub struct BinaryHeap -where - T: Ord, - K: Kind, -{ +pub struct BinaryHeap { pub(crate) _kind: PhantomData, pub(crate) data: Vec, } -impl BinaryHeap -where - T: Ord, - K: Kind, -{ +impl BinaryHeap { /* Constructors */ /// Creates an empty BinaryHeap as a $K-heap. /// @@ -105,7 +97,13 @@ where data: Vec::new(), } } +} +impl BinaryHeap +where + T: Ord, + K: Kind, +{ /* Public API */ /// Returns the capacity of the binary heap. pub fn capacity(&self) -> usize { @@ -512,11 +510,7 @@ where } } -impl Drop for BinaryHeap -where - K: Kind, - T: Ord, -{ +impl Drop for BinaryHeap { fn drop(&mut self) { unsafe { ptr::drop_in_place(self.data.as_mut_slice()) } } diff --git a/src/i.rs b/src/i.rs deleted file mode 100644 index b8a88a907c..0000000000 --- a/src/i.rs +++ /dev/null @@ -1,40 +0,0 @@ -//! 
Unfortunate implementation detail required to construct `heapless` types in const context - -use core::{marker::PhantomData, mem::MaybeUninit}; - -#[cfg(has_atomics)] -use crate::spsc::{Atomic, MultiCore}; - -/// `const-fn` version of [`BinaryHeap`](../binary_heap/struct.BinaryHeap.html) -pub struct BinaryHeap { - pub(crate) _kind: PhantomData, - pub(crate) data: Vec, -} - -/// `const-fn` version of [`LinearMap`](../struct.LinearMap.html) -pub struct LinearMap { - pub(crate) buffer: Vec, -} - -/// `const-fn` version of [`spsc::Queue`](../spsc/struct.Queue.html) -#[cfg(has_atomics)] -pub struct Queue { - // this is from where we dequeue items - pub(crate) head: Atomic, - - // this is where we enqueue new items - pub(crate) tail: Atomic, - - pub(crate) buffer: MaybeUninit, -} - -/// `const-fn` version of [`String`](../struct.String.html) -pub struct String { - pub(crate) vec: Vec, -} - -/// `const-fn` version of [`Vec`](../struct.Vec.html) -pub struct Vec { - pub(crate) buffer: MaybeUninit, - pub(crate) len: usize, -} diff --git a/src/lib.rs b/src/lib.rs index 612ecc0210..c46190523e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -67,8 +67,6 @@ //! It *might* compile on older versions but that may change in any new patch release. 
// experimental usage of const generics, requires nightly 2020-08-18 (or newer) -#![feature(min_const_generics)] -#![feature(const_fn)] #![cfg_attr(not(test), no_std)] #![deny(missing_docs)] #![deny(rust_2018_compatibility)] @@ -76,8 +74,6 @@ // #![deny(warnings)] pub use binary_heap::BinaryHeap; -// pub use generic_array::typenum::{consts, PowerOfTwo}; -// pub use generic_array::ArrayLength; pub use histbuf::HistoryBuffer; pub use indexmap::{Bucket, FnvIndexMap, IndexMap, Pos}; pub use indexset::{FnvIndexSet, IndexSet}; @@ -99,10 +95,9 @@ mod de; mod ser; pub mod binary_heap; -// pub mod i; #[cfg(all(has_cas, feature = "cas"))] pub mod mpmc; -// #[cfg(all(has_cas, feature = "cas"))] +#[cfg(all(has_cas, feature = "cas"))] pub mod pool; #[cfg(has_atomics)] pub mod spsc; diff --git a/src/linear_map.rs b/src/linear_map.rs index 35cfdd8a4b..b1adcdfb6b 100644 --- a/src/linear_map.rs +++ b/src/linear_map.rs @@ -12,17 +12,11 @@ use crate::Vec; /// /// Note that as this map doesn't use hashing so most operations are **O(N)** instead of O(1) -pub struct LinearMap -where - K: Eq, -{ +pub struct LinearMap { pub(crate) buffer: Vec<(K, V), N>, } -impl LinearMap -where - K: Eq, -{ +impl LinearMap { /// Creates an empty `LinearMap` /// /// # Examples @@ -39,7 +33,12 @@ where pub const fn new() -> Self { Self { buffer: Vec::new() } } +} +impl LinearMap +where + K: Eq, +{ /// Returns the number of elements that the map can hold /// /// Computes in **O(1)** time @@ -437,25 +436,6 @@ where } } -// TODO: Why is this needed at all, no example, no test... 
I don't get it -// impl IntoIterator for LinearMap -// where -// K: Eq, -// { -// type Item = (K, V); -// type IntoIter = IntoIter; - -// fn into_iter(mut self) -> Self::IntoIter { -// // FIXME this may result in a memcpy at runtime -// let lm = mem::replace(&mut self, unsafe { MaybeUninit::uninit().assume_init() }); -// mem::forget(self); - -// Self::IntoIter { -// inner: lm.buffer.into_iter(), -// } -// } -// } - impl<'a, K, V, const N: usize> IntoIterator for &'a LinearMap where K: Eq, @@ -488,10 +468,7 @@ impl<'a, K, V> Clone for Iter<'a, K, V> { } } -impl Drop for LinearMap -where - K: Eq, -{ +impl Drop for LinearMap { fn drop(&mut self) { // heapless::Vec implements drop right? drop(&self.buffer); From 305de719f6c44906d866f54d5cd87b4a3a52e4be Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Thu, 25 Mar 2021 17:52:15 +0100 Subject: [PATCH 15/37] Fixed warnings --- src/de.rs | 8 +------- src/indexset.rs | 2 +- src/linear_map.rs | 9 +-------- src/ser.rs | 13 ++++--------- src/vec.rs | 1 - 5 files changed, 7 insertions(+), 26 deletions(-) diff --git a/src/de.rs b/src/de.rs index 7da79d5821..229a355ff8 100644 --- a/src/de.rs +++ b/src/de.rs @@ -1,13 +1,7 @@ -//! 
missing doc - use core::{fmt, marker::PhantomData}; - -// use generic_array::{typenum::PowerOfTwo, ArrayLength}; use hash32::{BuildHasherDefault, Hash, Hasher}; use serde::de::{self, Deserialize, Deserializer, Error, MapAccess, SeqAccess}; - use crate::{ - indexmap::{Bucket, Pos}, sealed::binary_heap::Kind as BinaryHeapKind, BinaryHeap, IndexMap, IndexSet, LinearMap, String, Vec, }; @@ -230,7 +224,7 @@ impl<'de, const N:usize> Deserialize<'de> for String where D: Deserializer<'de>, { - struct ValueVisitor<'de, const N:usize>(PhantomData<(&'de ())>); + struct ValueVisitor<'de, const N:usize>(PhantomData<&'de ()>); impl<'de, const N:usize > de::Visitor<'de> for ValueVisitor<'de, N> { diff --git a/src/indexset.rs b/src/indexset.rs index 75d34dc53f..bd36df4161 100644 --- a/src/indexset.rs +++ b/src/indexset.rs @@ -1,4 +1,4 @@ -use crate::indexmap::{self, Bucket, IndexMap, Pos}; +use crate::indexmap::{self, IndexMap}; use core::{borrow::Borrow, fmt, iter::FromIterator}; use hash32::{BuildHasher, BuildHasherDefault, FnvHasher, Hash, Hasher}; diff --git a/src/linear_map.rs b/src/linear_map.rs index b1adcdfb6b..d096f210a2 100644 --- a/src/linear_map.rs +++ b/src/linear_map.rs @@ -1,12 +1,5 @@ -use core::{ - borrow::Borrow, - fmt, - iter::FromIterator, - mem::{self, MaybeUninit}, - ops, ptr, slice, -}; - use crate::Vec; +use core::{borrow::Borrow, fmt, iter::FromIterator, mem, ops, slice}; /// A fixed capacity map / dictionary that performs lookups via linear search /// diff --git a/src/ser.rs b/src/ser.rs index 32bc980c76..7214f096db 100644 --- a/src/ser.rs +++ b/src/ser.rs @@ -1,14 +1,9 @@ -//! 
missing doc - -// use generic_array::{typenum::PowerOfTwo, ArrayLength}; -use hash32::{BuildHasher, Hash}; -use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer}; - use crate::{ - indexmap::{Bucket, Pos}, - sealed::binary_heap::Kind as BinaryHeapKind, - BinaryHeap, IndexMap, IndexSet, LinearMap, String, Vec, + sealed::binary_heap::Kind as BinaryHeapKind, BinaryHeap, IndexMap, IndexSet, LinearMap, String, + Vec, }; +use hash32::{BuildHasher, Hash}; +use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer}; // Sequential containers diff --git a/src/vec.rs b/src/vec.rs index aaf6f11c8f..f15aaeb7be 100644 --- a/src/vec.rs +++ b/src/vec.rs @@ -792,7 +792,6 @@ where #[cfg(test)] mod tests { use crate::Vec; - use as_slice::AsSlice; use core::fmt::Write; #[test] From 3de878b09eb45a6736142da6a48d2ca42394643b Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Thu, 25 Mar 2021 19:42:21 +0100 Subject: [PATCH 16/37] Cleanup --- src/indexmap.rs | 16 +++++++++------- src/indexset.rs | 3 ++- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/indexmap.rs b/src/indexmap.rs index 6f77d306d0..c2e5d459b6 100644 --- a/src/indexmap.rs +++ b/src/indexmap.rs @@ -127,25 +127,27 @@ macro_rules! 
probe_loop { } struct CoreMap -where - K: Eq + Hash, { entries: Vec, N>, indices: [Option; N], } impl CoreMap -where - K: Eq + Hash, { - // TODO turn into a `const fn`; needs `mem::zeroed` to be a `const fn` - fn new() -> Self { + const fn new() -> Self { + const INIT: Option = None; + CoreMap { entries: Vec::new(), - indices: unsafe { MaybeUninit::zeroed().assume_init() }, + indices: [INIT; N], } } +} +impl CoreMap +where + K: Eq + Hash, +{ fn capacity() -> usize { N } diff --git a/src/indexset.rs b/src/indexset.rs index bd36df4161..232564717a 100644 --- a/src/indexset.rs +++ b/src/indexset.rs @@ -2,7 +2,6 @@ use crate::indexmap::{self, IndexMap}; use core::{borrow::Borrow, fmt, iter::FromIterator}; use hash32::{BuildHasher, BuildHasherDefault, FnvHasher, Hash, Hasher}; -// TODO: We don't enforce the power of 2 currently (part of generic array bounds) /// A [`heapless::IndexSet`](./struct.IndexSet.html) using the /// default FNV hasher. /// A list of all Methods and Traits available for `FnvIndexSet` can be found in @@ -89,6 +88,8 @@ where { /// Creates an empty `IndexSet` pub fn new() -> Self { + assert!(N.is_power_of_two()); + IndexSet { map: IndexMap::new(), } From f7cb3e575f4efdf3913abf15a46dafce0213d3a7 Mon Sep 17 00:00:00 2001 From: Roman Proskuryakov Date: Sun, 28 Mar 2021 09:48:34 +0300 Subject: [PATCH 17/37] Fix fmt --- src/de.rs | 32 +++++++++++++------------------- src/indexmap.rs | 6 ++---- src/ufmt.rs | 2 +- tests/cpass.rs | 2 +- tests/tsan.rs | 4 ++-- 5 files changed, 19 insertions(+), 27 deletions(-) diff --git a/src/de.rs b/src/de.rs index 229a355ff8..73dcf0e4a3 100644 --- a/src/de.rs +++ b/src/de.rs @@ -1,10 +1,10 @@ +use crate::{ + sealed::binary_heap::Kind as BinaryHeapKind, BinaryHeap, IndexMap, IndexSet, LinearMap, String, + Vec, +}; use core::{fmt, marker::PhantomData}; use hash32::{BuildHasherDefault, Hash, Hasher}; use serde::de::{self, Deserialize, Deserializer, Error, MapAccess, SeqAccess}; -use crate::{ - sealed::binary_heap::Kind as 
BinaryHeapKind, - BinaryHeap, IndexMap, IndexSet, LinearMap, String, Vec, -}; // Sequential containers @@ -142,9 +142,9 @@ where where D: Deserializer<'de>, { - struct ValueVisitor<'de, K, V, S, const N:usize>(PhantomData<(&'de (), K, V, S)>); + struct ValueVisitor<'de, K, V, S, const N: usize>(PhantomData<(&'de (), K, V, S)>); - impl<'de, K, V, S, const N:usize> de::Visitor<'de> for ValueVisitor<'de, K, V, S, N> + impl<'de, K, V, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, S, N> where K: Eq + Hash + Deserialize<'de>, V: Deserialize<'de>, @@ -175,7 +175,7 @@ where } } -impl<'de, K, V, const N:usize> Deserialize<'de> for LinearMap +impl<'de, K, V, const N: usize> Deserialize<'de> for LinearMap where K: Eq + Deserialize<'de>, V: Deserialize<'de>, @@ -184,9 +184,9 @@ where where D: Deserializer<'de>, { - struct ValueVisitor<'de, K, V, const N:usize>(PhantomData<(&'de (), K, V)>); + struct ValueVisitor<'de, K, V, const N: usize>(PhantomData<(&'de (), K, V)>); - impl<'de, K, V, const N:usize> de::Visitor<'de> for ValueVisitor<'de, K, V, N> + impl<'de, K, V, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, N> where K: Eq + Deserialize<'de>, V: Deserialize<'de>, @@ -218,24 +218,18 @@ where // String containers -impl<'de, const N:usize> Deserialize<'de> for String -{ +impl<'de, const N: usize> Deserialize<'de> for String { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { - struct ValueVisitor<'de, const N:usize>(PhantomData<&'de ()>); + struct ValueVisitor<'de, const N: usize>(PhantomData<&'de ()>); - impl<'de, const N:usize > de::Visitor<'de> for ValueVisitor<'de, N> - { + impl<'de, const N: usize> de::Visitor<'de> for ValueVisitor<'de, N> { type Value = String; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - formatter, - "a string no more than {} bytes long", - N as u64 - ) + write!(formatter, "a string no more than {} bytes long", N as u64) } fn visit_str(self, v: &str) -> Result 
diff --git a/src/indexmap.rs b/src/indexmap.rs index c2e5d459b6..6636dede02 100644 --- a/src/indexmap.rs +++ b/src/indexmap.rs @@ -126,14 +126,12 @@ macro_rules! probe_loop { } } -struct CoreMap -{ +struct CoreMap { entries: Vec, N>, indices: [Option; N], } -impl CoreMap -{ +impl CoreMap { const fn new() -> Self { const INIT: Option = None; diff --git a/src/ufmt.rs b/src/ufmt.rs index 84d87a4d23..30da99acd4 100644 --- a/src/ufmt.rs +++ b/src/ufmt.rs @@ -1,5 +1,5 @@ -use ufmt_write::uWrite; use crate::{string::String, vec::Vec}; +use ufmt_write::uWrite; impl uWrite for String { type Error = (); diff --git a/tests/cpass.rs b/tests/cpass.rs index 24b138e599..18d88d194b 100644 --- a/tests/cpass.rs +++ b/tests/cpass.rs @@ -1,7 +1,7 @@ //! Collections of `Send`-able things are `Send` use heapless::{ - spsc::{Consumer, Producer, Queue, MultiCore}, + spsc::{Consumer, MultiCore, Producer, Queue}, HistoryBuffer, Vec, }; diff --git a/tests/tsan.rs b/tests/tsan.rs index 6e9f264b38..7da1de7bdc 100644 --- a/tests/tsan.rs +++ b/tests/tsan.rs @@ -188,7 +188,7 @@ fn unchecked() { scope.execute(move || { let mut sum: usize = 0; - for _ in 0..N/ 2 { + for _ in 0..N / 2 { sum = sum.wrapping_add(usize::from(unsafe { c.dequeue_unchecked() })); } @@ -197,7 +197,7 @@ fn unchecked() { }); } - assert_eq!(rb.len(), N/ 2); + assert_eq!(rb.len(), N / 2); } #[test] From a3f4d8f41eeeb1f9ce1b773ca900ce39de558230 Mon Sep 17 00:00:00 2001 From: Roman Proskuryakov Date: Sun, 28 Mar 2021 09:49:34 +0300 Subject: [PATCH 18/37] Update minimal Rust version from 1.31 to 1.51 --- .github/workflows/build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e7c5fbd5aa..1b6eb17b58 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -105,7 +105,7 @@ jobs: toolchain: - stable - nightly - - 1.36.0 + - 1.51.0 features: - serde buildtype: @@ -242,7 +242,7 @@ jobs: - name: Install Rust uses: 
actions-rs/toolchain@v1 with: - toolchain: 1.36.0 + toolchain: 1.51.0 target: x86_64-unknown-linux-gnu override: true From 92cc062a4c83c116d238c077a3c362003b3dd709 Mon Sep 17 00:00:00 2001 From: Roman Proskuryakov Date: Sun, 28 Mar 2021 10:22:19 +0300 Subject: [PATCH 19/37] Fix cargo test without cargo features enabled --- src/pool/singleton.rs | 2 ++ tests/tsan.rs | 1 + 2 files changed, 3 insertions(+) diff --git a/src/pool/singleton.rs b/src/pool/singleton.rs index 17763df436..ccfa2de955 100644 --- a/src/pool/singleton.rs +++ b/src/pool/singleton.rs @@ -15,12 +15,14 @@ use as_slice::{AsMutSlice, AsSlice}; use super::{Init, Node, Uninit}; /// Instantiates a pool as a global singleton +// NOTE(any(test)) makes testing easier (no need to enable Cargo features for testing) #[cfg(any( armv7a, armv7r, armv7m, armv8m_main, all(target_arch = "x86_64", feature = "x86-sync-pool"), + test ))] #[macro_export] macro_rules! pool { diff --git a/tests/tsan.rs b/tests/tsan.rs index 7da1de7bdc..9ea18add39 100644 --- a/tests/tsan.rs +++ b/tests/tsan.rs @@ -235,6 +235,7 @@ fn iterator_properly_wraps() { assert_eq!(expected, actual) } +#[cfg(all(target_arch = "x86_64", feature = "x86-sync-pool"))] #[test] fn pool() { use heapless::pool::singleton::Pool as _; From 7fb276f6dcc7b978116e9c8b12aa67e9f7860802 Mon Sep 17 00:00:00 2001 From: Roman Proskuryakov Date: Sun, 28 Mar 2021 10:23:24 +0300 Subject: [PATCH 20/37] Fix warnings in src/indexmap.rs --- src/indexmap.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/indexmap.rs b/src/indexmap.rs index 6636dede02..6b4064a3e4 100644 --- a/src/indexmap.rs +++ b/src/indexmap.rs @@ -1,11 +1,4 @@ -use core::{ - borrow::Borrow, - fmt, - iter::FromIterator, - mem::{self, MaybeUninit}, - num::NonZeroU32, - ops, slice, -}; +use core::{borrow::Borrow, fmt, iter::FromIterator, mem, num::NonZeroU32, ops, slice}; use hash32::{BuildHasher, BuildHasherDefault, FnvHasher, Hash, Hasher}; From 
5bde47c214829c22c56b57dfbbb6868a5c9c55b4 Mon Sep 17 00:00:00 2001 From: Roman Proskuryakov Date: Sun, 28 Mar 2021 10:58:00 +0300 Subject: [PATCH 21/37] Fix cfail tests --- cfail/ui/freeze.rs | 4 +- cfail/ui/not-send.rs | 9 ++- cfail/ui/not-send.stderr | 122 +++++++++++++++++---------------------- 3 files changed, 60 insertions(+), 75 deletions(-) diff --git a/cfail/ui/freeze.rs b/cfail/ui/freeze.rs index d1adc1f1f7..b5e5ace6cf 100644 --- a/cfail/ui/freeze.rs +++ b/cfail/ui/freeze.rs @@ -1,7 +1,7 @@ -use heapless::{consts, spsc::Queue}; +use heapless::{spsc::Queue}; fn main() { - let mut q: Queue = Queue::new(); + let mut q: Queue = Queue::new(); let (_p, mut _c) = q.split(); q.enqueue(0).unwrap(); diff --git a/cfail/ui/not-send.rs b/cfail/ui/not-send.rs index cdd1359412..f5ea87c6b7 100644 --- a/cfail/ui/not-send.rs +++ b/cfail/ui/not-send.rs @@ -3,7 +3,6 @@ use core::marker::PhantomData; use heapless::{ - consts, spsc::{Consumer, Producer, Queue}, }; @@ -16,8 +15,8 @@ where } fn main() { - is_send::>(); - is_send::>(); - is_send::>(); - is_send::>(); + is_send::>(); + is_send::>(); + is_send::>(); + is_send::>(); } diff --git a/cfail/ui/not-send.stderr b/cfail/ui/not-send.stderr index a1813b629b..878e924353 100644 --- a/cfail/ui/not-send.stderr +++ b/cfail/ui/not-send.stderr @@ -1,83 +1,69 @@ error[E0277]: `*const ()` cannot be sent between threads safely - --> $DIR/not-send.rs:19:5 - | -19 | is_send::>(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely + --> $DIR/not-send.rs:18:5 | - = help: within `std::marker::PhantomData<*const ()>`, the trait `std::marker::Send` is not implemented for `*const ()` - = note: required because it appears within the type `std::marker::PhantomData<*const ()>` - = note: required because of the requirements on the impl of `std::marker::Send` for `heapless::spsc::split::Consumer<'_, std::marker::PhantomData<*const ()>, typenum::uint::UInt, typenum::bit::B0>, typenum::bit::B0>>` -note: 
required by `is_send` - --> $DIR/not-send.rs:12:1 +11 | fn is_send() + | ------- required by a bound in this +12 | where +13 | T: Send, + | ---- required by this bound in `is_send` +... +18 | is_send::>(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely | -12 | / fn is_send() -13 | | where -14 | | T: Send, -15 | | { -16 | | } - | |_^ + = help: within `PhantomData<*const ()>`, the trait `Send` is not implemented for `*const ()` + = note: required because it appears within the type `PhantomData<*const ()>` + = note: required because of the requirements on the impl of `Send` for `Consumer<'_, PhantomData<*const ()>, _, _, 4_usize>` error[E0277]: `*const ()` cannot be sent between threads safely - --> $DIR/not-send.rs:20:5 - | -20 | is_send::>(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely + --> $DIR/not-send.rs:19:5 | - = help: within `std::marker::PhantomData<*const ()>`, the trait `std::marker::Send` is not implemented for `*const ()` - = note: required because it appears within the type `std::marker::PhantomData<*const ()>` - = note: required because of the requirements on the impl of `std::marker::Send` for `heapless::spsc::split::Producer<'_, std::marker::PhantomData<*const ()>, typenum::uint::UInt, typenum::bit::B0>, typenum::bit::B0>>` -note: required by `is_send` - --> $DIR/not-send.rs:12:1 +11 | fn is_send() + | ------- required by a bound in this +12 | where +13 | T: Send, + | ---- required by this bound in `is_send` +... 
+19 | is_send::>(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely | -12 | / fn is_send() -13 | | where -14 | | T: Send, -15 | | { -16 | | } - | |_^ + = help: within `PhantomData<*const ()>`, the trait `Send` is not implemented for `*const ()` + = note: required because it appears within the type `PhantomData<*const ()>` + = note: required because of the requirements on the impl of `Send` for `Producer<'_, PhantomData<*const ()>, _, _, 4_usize>` error[E0277]: `*const ()` cannot be sent between threads safely - --> $DIR/not-send.rs:21:5 - | -21 | is_send::>(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely + --> $DIR/not-send.rs:20:5 | - = help: within `std::marker::PhantomData<*const ()>`, the trait `std::marker::Send` is not implemented for `*const ()` - = note: required because it appears within the type `std::marker::PhantomData<*const ()>` - = note: required because of the requirements on the impl of `std::marker::Send` for `generic_array::GenericArray, typenum::uint::UInt, typenum::bit::B0>, typenum::bit::B0>>` - = note: required because it appears within the type `std::mem::ManuallyDrop, typenum::uint::UInt, typenum::bit::B0>, typenum::bit::B0>>>` - = note: required because it appears within the type `std::mem::MaybeUninit, typenum::uint::UInt, typenum::bit::B0>, typenum::bit::B0>>>` - = note: required because it appears within the type `heapless::i::Queue, typenum::uint::UInt, typenum::bit::B0>, typenum::bit::B0>>>` - = note: required because it appears within the type `heapless::spsc::Queue, typenum::uint::UInt, typenum::bit::B0>, typenum::bit::B0>>` -note: required by `is_send` - --> $DIR/not-send.rs:12:1 +11 | fn is_send() + | ------- required by a bound in this +12 | where +13 | T: Send, + | ---- required by this bound in `is_send` +... 
+20 | is_send::>(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely | -12 | / fn is_send() -13 | | where -14 | | T: Send, -15 | | { -16 | | } - | |_^ + = help: within `Queue, _, _, 4_usize>`, the trait `Send` is not implemented for `*const ()` + = note: required because it appears within the type `PhantomData<*const ()>` + = note: required because it appears within the type `[PhantomData<*const ()>; 4]` + = note: required because it appears within the type `ManuallyDrop<[PhantomData<*const ()>; 4]>` + = note: required because it appears within the type `MaybeUninit<[PhantomData<*const ()>; 4]>` + = note: required because it appears within the type `Queue, _, _, 4_usize>` error[E0277]: `*const ()` cannot be sent between threads safely - --> $DIR/not-send.rs:22:5 - | -22 | is_send::>(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely + --> $DIR/not-send.rs:21:5 | - = help: within `std::marker::PhantomData<*const ()>`, the trait `std::marker::Send` is not implemented for `*const ()` - = note: required because it appears within the type `std::marker::PhantomData<*const ()>` - = note: required because of the requirements on the impl of `std::marker::Send` for `generic_array::GenericArray, typenum::uint::UInt, typenum::bit::B0>, typenum::bit::B0>>` - = note: required because it appears within the type `std::mem::ManuallyDrop, typenum::uint::UInt, typenum::bit::B0>, typenum::bit::B0>>>` - = note: required because it appears within the type `std::mem::MaybeUninit, typenum::uint::UInt, typenum::bit::B0>, typenum::bit::B0>>>` - = note: required because it appears within the type `heapless::i::Vec, typenum::uint::UInt, typenum::bit::B0>, typenum::bit::B0>>>` - = note: required because it appears within the type `heapless::vec::Vec, typenum::uint::UInt, typenum::bit::B0>, typenum::bit::B0>>` -note: required by `is_send` - --> $DIR/not-send.rs:12:1 +11 | fn is_send() + | ------- 
required by a bound in this +12 | where +13 | T: Send, + | ---- required by this bound in `is_send` +... +21 | is_send::>(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely | -12 | / fn is_send() -13 | | where -14 | | T: Send, -15 | | { -16 | | } - | |_^ + = help: within `heapless::Vec, 4_usize>`, the trait `Send` is not implemented for `*const ()` + = note: required because it appears within the type `PhantomData<*const ()>` + = note: required because it appears within the type `[PhantomData<*const ()>; 4]` + = note: required because it appears within the type `ManuallyDrop<[PhantomData<*const ()>; 4]>` + = note: required because it appears within the type `MaybeUninit<[PhantomData<*const ()>; 4]>` + = note: required because it appears within the type `heapless::Vec, 4_usize>` From 1444990e529403737bc6aeccf8480219f957c956 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Thu, 1 Apr 2021 19:14:40 +0200 Subject: [PATCH 22/37] Cleanup and HistoryBuffer is now const and using MaybeUninit --- Cargo.toml | 1 + src/histbuf.rs | 117 +++++++++++++++++++++++++++++++----------------- src/indexmap.rs | 1 - src/lib.rs | 5 +-- src/spsc/mod.rs | 2 +- 5 files changed, 81 insertions(+), 45 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 2c95e459f3..e7ba77f30c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,7 @@ authors = [ "Jorge Aparicio ", "Per Lindgren ", + "Emil Fresk ", ] categories = [ "data-structures", diff --git a/src/histbuf.rs b/src/histbuf.rs index df67844f97..4fed656562 100644 --- a/src/histbuf.rs +++ b/src/histbuf.rs @@ -1,14 +1,13 @@ +use core::mem::MaybeUninit; +use core::ptr; +use core::slice; + /// A "history buffer", similar to a write-only ring buffer of fixed length. /// /// This buffer keeps a fixed number of elements. On write, the oldest element /// is overwritten. Thus, the buffer is useful to keep a history of values with /// some desired depth, and for example calculate a rolling average. 
/// -/// The buffer is always fully initialized; depending on the constructor, the -/// initial value is either the default value for the element type or a supplied -/// initial value. This simplifies the API and is mostly irrelevant for the -/// intended use case. -/// /// # Examples /// ``` /// use heapless::HistoryBuffer; @@ -16,34 +15,35 @@ /// // Initialize a new buffer with 8 elements, all initially zero. /// let mut buf = HistoryBuffer::<_, 8>::new(); /// +/// // Starts with no data +/// assert_eq!(buf.recent(), None); +/// /// buf.write(3); /// buf.write(5); /// buf.extend(&[4, 4]); /// /// // The most recent written element is a four. -/// assert_eq!(buf.recent(), &4); +/// assert_eq!(buf.recent(), Some(&4)); /// /// // To access all elements in an unspecified order, use `as_slice()`. /// for el in buf.as_slice() { println!("{:?}", el); } /// -/// // Now we can prepare an average of all values, which comes out to 2. +/// // Now we can prepare an average of all values, which comes out to 4. /// let avg = buf.as_slice().iter().sum::() / buf.len(); -/// assert_eq!(avg, 2); +/// assert_eq!(avg, 4); /// ``` -#[derive(Clone)] pub struct HistoryBuffer { - data: [T; N], + data: [MaybeUninit; N], write_at: usize, + filled: bool, } -impl HistoryBuffer -where - T: Default + Copy, -{ - /// Constructs a new history buffer, where every element is filled with the - /// default value of the type `T`. +impl HistoryBuffer { + const INIT: MaybeUninit = MaybeUninit::uninit(); + + /// Constructs a new history buffer. /// - /// `HistoryBuffer` currently cannot be constructed in `const` context. + /// The construction of a `HistoryBuffer` works in `const` contexts. 
/// /// # Examples /// @@ -51,16 +51,15 @@ where /// use heapless::HistoryBuffer; /// /// // Allocate a 16-element buffer on the stack - /// let mut x: HistoryBuffer = HistoryBuffer::new(); - /// // All elements are zero - /// assert_eq!(x.as_slice(), [0; 16]); + /// let x: HistoryBuffer = HistoryBuffer::new(); + /// assert_eq!(x.len(), 0); /// ``` - pub fn new() -> Self { + #[inline] + pub const fn new() -> Self { Self { - // seems not yet implemented - // data: Default::default(), - data: [T::default(); N], + data: [Self::INIT; N], write_at: 0, + filled: false, } } @@ -87,10 +86,12 @@ where /// // All elements are four /// assert_eq!(x.as_slice(), [4; 16]); /// ``` + #[inline] pub fn new_with(t: T) -> Self { Self { - data: [t; N], + data: [MaybeUninit::new(t); N], write_at: 0, + filled: true, } } @@ -101,18 +102,35 @@ where } impl HistoryBuffer { + /// Returns the current fill level of the buffer. + #[inline] + pub fn len(&self) -> usize { + if self.filled { + N + } else { + self.write_at + } + } + /// Returns the capacity of the buffer, which is the length of the /// underlying backing array. - pub fn len(&self) -> usize { - self.data.len() + #[inline] + pub fn capacity(&self) -> usize { + N } /// Writes an element to the buffer, overwriting the oldest value. pub fn write(&mut self, t: T) { - self.data[self.write_at] = t; + if self.filled { + // Drop the old before we overwrite it. 
+ unsafe { ptr::drop_in_place(self.data[self.write_at].as_mut_ptr()) } + } + self.data[self.write_at] = MaybeUninit::new(t); + self.write_at += 1; - if self.write_at == self.len() { + if self.write_at == self.capacity() { self.write_at = 0; + self.filled = true; } } @@ -139,20 +157,28 @@ impl HistoryBuffer { /// let mut x: HistoryBuffer = HistoryBuffer::new(); /// x.write(4); /// x.write(10); - /// assert_eq!(x.recent(), &10); + /// assert_eq!(x.recent(), Some(&10)); /// ``` - pub fn recent(&self) -> &T { + pub fn recent(&self) -> Option<&T> { if self.write_at == 0 { - &self.data[self.len() - 1] + if self.filled { + Some(unsafe { &*self.data[self.capacity() - 1].as_ptr() }) + } else { + None + } } else { - &self.data[self.write_at - 1] + Some(unsafe { &*self.data[self.write_at - 1].as_ptr() }) } } /// Returns the array slice backing the buffer, without keeping track /// of the write position. Therefore, the element order is unspecified. pub fn as_slice(&self) -> &[T] { - &self.data + if self.filled { + unsafe { slice::from_raw_parts(self.data.as_ptr() as *const _, self.capacity()) } + } else { + unsafe { slice::from_raw_parts(self.data.as_ptr() as *const _, self.write_at) } + } } } @@ -179,6 +205,17 @@ where } } +impl Drop for HistoryBuffer { + fn drop(&mut self) { + unsafe { + ptr::drop_in_place(ptr::slice_from_raw_parts_mut( + self.data.as_mut_ptr() as *mut T, + self.len(), + )) + } + } +} + #[cfg(test)] mod tests { use crate::HistoryBuffer; @@ -190,7 +227,7 @@ mod tests { assert_eq!(x.as_slice(), [1; 4]); let x: HistoryBuffer = HistoryBuffer::new(); - assert_eq!(x.as_slice(), [0; 4]); + assert_eq!(x.as_slice(), []); } #[test] @@ -198,7 +235,7 @@ mod tests { let mut x: HistoryBuffer = HistoryBuffer::new(); x.write(1); x.write(4); - assert_eq!(x.as_slice(), [1, 4, 0, 0]); + assert_eq!(x.as_slice(), [1, 4]); x.write(5); x.write(6); @@ -213,7 +250,7 @@ mod tests { fn clear() { let mut x: HistoryBuffer = HistoryBuffer::new_with(1); x.clear(); - 
assert_eq!(x.as_slice(), [0; 4]); + assert_eq!(x.as_slice(), []); let mut x: HistoryBuffer = HistoryBuffer::new(); x.clear_with(1); @@ -223,16 +260,16 @@ mod tests { #[test] fn recent() { let mut x: HistoryBuffer = HistoryBuffer::new(); - assert_eq!(x.recent(), &0); + assert_eq!(x.recent(), None); x.write(1); x.write(4); - assert_eq!(x.recent(), &4); + assert_eq!(x.recent(), Some(&4)); x.write(5); x.write(6); x.write(10); - assert_eq!(x.recent(), &10); + assert_eq!(x.recent(), Some(&10)); } #[test] diff --git a/src/indexmap.rs b/src/indexmap.rs index 6b4064a3e4..20b1b5e47e 100644 --- a/src/indexmap.rs +++ b/src/indexmap.rs @@ -375,7 +375,6 @@ where K: Eq + Hash, S: Default + Hasher, { - // TODO turn into a `const fn`; needs `mem::zeroed` to be a `const fn` /// Creates an empty `IndexMap`. /// /// **NOTE** This constructor will become a `const fn` in the future diff --git a/src/lib.rs b/src/lib.rs index c46190523e..54b4f65319 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -63,15 +63,14 @@ //! //! # Minimum Supported Rust Version (MSRV) //! -//! This crate is guaranteed to compile on stable Rust 1.36 and up with its default set of features. +//! This crate is guaranteed to compile on stable Rust 1.51 and up with its default set of features. //! It *might* compile on older versions but that may change in any new patch release. -// experimental usage of const generics, requires nightly 2020-08-18 (or newer) #![cfg_attr(not(test), no_std)] #![deny(missing_docs)] #![deny(rust_2018_compatibility)] #![deny(rust_2018_idioms)] -// #![deny(warnings)] +#![deny(warnings)] pub use binary_heap::BinaryHeap; pub use histbuf::HistoryBuffer; diff --git a/src/spsc/mod.rs b/src/spsc/mod.rs index 0aea97b0c0..ef7820b0e1 100644 --- a/src/spsc/mod.rs +++ b/src/spsc/mod.rs @@ -142,7 +142,7 @@ where /// A statically allocated single producer single consumer queue with a capacity of `N` elements /// -/// *IMPORTANT*: To get better performance use a capacity that is a power of 2 (e.g. 
`U16`, `U32`, +/// *IMPORTANT*: To get better performance use a capacity that is a power of 2 (e.g. `16`, `32`, /// etc.). /// /// By default `spsc::Queue` will use `usize` integers to hold the indices to its head and tail. For From 12682bdfd86e5b89ca718efc51c4fb4b410206cf Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Thu, 1 Apr 2021 19:43:27 +0200 Subject: [PATCH 23/37] Removed unnecessary String interface, already covered by `core` --- src/de.rs | 14 +++--- src/string.rs | 129 +------------------------------------------------- 2 files changed, 9 insertions(+), 134 deletions(-) diff --git a/src/de.rs b/src/de.rs index 73dcf0e4a3..32a4d333db 100644 --- a/src/de.rs +++ b/src/de.rs @@ -246,13 +246,15 @@ impl<'de, const N: usize> Deserialize<'de> for String { where E: de::Error, { - let mut bytes = Vec::new(); - if bytes.extend_from_slice(v).is_err() { - return Err(E::invalid_value(de::Unexpected::Bytes(v), &self)); - } + let mut s = String::new(); - String::from_utf8(bytes) - .map_err(|_| E::invalid_value(de::Unexpected::Bytes(v), &self)) + s.push_str( + core::str::from_utf8(v) + .map_err(|_| E::invalid_value(de::Unexpected::Bytes(v), &self))?, + ) + .map_err(|_| E::invalid_length(v.len(), &self))?; + + Ok(s) } } diff --git a/src/string.rs b/src/string.rs index d0076ee67f..23735fcea3 100644 --- a/src/string.rs +++ b/src/string.rs @@ -1,11 +1,4 @@ -use core::{ - fmt, - fmt::Write, - hash, - mem::{self, MaybeUninit}, - ops, str, - str::Utf8Error, -}; +use core::{fmt, fmt::Write, hash, ops, str}; use hash32; @@ -16,13 +9,6 @@ pub struct String { vec: Vec, } -// impl String { -// /// `String` `const` constructor; wrap the returned value in [`String`](../struct.String.html) -// pub const fn new() -> Self { -// Self { vec: Vec::new() } -// } -// } - impl String { /// Constructs a new, empty `String` with a fixed capacity of `N` /// @@ -44,65 +30,6 @@ impl String { Self { vec: Vec::new() } } - /// Converts a vector of bytes into a `String`. 
- /// - /// A string slice ([`&str`]) is made of bytes ([`u8`]), and a vector of bytes - /// ([`Vec`]) is made of bytes, so this function converts between the - /// two. Not all byte slices are valid `String`s, however: `String` - /// requires that it is valid UTF-8. `from_utf8()` checks to ensure that - /// the bytes are valid UTF-8, and then does the conversion. - /// - /// See std::String for further information. - /// - /// # Examples - /// - /// Basic usage: - /// - /// ``` - /// use heapless::{String, Vec}; - /// - /// let mut v: Vec = Vec::new(); - /// v.push('a' as u8).unwrap(); - /// v.push('b' as u8).unwrap(); - /// - /// let s = String::from_utf8(v).unwrap(); - /// assert!(s.len() == 2); - /// ``` - /// - /// Incorrect bytes: - /// - /// ``` - /// use heapless::{String, Vec}; - /// - /// // some invalid bytes, in a vector - /// - /// let mut v: Vec = Vec::new(); - /// v.push(0).unwrap(); - /// v.push(159).unwrap(); - /// v.push(146).unwrap(); - /// v.push(150).unwrap(); - /// assert!(String::from_utf8(v).is_err()); - /// ``` - #[inline] - pub fn from_utf8(vec: Vec) -> Result, Utf8Error> { - // validate input - str::from_utf8(&*vec)?; - - Ok(unsafe { String::from_utf8_unchecked(vec) }) - } - - /// Converts a vector of bytes to a `String` without checking that the - /// string contains valid UTF-8. - /// - /// See the safe version, `from_utf8`, for more details. - #[inline] - pub unsafe fn from_utf8_unchecked(mut vec: Vec) -> String { - // FIXME this may result in a memcpy at runtime - let vec_ = mem::replace(&mut vec, MaybeUninit::uninit().assume_init()); - mem::forget(vec); - String { vec: vec_ } - } - /// Converts a `String` into a byte vector. /// /// This consumes the `String`, so we do not need to copy its contents. @@ -544,14 +471,6 @@ impl PartialEq> for &str { impl Eq for String {} -// impl From for String { -// fn from(s: D) -> Self { -// let mut new = String::new(); -// write!(&mut new, "{}", s).unwrap(); -// new -// } -// } - macro_rules! 
impl_from_num { ($num:ty, $size:expr) => { impl From<$num> for String { @@ -647,52 +566,6 @@ mod tests { let _: String<4> = String::from("12345"); } - #[test] - fn from_utf8() { - let mut v: Vec = Vec::new(); - v.push('a' as u8).unwrap(); - v.push('b' as u8).unwrap(); - - let s = String::from_utf8(v).unwrap(); - assert_eq!(s, "ab"); - } - - #[test] - fn from_utf8_uenc() { - let mut v: Vec = Vec::new(); - v.push(240).unwrap(); - v.push(159).unwrap(); - v.push(146).unwrap(); - v.push(150).unwrap(); - - assert!(String::from_utf8(v).is_ok()); - } - - #[test] - fn from_utf8_uenc_err() { - let mut v: Vec = Vec::new(); - v.push(0).unwrap(); - v.push(159).unwrap(); - v.push(146).unwrap(); - v.push(150).unwrap(); - - assert!(String::from_utf8(v).is_err()); - } - - #[test] - fn from_utf8_unchecked() { - let mut v: Vec = Vec::new(); - v.push(104).unwrap(); - v.push(101).unwrap(); - v.push(108).unwrap(); - v.push(108).unwrap(); - v.push(111).unwrap(); - - let s = unsafe { String::from_utf8_unchecked(v) }; - - assert_eq!(s, "hello"); - } - #[test] fn from_num() { let v: String<20> = String::from(18446744073709551615 as u64); From 870a9252298809f202a0d8266c73ec9c5de1e816 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Thu, 1 Apr 2021 20:15:13 +0200 Subject: [PATCH 24/37] Removed SingeCore and MultiCore, relies on atomics now --- cfail/ui/freeze.stderr | 22 +++-- cfail/ui/not-send.stderr | 75 +++++++--------- src/sealed.rs | 120 +++++++------------------ src/spsc/mod.rs | 190 ++++++++++++++------------------------- src/spsc/split.rs | 43 +++------ tests/cpass.rs | 8 +- tests/tsan.rs | 19 ++-- 7 files changed, 168 insertions(+), 309 deletions(-) diff --git a/cfail/ui/freeze.stderr b/cfail/ui/freeze.stderr index 56c116e31b..40eabe4833 100644 --- a/cfail/ui/freeze.stderr +++ b/cfail/ui/freeze.stderr @@ -1,9 +1,13 @@ -error[E0499]: cannot borrow `q` as mutable more than once at a time - --> $DIR/freeze.rs:7:5 - | -6 | let (_p, mut _c) = q.split(); - | - first mutable borrow occurs 
here -7 | q.enqueue(0).unwrap(); - | ^ second mutable borrow occurs here -8 | _c.dequeue(); - | -- first borrow later used here +error[E0107]: this struct takes 3 generic arguments but 4 generic arguments were supplied + --> $DIR/freeze.rs:4:16 + | +4 | let mut q: Queue = Queue::new(); + | ^^^^^ --- help: remove this generic argument + | | + | expected 3 generic arguments + | +note: struct defined here, with 3 generic parameters: `T`, `U`, `N` + --> $DIR/mod.rs:151:12 + | +151 | pub struct Queue + | ^^^^^ - - - diff --git a/cfail/ui/not-send.stderr b/cfail/ui/not-send.stderr index 878e924353..544fc8ccef 100644 --- a/cfail/ui/not-send.stderr +++ b/cfail/ui/not-send.stderr @@ -1,53 +1,44 @@ -error[E0277]: `*const ()` cannot be sent between threads safely - --> $DIR/not-send.rs:18:5 +error[E0107]: this struct takes 3 generic arguments but 4 generic arguments were supplied + --> $DIR/not-send.rs:18:15 | -11 | fn is_send() - | ------- required by a bound in this -12 | where -13 | T: Send, - | ---- required by this bound in `is_send` -... 
18 | is_send::>(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely + | ^^^^^^^^ --- help: remove this generic argument + | | + | expected 3 generic arguments | - = help: within `PhantomData<*const ()>`, the trait `Send` is not implemented for `*const ()` - = note: required because it appears within the type `PhantomData<*const ()>` - = note: required because of the requirements on the impl of `Send` for `Consumer<'_, PhantomData<*const ()>, _, _, 4_usize>` +note: struct defined here, with 3 generic parameters: `T`, `U`, `N` + --> $DIR/split.rs:26:12 + | +26 | pub struct Consumer<'a, T, U, const N: usize> + | ^^^^^^^^ - - - -error[E0277]: `*const ()` cannot be sent between threads safely - --> $DIR/not-send.rs:19:5 +error[E0107]: this struct takes 3 generic arguments but 4 generic arguments were supplied + --> $DIR/not-send.rs:19:15 | -11 | fn is_send() - | ------- required by a bound in this -12 | where -13 | T: Send, - | ---- required by this bound in `is_send` -... 19 | is_send::>(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely + | ^^^^^^^^ --- help: remove this generic argument + | | + | expected 3 generic arguments | - = help: within `PhantomData<*const ()>`, the trait `Send` is not implemented for `*const ()` - = note: required because it appears within the type `PhantomData<*const ()>` - = note: required because of the requirements on the impl of `Send` for `Producer<'_, PhantomData<*const ()>, _, _, 4_usize>` - -error[E0277]: `*const ()` cannot be sent between threads safely - --> $DIR/not-send.rs:20:5 +note: struct defined here, with 3 generic parameters: `T`, `U`, `N` + --> $DIR/split.rs:43:12 | -11 | fn is_send() - | ------- required by a bound in this -12 | where -13 | T: Send, - | ---- required by this bound in `is_send` -... 
-20 | is_send::>(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely - | - = help: within `Queue, _, _, 4_usize>`, the trait `Send` is not implemented for `*const ()` - = note: required because it appears within the type `PhantomData<*const ()>` - = note: required because it appears within the type `[PhantomData<*const ()>; 4]` - = note: required because it appears within the type `ManuallyDrop<[PhantomData<*const ()>; 4]>` - = note: required because it appears within the type `MaybeUninit<[PhantomData<*const ()>; 4]>` - = note: required because it appears within the type `Queue, _, _, 4_usize>` +43 | pub struct Producer<'a, T, U, const N: usize> + | ^^^^^^^^ - - - + +error[E0107]: this struct takes 3 generic arguments but 4 generic arguments were supplied + --> $DIR/not-send.rs:20:15 + | +20 | is_send::>(); + | ^^^^^ --- help: remove this generic argument + | | + | expected 3 generic arguments + | +note: struct defined here, with 3 generic parameters: `T`, `U`, `N` + --> $DIR/mod.rs:151:12 + | +151 | pub struct Queue + | ^^^^^ - - - error[E0277]: `*const ()` cannot be sent between threads safely --> $DIR/not-send.rs:21:5 diff --git a/src/sealed.rs b/src/sealed.rs index 092485d187..bd0ac12e38 100644 --- a/src/sealed.rs +++ b/src/sealed.rs @@ -1,27 +1,7 @@ /// Sealed traits and implementations for `spsc` pub mod spsc { #[cfg(has_atomics)] - use crate::spsc::{MultiCore, SingleCore}; - #[cfg(has_atomics)] - use core::sync::atomic::{self, AtomicU16, AtomicU8, AtomicUsize, Ordering}; - - pub unsafe trait XCore { - fn is_multi_core() -> bool; - } - - #[cfg(has_atomics)] - unsafe impl XCore for SingleCore { - fn is_multi_core() -> bool { - false - } - } - - #[cfg(has_atomics)] - unsafe impl XCore for MultiCore { - fn is_multi_core() -> bool { - true - } - } + use core::sync::atomic::{AtomicU16, AtomicU8, AtomicUsize, Ordering}; pub unsafe trait Uxx: Into + Send { #[doc(hidden)] @@ -32,9 +12,7 @@ pub mod spsc { #[cfg(has_atomics)] 
#[doc(hidden)] - unsafe fn load_acquire(x: *const Self) -> Self - where - C: XCore; + unsafe fn load_acquire(x: *const Self) -> Self; #[cfg(has_atomics)] #[doc(hidden)] @@ -42,9 +20,7 @@ pub mod spsc { #[cfg(has_atomics)] #[doc(hidden)] - unsafe fn store_release(x: *const Self, val: Self) - where - C: XCore; + unsafe fn store_release(x: *const Self, val: Self); } unsafe impl Uxx for u8 { @@ -62,17 +38,11 @@ pub mod spsc { } #[cfg(has_atomics)] - unsafe fn load_acquire(x: *const Self) -> Self - where - C: XCore, - { - if C::is_multi_core() { - (*(x as *const AtomicU8)).load(Ordering::Acquire) - } else { - let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed); // read - atomic::compiler_fence(Ordering::Acquire); // ▼ - y - } + unsafe fn load_acquire(x: *const Self) -> Self { + (*(x as *const AtomicU8)).load(Ordering::Acquire) + // let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed); // read + // atomic::compiler_fence(Ordering::Acquire); // ▼ + // y } #[cfg(has_atomics)] @@ -81,16 +51,10 @@ pub mod spsc { } #[cfg(has_atomics)] - unsafe fn store_release(x: *const Self, val: Self) - where - C: XCore, - { - if C::is_multi_core() { - (*(x as *const AtomicU8)).store(val, Ordering::Release) - } else { - atomic::compiler_fence(Ordering::Release); // ▲ - (*(x as *const AtomicU8)).store(val, Ordering::Relaxed) // write - } + unsafe fn store_release(x: *const Self, val: Self) { + (*(x as *const AtomicU8)).store(val, Ordering::Release) + // atomic::compiler_fence(Ordering::Release); // ▲ + // (*(x as *const AtomicU8)).store(val, Ordering::Relaxed) // write } } @@ -109,17 +73,11 @@ pub mod spsc { } #[cfg(has_atomics)] - unsafe fn load_acquire(x: *const Self) -> Self - where - C: XCore, - { - if C::is_multi_core() { - (*(x as *const AtomicU16)).load(Ordering::Acquire) - } else { - let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed); // read - atomic::compiler_fence(Ordering::Acquire); // ▼ - y - } + unsafe fn load_acquire(x: *const Self) -> Self { + (*(x as 
*const AtomicU16)).load(Ordering::Acquire) + // let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed); // read + // atomic::compiler_fence(Ordering::Acquire); // ▼ + // y } #[cfg(has_atomics)] @@ -128,16 +86,10 @@ pub mod spsc { } #[cfg(has_atomics)] - unsafe fn store_release(x: *const Self, val: Self) - where - C: XCore, - { - if C::is_multi_core() { - (*(x as *const AtomicU16)).store(val, Ordering::Release) - } else { - atomic::compiler_fence(Ordering::Release); // ▲ - (*(x as *const AtomicU16)).store(val, Ordering::Relaxed) // write - } + unsafe fn store_release(x: *const Self, val: Self) { + (*(x as *const AtomicU16)).store(val, Ordering::Release) + // atomic::compiler_fence(Ordering::Release); // ▲ + // (*(x as *const AtomicU16)).store(val, Ordering::Relaxed) // write } } @@ -151,17 +103,11 @@ pub mod spsc { } #[cfg(has_atomics)] - unsafe fn load_acquire(x: *const Self) -> Self - where - C: XCore, - { - if C::is_multi_core() { - (*(x as *const AtomicUsize)).load(Ordering::Acquire) - } else { - let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed); // read - atomic::compiler_fence(Ordering::Acquire); // ▼ - y - } + unsafe fn load_acquire(x: *const Self) -> Self { + (*(x as *const AtomicUsize)).load(Ordering::Acquire) + // let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed); // read + // atomic::compiler_fence(Ordering::Acquire); // ▼ + // y } #[cfg(has_atomics)] @@ -170,16 +116,10 @@ pub mod spsc { } #[cfg(has_atomics)] - unsafe fn store_release(x: *const Self, val: Self) - where - C: XCore, - { - if C::is_multi_core() { - (*(x as *const AtomicUsize)).store(val, Ordering::Release) - } else { - atomic::compiler_fence(Ordering::Release); // ▲ - (*(x as *const AtomicUsize)).store(val, Ordering::Relaxed); // write - } + unsafe fn store_release(x: *const Self, val: Self) { + (*(x as *const AtomicUsize)).store(val, Ordering::Release) + // atomic::compiler_fence(Ordering::Release); // ▲ + // (*(x as *const AtomicUsize)).store(val, 
Ordering::Relaxed); // write } } } diff --git a/src/spsc/mod.rs b/src/spsc/mod.rs index ef7820b0e1..f7b6e2c565 100644 --- a/src/spsc/mod.rs +++ b/src/spsc/mod.rs @@ -10,7 +10,7 @@ //! ``` //! use heapless::spsc::Queue; //! -//! let mut rb: Queue = Queue::new(); +//! let mut rb: Queue = Queue::new(); //! //! assert!(rb.enqueue(0).is_ok()); //! assert!(rb.enqueue(1).is_ok()); @@ -24,11 +24,11 @@ //! - `Queue` can be `split` and then be used in Single Producer Single Consumer mode //! //! ``` -//! use heapless::spsc::{Queue, MultiCore}; +//! use heapless::spsc::{Queue}; //! //! // Notice, type signature needs to be explicit for now. //! // (min_const_eval, does not allow for default type assignments) -//! static mut Q: Queue = Queue::new(); +//! static mut Q: Queue = Queue::new(); //! //! enum Event { A, B } //! @@ -83,7 +83,7 @@ //! - The numbers reported correspond to the successful path (i.e. `Some` is returned by `dequeue` //! and `Ok` is returned by `enqueue`). -use core::{cell::UnsafeCell, fmt, hash, marker::PhantomData, mem::MaybeUninit, ptr}; +use core::{cell::UnsafeCell, fmt, hash, mem::MaybeUninit, ptr}; use hash32; @@ -92,32 +92,23 @@ pub use split::{Consumer, Producer}; mod split; -/// Multi core synchronization - a memory barrier is used for synchronization -pub struct MultiCore; - -/// Single core synchronization - no memory barrier synchronization, just a compiler fence -pub struct SingleCore; - -// Atomic{U8,U16, Usize} with no CAS operations that works on targets that have "no atomic support" +// Atomic{U8, U16, Usize} with no CAS operations that works on targets that have "no atomic support" // according to their specification -pub(crate) struct Atomic { +pub(crate) struct Atomic { v: UnsafeCell, - c: PhantomData, } -impl Atomic { +impl Atomic { pub(crate) const fn new(v: U) -> Self { Atomic { v: UnsafeCell::new(v), - c: PhantomData, } } } -impl Atomic +impl Atomic where U: sealed::Uxx, - C: sealed::XCore, { fn get(&self) -> &U { unsafe { 
&*self.v.get() } @@ -128,7 +119,7 @@ where } fn load_acquire(&self) -> U { - unsafe { U::load_acquire::(self.v.get()) } + unsafe { U::load_acquire(self.v.get()) } } fn load_relaxed(&self) -> U { @@ -136,7 +127,7 @@ where } fn store_release(&self, val: U) { - unsafe { U::store_release::(self.v.get(), val) } + unsafe { U::store_release(self.v.get(), val) } } } @@ -153,33 +144,26 @@ where /// [`u8`]: struct.Queue.html#method.u8 /// [`u16`]: struct.Queue.html#method.u16 /// -/// *IMPORTANT*: `spsc::Queue<_, _, u8>` has a maximum capacity of 255 elements; `spsc::Queue<_, _, -/// u16>` has a maximum capacity of 65535 elements. -/// -/// `spsc::Queue` also comes in a single core variant. This variant can be created using the -/// following constructors: `u8_sc`, `u16_sc`, `usize_sc` and `new_sc`. This variant is `unsafe` to -/// create because the programmer must make sure that the queue's consumer and producer endpoints -/// (if split) are kept on a single core for their entire lifetime. +/// *IMPORTANT*: `spsc::Queue<_, u8, N>` has a maximum capacity of 255 elements; `spsc::Queue<_, +/// u16, N>` has a maximum capacity of 65535 elements. #[cfg(has_atomics)] -pub struct Queue +pub struct Queue where U: sealed::Uxx, - C: sealed::XCore, { // this is from where we dequeue items - pub(crate) head: Atomic, + pub(crate) head: Atomic, // this is where we enqueue new items - pub(crate) tail: Atomic, + pub(crate) tail: Atomic, pub(crate) buffer: MaybeUninit<[T; N]>, } -impl Queue +impl Queue where U: sealed::Uxx, - C: sealed::XCore, { /// Returns the maximum number of elements the queue can hold pub fn capacity(&self) -> U { @@ -192,7 +176,7 @@ where } /// Iterates from the front of the queue to the back - pub fn iter(&self) -> Iter<'_, T, U, C, N> { + pub fn iter(&self) -> Iter<'_, T, U, N> { Iter { rb: self, index: 0, @@ -201,7 +185,7 @@ where } /// Returns an iterator that allows modifying each value. 
- pub fn iter_mut(&mut self) -> IterMut<'_, T, U, C, N> { + pub fn iter_mut(&mut self) -> IterMut<'_, T, U, N> { let len = self.len_usize(); IterMut { rb: self, @@ -218,10 +202,9 @@ where } } -impl Drop for Queue +impl Drop for Queue where U: sealed::Uxx, - C: sealed::XCore, { fn drop(&mut self) { for item in self { @@ -232,22 +215,20 @@ where } } -impl fmt::Debug for Queue +impl fmt::Debug for Queue where T: fmt::Debug, U: sealed::Uxx, - C: sealed::XCore, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } } -impl hash::Hash for Queue +impl hash::Hash for Queue where T: hash::Hash, U: sealed::Uxx, - C: sealed::XCore, { fn hash(&self, state: &mut H) { // iterate over self in order @@ -257,11 +238,10 @@ where } } -impl hash32::Hash for Queue +impl hash32::Hash for Queue where T: hash32::Hash, U: sealed::Uxx, - C: sealed::XCore, { fn hash(&self, state: &mut H) { // iterate over self in order @@ -271,26 +251,24 @@ where } } -impl<'a, T, U, C, const N: usize> IntoIterator for &'a Queue +impl<'a, T, U, const N: usize> IntoIterator for &'a Queue where U: sealed::Uxx, - C: sealed::XCore, { type Item = &'a T; - type IntoIter = Iter<'a, T, U, C, N>; + type IntoIter = Iter<'a, T, U, N>; fn into_iter(self) -> Self::IntoIter { self.iter() } } -impl<'a, T, U, C, const N: usize> IntoIterator for &'a mut Queue +impl<'a, T, U, const N: usize> IntoIterator for &'a mut Queue where U: sealed::Uxx, - C: sealed::XCore, { type Item = &'a mut T; - type IntoIter = IterMut<'a, T, U, C, N>; + type IntoIter = IterMut<'a, T, U, N>; fn into_iter(self) -> Self::IntoIter { self.iter_mut() @@ -298,8 +276,8 @@ where } macro_rules! impl_ { - ($uxx:ident, $uxx_sc:ident) => { - impl Queue { + ($uxx:ident) => { + impl Queue { /// Creates an empty queue with a fixed capacity of `N` pub const fn $uxx() -> Self { Self { @@ -310,21 +288,7 @@ macro_rules! 
impl_ { } } - impl Queue { - /// Creates an empty queue with a fixed capacity of `N` (single core variant) - pub const unsafe fn $uxx_sc() -> Self { - Self { - buffer: MaybeUninit::uninit(), - head: Atomic::new(0), - tail: Atomic::new(0), - } - } - } - - impl Queue - where - C: sealed::XCore, - { + impl Queue { /// Returns a reference to the item in the front of the queue without dequeuing, or /// `None` if the queue is empty. /// @@ -332,7 +296,7 @@ macro_rules! impl_ { /// ``` /// use heapless::spsc::Queue; /// - /// let mut queue: Queue = Queue::u8(); + /// let mut queue: Queue = Queue::u8(); /// let (mut producer, mut consumer) = queue.split(); /// assert_eq!(None, consumer.peek()); /// producer.enqueue(1); @@ -419,13 +383,12 @@ macro_rules! impl_ { } } - impl Clone for Queue + impl Clone for Queue where T: Clone, - C: sealed::XCore, { fn clone(&self) -> Self { - let mut new: Queue = Queue { + let mut new: Queue = Queue { buffer: MaybeUninit::uninit(), head: Atomic::new(0), tail: Atomic::new(0), @@ -444,62 +407,49 @@ macro_rules! 
impl_ { }; } -impl Queue { +impl Queue { /// Alias for [`spsc::Queue::usize`](struct.Queue.html#method.usize) pub const fn new() -> Self { Queue::usize() } } -impl Queue { - /// Alias for [`spsc::Queue::usize_sc`](struct.Queue.html#method.usize_sc) - pub unsafe fn new_sc() -> Self { - Queue::usize_sc() - } -} +impl_!(u8); +impl_!(u16); +impl_!(usize); -impl_!(u8, u8_sc); -impl_!(u16, u16_sc); -impl_!(usize, usize_sc); - -impl PartialEq> - for Queue +impl PartialEq> for Queue where T: PartialEq, U: sealed::Uxx, - C: sealed::XCore, U2: sealed::Uxx, - C2: sealed::XCore, { - fn eq(&self, other: &Queue) -> bool { + fn eq(&self, other: &Queue) -> bool { self.len_usize() == other.len_usize() && self.iter().zip(other.iter()).all(|(v1, v2)| v1 == v2) } } -impl Eq for Queue +impl Eq for Queue where T: Eq, U: sealed::Uxx, - C: sealed::XCore, { } /// An iterator over the items of a queue -pub struct Iter<'a, T, U, C, const N: usize> +pub struct Iter<'a, T, U, const N: usize> where U: sealed::Uxx, - C: sealed::XCore, { - rb: &'a Queue, + rb: &'a Queue, index: usize, len: usize, } -impl<'a, T, U, C, const N: usize> Clone for Iter<'a, T, U, C, N> +impl<'a, T, U, const N: usize> Clone for Iter<'a, T, U, N> where U: sealed::Uxx, - C: sealed::XCore, { fn clone(&self) -> Self { Self { @@ -511,22 +461,20 @@ where } /// A mutable iterator over the items of a queue -pub struct IterMut<'a, T, U, C, const N: usize> +pub struct IterMut<'a, T, U, const N: usize> where U: sealed::Uxx, - C: sealed::XCore, { - rb: &'a mut Queue, + rb: &'a mut Queue, index: usize, len: usize, } macro_rules! iterator { (struct $name:ident -> $elem:ty, $ptr:ty, $asptr:ident, $mkref:ident) => { - impl<'a, T, U, C, const N: usize> Iterator for $name<'a, T, U, C, N> + impl<'a, T, U, const N: usize> Iterator for $name<'a, T, U, N> where U: sealed::Uxx, - C: sealed::XCore, { type Item = $elem; @@ -545,10 +493,9 @@ macro_rules! 
iterator { } } - impl<'a, T, U, C, const N: usize> DoubleEndedIterator for $name<'a, T, U, C, N> + impl<'a, T, U, const N: usize> DoubleEndedIterator for $name<'a, T, U, N> where U: sealed::Uxx, - C: sealed::XCore, { fn next_back(&mut self) -> Option<$elem> { if self.index < self.len { @@ -587,21 +534,16 @@ iterator!(struct IterMut -> &'a mut T, *mut T, as_mut_ptr, make_ref_mut); mod tests { use hash32::Hasher; - use crate::spsc::{MultiCore, Queue, SingleCore}; - - #[test] - fn static_usize_sc() { - static mut _Q: Queue = unsafe { Queue::usize_sc() }; - } + use crate::spsc::Queue; #[test] fn static_usize() { - static mut _Q: Queue = Queue::usize(); + static mut _Q: Queue = Queue::usize(); } #[test] fn static_new() { - static mut _Q: Queue = Queue::new(); + static mut _Q: Queue = Queue::new(); } #[test] @@ -627,7 +569,7 @@ mod tests { static mut COUNT: i32 = 0; { - let mut v: Queue = unsafe { Queue::usize_sc() }; + let mut v: Queue = Queue::usize(); v.enqueue(Droppable::new()).ok().unwrap(); v.enqueue(Droppable::new()).ok().unwrap(); v.dequeue().unwrap(); @@ -636,7 +578,7 @@ mod tests { assert_eq!(unsafe { COUNT }, 0); { - let mut v: Queue = Queue::usize(); + let mut v: Queue = Queue::usize(); v.enqueue(Droppable::new()).ok().unwrap(); v.enqueue(Droppable::new()).ok().unwrap(); } @@ -646,7 +588,7 @@ mod tests { #[test] fn full() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::u8(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -658,7 +600,7 @@ mod tests { #[test] fn iter() { - let mut rb: Queue = Queue::u16(); + let mut rb: Queue = Queue::u16(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -674,7 +616,7 @@ mod tests { #[test] fn iter_double_ended() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::u8(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -691,7 +633,7 @@ mod tests { #[test] fn iter_overflow() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::u8(); rb.enqueue(0).unwrap(); for _ in 0..300 { 
@@ -705,7 +647,7 @@ mod tests { #[test] fn iter_mut() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::u8(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -721,7 +663,7 @@ mod tests { #[test] fn iter_mut_double_ended() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::u8(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -738,7 +680,7 @@ mod tests { #[test] fn sanity() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::u8(); assert_eq!(rb.dequeue(), None); rb.enqueue(0).unwrap(); assert_eq!(rb.dequeue(), Some(0)); @@ -748,7 +690,7 @@ mod tests { #[test] #[cfg(feature = "smaller-atomics")] fn u8() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::u8(); for _ in 0..255 { rb.enqueue(0).unwrap(); @@ -759,7 +701,7 @@ mod tests { #[test] fn wrap_around() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::u8(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -775,7 +717,7 @@ mod tests { #[test] fn ready_flag() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::u8(); let (mut p, mut c) = rb.split(); assert_eq!(c.ready(), false); assert_eq!(p.ready(), true); @@ -803,7 +745,7 @@ mod tests { #[test] fn clone() { - let mut rb1: Queue = Queue::u8(); + let mut rb1: Queue = Queue::u8(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); @@ -818,12 +760,12 @@ mod tests { fn eq() { // generate two queues with same content // but different buffer alignment - let mut rb1: Queue = Queue::u8(); + let mut rb1: Queue = Queue::u8(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); rb1.enqueue(0).unwrap(); - let mut rb2: Queue = Queue::u8(); + let mut rb2: Queue = Queue::u8(); rb2.enqueue(0).unwrap(); rb2.enqueue(0).unwrap(); assert!(rb1 == rb2); @@ -844,7 +786,7 @@ mod tests { // generate two queues with same content // but different buffer alignment let rb1 = { - let mut rb1: Queue = Queue::u8(); + let mut rb1: Queue = Queue::u8(); 
rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); @@ -852,7 +794,7 @@ mod tests { rb1 }; let rb2 = { - let mut rb2: Queue = Queue::u8(); + let mut rb2: Queue = Queue::u8(); rb2.enqueue(0).unwrap(); rb2.enqueue(0).unwrap(); rb2 diff --git a/src/spsc/split.rs b/src/spsc/split.rs index 5cd6264a6e..88dcc90cef 100644 --- a/src/spsc/split.rs +++ b/src/spsc/split.rs @@ -1,18 +1,13 @@ use core::{marker::PhantomData, ptr::NonNull}; -use crate::{ - sealed::spsc as sealed, - spsc::Queue, - // spsc::{MultiCore, Queue}, // we cannot currently default to MultiCore -}; +use crate::{sealed::spsc as sealed, spsc::Queue}; -impl Queue +impl Queue where U: sealed::Uxx, - C: sealed::XCore, { /// Splits a statically allocated queue into producer and consumer end points - pub fn split<'rb>(&'rb mut self) -> (Producer<'rb, T, U, C, N>, Consumer<'rb, T, U, C, N>) { + pub fn split<'rb>(&'rb mut self) -> (Producer<'rb, T, U, N>, Consumer<'rb, T, U, N>) { ( Producer { rb: unsafe { NonNull::new_unchecked(self) }, @@ -28,48 +23,41 @@ where /// A queue "consumer"; it can dequeue items from the queue // NOTE the consumer semantically owns the `head` pointer of the queue -pub struct Consumer<'a, T, U, C, const N: usize> +pub struct Consumer<'a, T, U, const N: usize> where U: sealed::Uxx, - C: sealed::XCore, { - rb: NonNull>, + rb: NonNull>, _marker: PhantomData<&'a ()>, } -unsafe impl<'a, T, U, C, const N: usize> Send for Consumer<'a, T, U, C, N> +unsafe impl<'a, T, U, const N: usize> Send for Consumer<'a, T, U, N> where T: Send, U: sealed::Uxx, - C: sealed::XCore, { } /// A queue "producer"; it can enqueue items into the queue // NOTE the producer semantically owns the `tail` pointer of the queue -pub struct Producer<'a, T, U, C, const N: usize> +pub struct Producer<'a, T, U, const N: usize> where U: sealed::Uxx, - C: sealed::XCore, { - rb: NonNull>, + rb: NonNull>, _marker: PhantomData<&'a ()>, } -unsafe impl<'a, T, U, C, const N: usize> Send for Producer<'a, T, U, C, N> 
+unsafe impl<'a, T, U, const N: usize> Send for Producer<'a, T, U, N> where T: Send, U: sealed::Uxx, - C: sealed::XCore, { } macro_rules! impl_ { ($uxx:ident) => { - impl<'a, T, C, const N: usize> Consumer<'a, T, $uxx, C, N> - where - C: sealed::XCore, - { + impl<'a, T, const N: usize> Consumer<'a, T, $uxx, N> { /// Returns if there are any items to dequeue. When this returns true, at least the /// first subsequent dequeue will succeed. pub fn ready(&self) -> bool { @@ -84,7 +72,7 @@ macro_rules! impl_ { /// ``` /// use heapless::spsc::Queue; /// - /// let mut queue: Queue = Queue::u8(); + /// let mut queue: Queue = Queue::u8(); /// let (mut producer, mut consumer) = queue.split(); /// assert_eq!(None, consumer.peek()); /// producer.enqueue(1); @@ -165,10 +153,7 @@ macro_rules! impl_ { } } - impl<'a, T, C, const N: usize> Producer<'a, T, $uxx, C, N> - where - C: sealed::XCore, - { + impl<'a, T, const N: usize> Producer<'a, T, $uxx, N> { /// Returns if there is any space to enqueue a new item. When this returns true, at /// least the first subsequent enqueue will succeed. pub fn ready(&self) -> bool { @@ -259,11 +244,11 @@ impl_!(usize); #[cfg(test)] mod tests { - use crate::spsc::{MultiCore, Queue}; + use crate::spsc::Queue; #[test] fn sanity() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::u8(); let (mut p, mut c) = rb.split(); diff --git a/tests/cpass.rs b/tests/cpass.rs index 18d88d194b..f525ee5688 100644 --- a/tests/cpass.rs +++ b/tests/cpass.rs @@ -1,7 +1,7 @@ //! 
Collections of `Send`-able things are `Send` use heapless::{ - spsc::{Consumer, MultiCore, Producer, Queue}, + spsc::{Consumer, Producer, Queue}, HistoryBuffer, Vec, }; @@ -17,9 +17,9 @@ fn send() { { } - is_send::>(); - is_send::>(); - is_send::>(); + is_send::>(); + is_send::>(); + is_send::>(); is_send::>(); is_send::>(); } diff --git a/tests/tsan.rs b/tests/tsan.rs index 9ea18add39..c3d6638460 100644 --- a/tests/tsan.rs +++ b/tests/tsan.rs @@ -4,15 +4,12 @@ use std::{sync::mpsc, thread}; -use heapless::{ - mpmc::Q64, - spsc::{self, MultiCore}, -}; +use heapless::{mpmc::Q64, spsc}; use scoped_threadpool::Pool; #[test] fn once() { - static mut RB: spsc::Queue = spsc::Queue::new(); + static mut RB: spsc::Queue = spsc::Queue::new(); let rb = unsafe { &mut RB }; @@ -33,7 +30,7 @@ fn once() { #[test] fn twice() { - static mut RB: spsc::Queue = spsc::Queue::new(); + static mut RB: spsc::Queue = spsc::Queue::new(); let rb = unsafe { &mut RB }; @@ -55,7 +52,7 @@ fn twice() { #[test] fn scoped() { - let mut rb: spsc::Queue = spsc::Queue::new(); + let mut rb: spsc::Queue = spsc::Queue::new(); rb.enqueue(0).unwrap(); @@ -80,7 +77,7 @@ fn scoped() { fn contention() { const N: usize = 1024; - let mut rb: spsc::Queue = spsc::Queue::new(); + let mut rb: spsc::Queue = spsc::Queue::new(); { let (mut p, mut c) = rb.split(); @@ -167,7 +164,7 @@ fn mpmc_contention() { fn unchecked() { const N: usize = 1024; - let mut rb: spsc::Queue = spsc::Queue::new(); + let mut rb: spsc::Queue = spsc::Queue::new(); for _ in 0..N / 2 { rb.enqueue(1).unwrap(); @@ -203,7 +200,7 @@ fn unchecked() { #[test] fn len_properly_wraps() { const N: usize = 3; - let mut rb: spsc::Queue = spsc::Queue::new(); + let mut rb: spsc::Queue = spsc::Queue::new(); rb.enqueue(1).unwrap(); assert_eq!(rb.len(), 1); @@ -220,7 +217,7 @@ fn len_properly_wraps() { #[test] fn iterator_properly_wraps() { const N: usize = 3; - let mut rb: spsc::Queue = spsc::Queue::new(); + let mut rb: spsc::Queue = spsc::Queue::new(); 
rb.enqueue(1).unwrap(); rb.dequeue(); From e0b3b3a179e382dd5cd642d1211e7f0ef0f6b047 Mon Sep 17 00:00:00 2001 From: Andrey Zgarbul Date: Fri, 2 Apr 2021 20:45:18 +0300 Subject: [PATCH 25/37] remove as_slice dependency --- Cargo.toml | 1 - src/pool/mod.rs | 26 ++------------------------ src/pool/singleton.rs | 28 ++-------------------------- 3 files changed, 4 insertions(+), 51 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e7ba77f30c..eaf5d54baa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,7 +33,6 @@ __trybuild = [] scoped_threadpool = "0.1.8" [dependencies] -as-slice = "0.2.0" hash32 = "0.1.0" [dependencies.serde] diff --git a/src/pool/mod.rs b/src/pool/mod.rs index 739165f070..f7441e2342 100644 --- a/src/pool/mod.rs +++ b/src/pool/mod.rs @@ -236,8 +236,6 @@ use core::{ ptr, }; -use as_slice::{AsMutSlice, AsSlice}; - pub use stack::Node; use stack::{Ptr, Stack}; @@ -384,13 +382,13 @@ impl Pool { /// memory block pub fn grow_exact(&self, memory: &'static mut MaybeUninit) -> usize where - A: AsMutSlice>, + A: AsMut<[Node]>, { if mem::size_of::() == 0 { return usize::max_value(); } - let nodes = unsafe { (*memory.as_mut_ptr()).as_mut_slice() }; + let nodes = unsafe { (*memory.as_mut_ptr()).as_mut() }; let cap = nodes.len(); for p in nodes { match () { @@ -441,26 +439,6 @@ unsafe impl Sync for Box where T: Sync {} unsafe impl stable_deref_trait::StableDeref for Box {} -impl AsSlice for Box -where - A: AsSlice, -{ - type Element = A::Element; - - fn as_slice(&self) -> &[A::Element] { - self.deref().as_slice() - } -} - -impl AsMutSlice for Box -where - A: AsMutSlice, -{ - fn as_mut_slice(&mut self) -> &mut [A::Element] { - self.deref_mut().as_mut_slice() - } -} - impl Deref for Box { type Target = T; diff --git a/src/pool/singleton.rs b/src/pool/singleton.rs index ccfa2de955..ccc2d966a8 100644 --- a/src/pool/singleton.rs +++ b/src/pool/singleton.rs @@ -10,8 +10,6 @@ use core::{ ptr, }; -use as_slice::{AsMutSlice, AsSlice}; - use super::{Init, Node, Uninit}; 
/// Instantiates a pool as a global singleton @@ -80,7 +78,7 @@ pub trait Pool { /// memory block fn grow_exact(memory: &'static mut MaybeUninit) -> usize where - A: AsMutSlice>, + A: AsMut<[Node]>, { Self::ptr().grow_exact(memory) } @@ -123,7 +121,7 @@ where impl

Box where P: Pool, - P::Data: AsSlice, + P::Data: AsRef<[u8]>, { /// Freezes the contents of this memory block /// @@ -246,28 +244,6 @@ where { } -impl AsSlice for Box

-where - P: Pool, - P::Data: AsSlice, -{ - type Element = T; - - fn as_slice(&self) -> &[T] { - self.deref().as_slice() - } -} - -impl AsMutSlice for Box

-where - P: Pool, - P::Data: AsMutSlice, -{ - fn as_mut_slice(&mut self) -> &mut [T] { - self.deref_mut().as_mut_slice() - } -} - impl

PartialEq for Box

where P: Pool, From 618eebfcb930000828beee7be6461640bead656c Mon Sep 17 00:00:00 2001 From: Andrey Zgarbul Date: Fri, 2 Apr 2021 22:58:08 +0300 Subject: [PATCH 26/37] AsRef, AsMut for pool --- src/pool/mod.rs | 18 ++++++++++++++++++ src/pool/singleton.rs | 20 ++++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/src/pool/mod.rs b/src/pool/mod.rs index f7441e2342..3e72086071 100644 --- a/src/pool/mod.rs +++ b/src/pool/mod.rs @@ -439,6 +439,24 @@ unsafe impl Sync for Box where T: Sync {} unsafe impl stable_deref_trait::StableDeref for Box {} +impl AsRef<[T]> for Box +where + A: AsRef<[T]>, +{ + fn as_ref(&self) -> &[T] { + self.deref().as_ref() + } +} + +impl AsMut<[T]> for Box +where + A: AsMut<[T]>, +{ + fn as_mut(&mut self) -> &mut [T] { + self.deref_mut().as_mut() + } +} + impl Deref for Box { type Target = T; diff --git a/src/pool/singleton.rs b/src/pool/singleton.rs index ccc2d966a8..62de8c8f20 100644 --- a/src/pool/singleton.rs +++ b/src/pool/singleton.rs @@ -244,6 +244,26 @@ where { } +impl AsRef<[T]> for Box

+where + P: Pool, + P::Data: AsRef<[T]>, +{ + fn as_ref(&self) -> &[T] { + self.deref().as_ref() + } +} + +impl AsMut<[T]> for Box

+where + P: Pool, + P::Data: AsMut<[T]>, +{ + fn as_mut(&mut self) -> &mut [T] { + self.deref_mut().as_mut() + } +} + impl

PartialEq for Box

where P: Pool, From a507b8ec3ea83a2500a8d355c4e17e10f70332d0 Mon Sep 17 00:00:00 2001 From: Roman Proskuryakov Date: Sat, 3 Apr 2021 14:02:09 +0300 Subject: [PATCH 27/37] Fix cfail tests in const generics --- cfail/ui/freeze.rs | 2 +- cfail/ui/freeze.stderr | 22 +++++------ cfail/ui/not-send.rs | 6 +-- cfail/ui/not-send.stderr | 79 ++++++++++++++++++++++------------------ 4 files changed, 57 insertions(+), 52 deletions(-) diff --git a/cfail/ui/freeze.rs b/cfail/ui/freeze.rs index b5e5ace6cf..b970189248 100644 --- a/cfail/ui/freeze.rs +++ b/cfail/ui/freeze.rs @@ -1,7 +1,7 @@ use heapless::{spsc::Queue}; fn main() { - let mut q: Queue = Queue::new(); + let mut q: Queue = Queue::new(); let (_p, mut _c) = q.split(); q.enqueue(0).unwrap(); diff --git a/cfail/ui/freeze.stderr b/cfail/ui/freeze.stderr index 40eabe4833..56c116e31b 100644 --- a/cfail/ui/freeze.stderr +++ b/cfail/ui/freeze.stderr @@ -1,13 +1,9 @@ -error[E0107]: this struct takes 3 generic arguments but 4 generic arguments were supplied - --> $DIR/freeze.rs:4:16 - | -4 | let mut q: Queue = Queue::new(); - | ^^^^^ --- help: remove this generic argument - | | - | expected 3 generic arguments - | -note: struct defined here, with 3 generic parameters: `T`, `U`, `N` - --> $DIR/mod.rs:151:12 - | -151 | pub struct Queue - | ^^^^^ - - - +error[E0499]: cannot borrow `q` as mutable more than once at a time + --> $DIR/freeze.rs:7:5 + | +6 | let (_p, mut _c) = q.split(); + | - first mutable borrow occurs here +7 | q.enqueue(0).unwrap(); + | ^ second mutable borrow occurs here +8 | _c.dequeue(); + | -- first borrow later used here diff --git a/cfail/ui/not-send.rs b/cfail/ui/not-send.rs index f5ea87c6b7..6912f2ac42 100644 --- a/cfail/ui/not-send.rs +++ b/cfail/ui/not-send.rs @@ -15,8 +15,8 @@ where } fn main() { - is_send::>(); - is_send::>(); - is_send::>(); + is_send::>(); + is_send::>(); + is_send::>(); is_send::>(); } diff --git a/cfail/ui/not-send.stderr b/cfail/ui/not-send.stderr index 544fc8ccef..c09e4468a6 100644 
--- a/cfail/ui/not-send.stderr +++ b/cfail/ui/not-send.stderr @@ -1,44 +1,53 @@ -error[E0107]: this struct takes 3 generic arguments but 4 generic arguments were supplied - --> $DIR/not-send.rs:18:15 - | -18 | is_send::>(); - | ^^^^^^^^ --- help: remove this generic argument - | | - | expected 3 generic arguments +error[E0277]: `*const ()` cannot be sent between threads safely + --> $DIR/not-send.rs:18:5 | -note: struct defined here, with 3 generic parameters: `T`, `U`, `N` - --> $DIR/split.rs:26:12 +11 | fn is_send() + | ------- required by a bound in this +12 | where +13 | T: Send, + | ---- required by this bound in `is_send` +... +18 | is_send::>(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely | -26 | pub struct Consumer<'a, T, U, const N: usize> - | ^^^^^^^^ - - - + = help: within `PhantomData<*const ()>`, the trait `Send` is not implemented for `*const ()` + = note: required because it appears within the type `PhantomData<*const ()>` + = note: required because of the requirements on the impl of `Send` for `Consumer<'_, PhantomData<*const ()>, _, 4_usize>` -error[E0107]: this struct takes 3 generic arguments but 4 generic arguments were supplied - --> $DIR/not-send.rs:19:15 - | -19 | is_send::>(); - | ^^^^^^^^ --- help: remove this generic argument - | | - | expected 3 generic arguments +error[E0277]: `*const ()` cannot be sent between threads safely + --> $DIR/not-send.rs:19:5 | -note: struct defined here, with 3 generic parameters: `T`, `U`, `N` - --> $DIR/split.rs:43:12 +11 | fn is_send() + | ------- required by a bound in this +12 | where +13 | T: Send, + | ---- required by this bound in `is_send` +... 
+19 | is_send::>(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely | -43 | pub struct Producer<'a, T, U, const N: usize> - | ^^^^^^^^ - - - + = help: within `PhantomData<*const ()>`, the trait `Send` is not implemented for `*const ()` + = note: required because it appears within the type `PhantomData<*const ()>` + = note: required because of the requirements on the impl of `Send` for `Producer<'_, PhantomData<*const ()>, _, 4_usize>` -error[E0107]: this struct takes 3 generic arguments but 4 generic arguments were supplied - --> $DIR/not-send.rs:20:15 - | -20 | is_send::>(); - | ^^^^^ --- help: remove this generic argument - | | - | expected 3 generic arguments - | -note: struct defined here, with 3 generic parameters: `T`, `U`, `N` - --> $DIR/mod.rs:151:12 - | -151 | pub struct Queue - | ^^^^^ - - - +error[E0277]: `*const ()` cannot be sent between threads safely + --> $DIR/not-send.rs:20:5 + | +11 | fn is_send() + | ------- required by a bound in this +12 | where +13 | T: Send, + | ---- required by this bound in `is_send` +... 
+20 | is_send::>(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely + | + = help: within `Queue, _, 4_usize>`, the trait `Send` is not implemented for `*const ()` + = note: required because it appears within the type `PhantomData<*const ()>` + = note: required because it appears within the type `[PhantomData<*const ()>; 4]` + = note: required because it appears within the type `ManuallyDrop<[PhantomData<*const ()>; 4]>` + = note: required because it appears within the type `MaybeUninit<[PhantomData<*const ()>; 4]>` + = note: required because it appears within the type `Queue, _, 4_usize>` error[E0277]: `*const ()` cannot be sent between threads safely --> $DIR/not-send.rs:21:5 From 6bee0f8a9be892e05863a6f13dfb405489fb6b0c Mon Sep 17 00:00:00 2001 From: Roman Proskuryakov Date: Sat, 3 Apr 2021 22:20:09 +0300 Subject: [PATCH 28/37] Simplify code in histbuf --- src/histbuf.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/histbuf.rs b/src/histbuf.rs index 4fed656562..01cb000291 100644 --- a/src/histbuf.rs +++ b/src/histbuf.rs @@ -174,11 +174,7 @@ impl HistoryBuffer { /// Returns the array slice backing the buffer, without keeping track /// of the write position. Therefore, the element order is unspecified. 
pub fn as_slice(&self) -> &[T] { - if self.filled { - unsafe { slice::from_raw_parts(self.data.as_ptr() as *const _, self.capacity()) } - } else { - unsafe { slice::from_raw_parts(self.data.as_ptr() as *const _, self.write_at) } - } + unsafe { slice::from_raw_parts(self.data.as_ptr() as *const _, self.len()) } } } @@ -276,6 +272,8 @@ mod tests { fn as_slice() { let mut x: HistoryBuffer = HistoryBuffer::new(); + assert_eq!(x.as_slice(), []); + x.extend([1, 2, 3, 4, 5].iter()); assert_eq!(x.as_slice(), [5, 2, 3, 4]); From 2bfe3fe8f4056408e48702c5796f66db81f13a4c Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Sun, 4 Apr 2021 12:09:14 +0200 Subject: [PATCH 29/37] Restore tests for SPSC as close as we can --- src/spsc/mod.rs | 30 +++++++++++++++--------------- src/spsc/split.rs | 2 +- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/spsc/mod.rs b/src/spsc/mod.rs index f7b6e2c565..7fa72348e3 100644 --- a/src/spsc/mod.rs +++ b/src/spsc/mod.rs @@ -569,7 +569,7 @@ mod tests { static mut COUNT: i32 = 0; { - let mut v: Queue = Queue::usize(); + let mut v: Queue = Queue::new(); v.enqueue(Droppable::new()).ok().unwrap(); v.enqueue(Droppable::new()).ok().unwrap(); v.dequeue().unwrap(); @@ -578,7 +578,7 @@ mod tests { assert_eq!(unsafe { COUNT }, 0); { - let mut v: Queue = Queue::usize(); + let mut v: Queue = Queue::new(); v.enqueue(Droppable::new()).ok().unwrap(); v.enqueue(Droppable::new()).ok().unwrap(); } @@ -588,7 +588,7 @@ mod tests { #[test] fn full() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -600,7 +600,7 @@ mod tests { #[test] fn iter() { - let mut rb: Queue = Queue::u16(); + let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -616,7 +616,7 @@ mod tests { #[test] fn iter_double_ended() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -647,7 +647,7 @@ mod tests { 
#[test] fn iter_mut() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -663,7 +663,7 @@ mod tests { #[test] fn iter_mut_double_ended() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -680,7 +680,7 @@ mod tests { #[test] fn sanity() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::new(); assert_eq!(rb.dequeue(), None); rb.enqueue(0).unwrap(); assert_eq!(rb.dequeue(), Some(0)); @@ -701,7 +701,7 @@ mod tests { #[test] fn wrap_around() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::new(); rb.enqueue(0).unwrap(); rb.enqueue(1).unwrap(); @@ -717,7 +717,7 @@ mod tests { #[test] fn ready_flag() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::new(); let (mut p, mut c) = rb.split(); assert_eq!(c.ready(), false); assert_eq!(p.ready(), true); @@ -745,7 +745,7 @@ mod tests { #[test] fn clone() { - let mut rb1: Queue = Queue::u8(); + let mut rb1: Queue = Queue::new(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); @@ -760,12 +760,12 @@ mod tests { fn eq() { // generate two queues with same content // but different buffer alignment - let mut rb1: Queue = Queue::u8(); + let mut rb1: Queue = Queue::new(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); rb1.enqueue(0).unwrap(); - let mut rb2: Queue = Queue::u8(); + let mut rb2: Queue = Queue::new(); rb2.enqueue(0).unwrap(); rb2.enqueue(0).unwrap(); assert!(rb1 == rb2); @@ -786,7 +786,7 @@ mod tests { // generate two queues with same content // but different buffer alignment let rb1 = { - let mut rb1: Queue = Queue::u8(); + let mut rb1: Queue = Queue::new(); rb1.enqueue(0).unwrap(); rb1.enqueue(0).unwrap(); rb1.dequeue().unwrap(); @@ -794,7 +794,7 @@ mod tests { rb1 }; let rb2 = { - let mut rb2: Queue = Queue::u8(); + let mut rb2: Queue = Queue::new(); rb2.enqueue(0).unwrap(); 
rb2.enqueue(0).unwrap(); rb2 diff --git a/src/spsc/split.rs b/src/spsc/split.rs index 88dcc90cef..cdec5a84f3 100644 --- a/src/spsc/split.rs +++ b/src/spsc/split.rs @@ -248,7 +248,7 @@ mod tests { #[test] fn sanity() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = Queue::new(); let (mut p, mut c) = rb.split(); From 1fac5935e5905583421b75cd2c22b2d0a347ae51 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Sun, 4 Apr 2021 12:55:02 +0200 Subject: [PATCH 30/37] Added unsafe and docs to Queue::{u8(), u16()} --- src/spsc/mod.rs | 24 ++++++++++++++++-------- src/spsc/split.rs | 2 +- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/spsc/mod.rs b/src/spsc/mod.rs index 7fa72348e3..435c1443a8 100644 --- a/src/spsc/mod.rs +++ b/src/spsc/mod.rs @@ -276,10 +276,10 @@ where } macro_rules! impl_ { - ($uxx:ident) => { + ($uxx:ident, $doc:tt $(,$unsf:ident)?) => { impl Queue { - /// Creates an empty queue with a fixed capacity of `N` - pub const fn $uxx() -> Self { + #[doc = $doc] + pub const $($unsf)* fn $uxx() -> Self { Self { head: Atomic::new(0), tail: Atomic::new(0), @@ -296,7 +296,7 @@ macro_rules! impl_ { /// ``` /// use heapless::spsc::Queue; /// - /// let mut queue: Queue = Queue::u8(); + /// let mut queue: Queue = Queue::new(); /// let (mut producer, mut consumer) = queue.split(); /// assert_eq!(None, consumer.peek()); /// producer.enqueue(1); @@ -414,9 +414,17 @@ impl Queue { } } -impl_!(u8); -impl_!(u16); -impl_!(usize); +impl_!( + u8, + "Creates an empty queue with a fixed capacity of `N`. **Safety**: Assumes `N <= u8::MAX`.", + unsafe +); +impl_!( + u16, + "Creates an empty queue with a fixed capacity of `N`. 
**Safety**: Assumes `N <= u16::MAX`.", + unsafe +); +impl_!(usize, "Creates an empty queue with a fixed capacity of `N`"); impl PartialEq> for Queue where @@ -633,7 +641,7 @@ mod tests { #[test] fn iter_overflow() { - let mut rb: Queue = Queue::u8(); + let mut rb: Queue = unsafe { Queue::u8() }; rb.enqueue(0).unwrap(); for _ in 0..300 { diff --git a/src/spsc/split.rs b/src/spsc/split.rs index cdec5a84f3..ee6b28815c 100644 --- a/src/spsc/split.rs +++ b/src/spsc/split.rs @@ -72,7 +72,7 @@ macro_rules! impl_ { /// ``` /// use heapless::spsc::Queue; /// - /// let mut queue: Queue = Queue::u8(); + /// let mut queue: Queue = Queue::new(); /// let (mut producer, mut consumer) = queue.split(); /// assert_eq!(None, consumer.peek()); /// producer.enqueue(1); From 2c248a31e9c97d185c430111293763f526a7005e Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Sun, 4 Apr 2021 13:18:49 +0200 Subject: [PATCH 31/37] Fixed so pool and MPMC works on thumbv6 --- Cargo.toml | 3 +++ build.rs | 5 +---- src/mpmc.rs | 12 +++++++----- src/pool/llsc.rs | 12 +++++++----- 4 files changed, 18 insertions(+), 14 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index eaf5d54baa..312d3cce87 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,6 +32,9 @@ __trybuild = [] [target.x86_64-unknown-linux-gnu.dev-dependencies] scoped_threadpool = "0.1.8" +[target.thumbv6m-none-eabi.dependencies] +atomic-polyfill = "0.1.2" + [dependencies] hash32 = "0.1.0" diff --git a/build.rs b/build.rs index 8a29d9e517..094b6cd262 100644 --- a/build.rs +++ b/build.rs @@ -24,10 +24,7 @@ fn main() -> Result<(), Box> { // built-in targets with no atomic / CAS support as of nightly-2019-12-17 // see the `no-atomics.sh` / `no-cas.sh` script sitting next to this file match &target[..] 
{ - "thumbv6m-none-eabi" - | "msp430-none-elf" - | "riscv32i-unknown-none-elf" - | "riscv32imc-unknown-none-elf" => {} + "msp430-none-elf" | "riscv32i-unknown-none-elf" | "riscv32imc-unknown-none-elf" => {} _ => { println!("cargo:rustc-cfg=has_cas"); diff --git a/src/mpmc.rs b/src/mpmc.rs index 023d55835d..06b496b591 100644 --- a/src/mpmc.rs +++ b/src/mpmc.rs @@ -82,11 +82,13 @@ //! //! [0]: http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue -use core::{ - cell::UnsafeCell, - mem::MaybeUninit, - sync::atomic::{AtomicU8, Ordering}, -}; +use core::{cell::UnsafeCell, mem::MaybeUninit}; + +#[cfg(armv6m)] +use atomic_polyfill::{AtomicU8, Ordering}; + +#[cfg(not(armv6m))] +use core::sync::atomic::{AtomicU8, Ordering}; /// MPMC queue with a capacity for 2 elements pub struct Q2 { diff --git a/src/pool/llsc.rs b/src/pool/llsc.rs index 1aec52761c..83081521d4 100644 --- a/src/pool/llsc.rs +++ b/src/pool/llsc.rs @@ -1,11 +1,13 @@ //! Stack based on LL/SC atomics pub use core::ptr::NonNull as Ptr; -use core::{ - cell::UnsafeCell, - ptr, - sync::atomic::{AtomicPtr, Ordering}, -}; +use core::{cell::UnsafeCell, ptr}; + +#[cfg(armv6m)] +use atomic_polyfill::{AtomicPtr, Ordering}; + +#[cfg(not(armv6m))] +use core::sync::atomic::{AtomicPtr, Ordering}; /// Unfortunate implementation detail required to use the /// [`Pool.grow_exact`](struct.Pool.html#method.grow_exact) method From 0ffef8f9c90245716787f3988a062382baee815c Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Thu, 8 Apr 2021 12:05:46 +0200 Subject: [PATCH 32/37] Cleanup of comments --- src/sealed.rs | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/src/sealed.rs b/src/sealed.rs index bd0ac12e38..0dc6b17683 100644 --- a/src/sealed.rs +++ b/src/sealed.rs @@ -40,9 +40,6 @@ pub mod spsc { #[cfg(has_atomics)] unsafe fn load_acquire(x: *const Self) -> Self { (*(x as *const AtomicU8)).load(Ordering::Acquire) - // let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed); // read - // 
atomic::compiler_fence(Ordering::Acquire); // ▼ - // y } #[cfg(has_atomics)] @@ -53,8 +50,6 @@ pub mod spsc { #[cfg(has_atomics)] unsafe fn store_release(x: *const Self, val: Self) { (*(x as *const AtomicU8)).store(val, Ordering::Release) - // atomic::compiler_fence(Ordering::Release); // ▲ - // (*(x as *const AtomicU8)).store(val, Ordering::Relaxed) // write } } @@ -75,9 +70,6 @@ pub mod spsc { #[cfg(has_atomics)] unsafe fn load_acquire(x: *const Self) -> Self { (*(x as *const AtomicU16)).load(Ordering::Acquire) - // let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed); // read - // atomic::compiler_fence(Ordering::Acquire); // ▼ - // y } #[cfg(has_atomics)] @@ -88,8 +80,6 @@ pub mod spsc { #[cfg(has_atomics)] unsafe fn store_release(x: *const Self, val: Self) { (*(x as *const AtomicU16)).store(val, Ordering::Release) - // atomic::compiler_fence(Ordering::Release); // ▲ - // (*(x as *const AtomicU16)).store(val, Ordering::Relaxed) // write } } @@ -105,9 +95,6 @@ pub mod spsc { #[cfg(has_atomics)] unsafe fn load_acquire(x: *const Self) -> Self { (*(x as *const AtomicUsize)).load(Ordering::Acquire) - // let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed); // read - // atomic::compiler_fence(Ordering::Acquire); // ▼ - // y } #[cfg(has_atomics)] @@ -118,8 +105,6 @@ pub mod spsc { #[cfg(has_atomics)] unsafe fn store_release(x: *const Self, val: Self) { (*(x as *const AtomicUsize)).store(val, Ordering::Release) - // atomic::compiler_fence(Ordering::Release); // ▲ - // (*(x as *const AtomicUsize)).store(val, Ordering::Relaxed); // write } } } From dfa5d5091f61c45b0faaeeb147e0a01716128b1d Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Thu, 8 Apr 2021 17:37:13 +0200 Subject: [PATCH 33/37] Updated changelog --- CHANGELOG.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 387bbf5d41..e930da029a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,17 @@ and this project adheres to [Semantic 
Versioning](http://semver.org/). ## [Unreleased] +### Changed + +- [breaking-change] Converted all data structures to use the `const generics` MVP +- [breaking-change] `HistoryBuffer` is now working with const constructors and non-`Copy` data +- [breaking-change] `HistoryBuffer::as_slice` and others now only return initialized values +- [breaking-change] `MultiCore`/`SingleCore` is now removed from `spsc::Queue` +- [breaking-change] `spsc::Queue::u8()` and `spsc::Queue::u16()` are now unsafe +- `Pool` and `MPMC` now works on `thumbv6m` +- [breaking-change] `String` has had `utf8` related methods removed as this can be done via `str` +- [breaking-change] No data structures implement `AsSlice` traits any more, now using `AsRef` and `AsMut` + ## [v0.6.1] - 2021-03-02 ### Fixed From 6fdcc4fb99c68aa3c6c1358f590159f049c14adc Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Sun, 18 Apr 2021 17:32:51 +0200 Subject: [PATCH 34/37] New SPSC queue, now safe and much simpler This commit implements a new, simplified, SPSC that does not have the reported issues (e.g. not properly wrapping the indexes for non powers-of-2), and the support for multiple different index sizes has been removed for simplicity. --- CHANGELOG.md | 3 +- cfail/ui/freeze.rs | 4 +- cfail/ui/not-send.rs | 10 +- cfail/ui/not-send.stderr | 82 +-- src/histbuf.rs | 2 +- src/spsc.rs | 850 ++++++++++++++++++++++++++++++++ src/{spsc => spsc_old}/mod.rs | 0 src/{spsc => spsc_old}/split.rs | 0 tests/cpass.rs | 6 +- tests/tsan.rs | 34 +- 10 files changed, 931 insertions(+), 60 deletions(-) create mode 100644 src/spsc.rs rename src/{spsc => spsc_old}/mod.rs (100%) rename src/{spsc => spsc_old}/split.rs (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index e930da029a..c1c72846ec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
- [breaking-change] `HistoryBuffer` is now working with const constructors and non-`Copy` data - [breaking-change] `HistoryBuffer::as_slice` and others now only return initialized values - [breaking-change] `MultiCore`/`SingleCore` is now removed from `spsc::Queue` -- [breaking-change] `spsc::Queue::u8()` and `spsc::Queue::u16()` are now unsafe +- [breaking-change] `spsc::Queue` is now `usize` only +- [breaking-change] `spsc::Queue` now sacrifices one element for correctness (see issue #207) - `Pool` and `MPMC` now works on `thumbv6m` - [breaking-change] `String` has had `utf8` related methods removed as this can be done via `str` - [breaking-change] No data structures implement `AsSlice` traits any more, now using `AsRef` and `AsMut` diff --git a/cfail/ui/freeze.rs b/cfail/ui/freeze.rs index b970189248..12bbbcc238 100644 --- a/cfail/ui/freeze.rs +++ b/cfail/ui/freeze.rs @@ -1,7 +1,7 @@ -use heapless::{spsc::Queue}; +use heapless::spsc::Queue; fn main() { - let mut q: Queue = Queue::new(); + let mut q: Queue = Queue::new(); let (_p, mut _c) = q.split(); q.enqueue(0).unwrap(); diff --git a/cfail/ui/not-send.rs b/cfail/ui/not-send.rs index 6912f2ac42..0c8559d9d3 100644 --- a/cfail/ui/not-send.rs +++ b/cfail/ui/not-send.rs @@ -4,6 +4,7 @@ use core::marker::PhantomData; use heapless::{ spsc::{Consumer, Producer, Queue}, + HistoryBuffer, Vec, }; type NotSend = PhantomData<*const ()>; @@ -15,8 +16,9 @@ where } fn main() { - is_send::>(); - is_send::>(); - is_send::>(); - is_send::>(); + is_send::>(); + is_send::>(); + is_send::>(); + is_send::>(); + is_send::>(); } diff --git a/cfail/ui/not-send.stderr b/cfail/ui/not-send.stderr index c09e4468a6..97ed4e8687 100644 --- a/cfail/ui/not-send.stderr +++ b/cfail/ui/not-send.stderr @@ -1,65 +1,66 @@ error[E0277]: `*const ()` cannot be sent between threads safely - --> $DIR/not-send.rs:18:5 + --> $DIR/not-send.rs:19:5 | -11 | fn is_send() +12 | fn is_send() | ------- required by a bound in this -12 | where -13 | T: Send, +13 | 
where +14 | T: Send, | ---- required by this bound in `is_send` ... -18 | is_send::>(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely +19 | is_send::>(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely | = help: within `PhantomData<*const ()>`, the trait `Send` is not implemented for `*const ()` = note: required because it appears within the type `PhantomData<*const ()>` - = note: required because of the requirements on the impl of `Send` for `Consumer<'_, PhantomData<*const ()>, _, 4_usize>` + = note: required because of the requirements on the impl of `Send` for `Consumer<'_, PhantomData<*const ()>, 4_usize>` error[E0277]: `*const ()` cannot be sent between threads safely - --> $DIR/not-send.rs:19:5 + --> $DIR/not-send.rs:20:5 | -11 | fn is_send() +12 | fn is_send() | ------- required by a bound in this -12 | where -13 | T: Send, +13 | where +14 | T: Send, | ---- required by this bound in `is_send` ... -19 | is_send::>(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely +20 | is_send::>(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely | = help: within `PhantomData<*const ()>`, the trait `Send` is not implemented for `*const ()` = note: required because it appears within the type `PhantomData<*const ()>` - = note: required because of the requirements on the impl of `Send` for `Producer<'_, PhantomData<*const ()>, _, 4_usize>` + = note: required because of the requirements on the impl of `Send` for `Producer<'_, PhantomData<*const ()>, 4_usize>` error[E0277]: `*const ()` cannot be sent between threads safely - --> $DIR/not-send.rs:20:5 + --> $DIR/not-send.rs:21:5 | -11 | fn is_send() +12 | fn is_send() | ------- required by a bound in this -12 | where -13 | T: Send, +13 | where +14 | T: Send, | ---- required by this bound in `is_send` ... 
-20 | is_send::>(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely +21 | is_send::>(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely | - = help: within `Queue, _, 4_usize>`, the trait `Send` is not implemented for `*const ()` + = help: within `Queue, 4_usize>`, the trait `Send` is not implemented for `*const ()` = note: required because it appears within the type `PhantomData<*const ()>` - = note: required because it appears within the type `[PhantomData<*const ()>; 4]` - = note: required because it appears within the type `ManuallyDrop<[PhantomData<*const ()>; 4]>` - = note: required because it appears within the type `MaybeUninit<[PhantomData<*const ()>; 4]>` - = note: required because it appears within the type `Queue, _, 4_usize>` + = note: required because it appears within the type `ManuallyDrop>` + = note: required because it appears within the type `MaybeUninit>` + = note: required because it appears within the type `UnsafeCell>>` + = note: required because it appears within the type `[UnsafeCell>>; 4]` + = note: required because it appears within the type `Queue, 4_usize>` error[E0277]: `*const ()` cannot be sent between threads safely - --> $DIR/not-send.rs:21:5 + --> $DIR/not-send.rs:22:5 | -11 | fn is_send() +12 | fn is_send() | ------- required by a bound in this -12 | where -13 | T: Send, +13 | where +14 | T: Send, | ---- required by this bound in `is_send` ... 
-21 | is_send::>(); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely +22 | is_send::>(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely | = help: within `heapless::Vec, 4_usize>`, the trait `Send` is not implemented for `*const ()` = note: required because it appears within the type `PhantomData<*const ()>` @@ -67,3 +68,22 @@ error[E0277]: `*const ()` cannot be sent between threads safely = note: required because it appears within the type `ManuallyDrop<[PhantomData<*const ()>; 4]>` = note: required because it appears within the type `MaybeUninit<[PhantomData<*const ()>; 4]>` = note: required because it appears within the type `heapless::Vec, 4_usize>` + +error[E0277]: `*const ()` cannot be sent between threads safely + --> $DIR/not-send.rs:23:5 + | +12 | fn is_send() + | ------- required by a bound in this +13 | where +14 | T: Send, + | ---- required by this bound in `is_send` +... +23 | is_send::>(); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely + | + = help: within `HistoryBuffer, 4_usize>`, the trait `Send` is not implemented for `*const ()` + = note: required because it appears within the type `PhantomData<*const ()>` + = note: required because it appears within the type `ManuallyDrop>` + = note: required because it appears within the type `MaybeUninit>` + = note: required because it appears within the type `[MaybeUninit>; 4]` + = note: required because it appears within the type `HistoryBuffer, 4_usize>` diff --git a/src/histbuf.rs b/src/histbuf.rs index 01cb000291..2a6ce44ad9 100644 --- a/src/histbuf.rs +++ b/src/histbuf.rs @@ -12,7 +12,7 @@ use core::slice; /// ``` /// use heapless::HistoryBuffer; /// -/// // Initialize a new buffer with 8 elements, all initially zero. +/// // Initialize a new buffer with 8 elements. 
/// let mut buf = HistoryBuffer::<_, 8>::new(); /// /// // Starts with no data diff --git a/src/spsc.rs b/src/spsc.rs new file mode 100644 index 0000000000..76ecc3005f --- /dev/null +++ b/src/spsc.rs @@ -0,0 +1,850 @@ +//! Fixed capacity Single Producer Single Consumer (SPSC) queue +//! +//! Implementation based on https://www.codeproject.com/Articles/43510/Lock-Free-Single-Producer-Single-Consumer-Circular +//! +//! NOTE: This module is not available on targets that do *not* support atomic loads, e.g. RISC-V +//! cores w/o the A (Atomic) extension +//! +//! # Examples +//! +//! - `Queue` can be used as a plain queue +//! +//! ``` +//! use heapless::spsc::Queue; +//! +//! let mut rb: Queue = Queue::new(); +//! +//! assert!(rb.enqueue(0).is_ok()); +//! assert!(rb.enqueue(1).is_ok()); +//! assert!(rb.enqueue(2).is_ok()); +//! assert!(rb.enqueue(3).is_err()); // full +//! +//! assert_eq!(rb.dequeue(), Some(0)); +//! ``` +//! +//! - `Queue` can be `split` and then be used in Single Producer Single Consumer mode +//! +//! ``` +//! use heapless::spsc::Queue; +//! +//! // Notice, type signature needs to be explicit for now. +//! // (min_const_eval, does not allow for default type assignments) +//! static mut Q: Queue = Queue::new(); +//! +//! enum Event { A, B } +//! +//! fn main() { +//! // NOTE(unsafe) beware of aliasing the `consumer` end point +//! let mut consumer = unsafe { Q.split().1 }; +//! +//! loop { +//! // `dequeue` is a lockless operation +//! match consumer.dequeue() { +//! Some(Event::A) => { /* .. */ }, +//! Some(Event::B) => { /* .. */ }, +//! None => { /* sleep */ }, +//! } +//! # break +//! } +//! } +//! +//! // this is a different execution context that can preempt `main` +//! fn interrupt_handler() { +//! // NOTE(unsafe) beware of aliasing the `producer` end point +//! let mut producer = unsafe { Q.split().0 }; +//! # let condition = true; +//! +//! // .. +//! +//! if condition { +//! producer.enqueue(Event::A).ok().unwrap(); +//! } else { +//! 
producer.enqueue(Event::B).ok().unwrap(); +//! } +//! +//! // .. +//! } +//! ``` +//! +//! # Benchmarks +//! +//! Measured on a ARM Cortex-M3 core running at 8 MHz and with zero Flash wait cycles +//! +//! `-C opt-level` |`3`| +//! -----------------------|---| +//! `Consumer::dequeue`| 15| +//! `Queue::dequeue` | 12| +//! `Producer::enqueue`| 16| +//! `Queue::enqueue` | 14| +//! +//! - All execution times are in clock cycles. 1 clock cycle = 125 ns. +//! - Execution time is *dependent* of `mem::size_of::()`. Both operations include one +//! `memcpy(T)` in their successful path. +//! - The optimization level is indicated in the first row. +//! - The numbers reported correspond to the successful path (i.e. `Some` is returned by `dequeue` +//! and `Ok` is returned by `enqueue`). + +use core::{ + cell::UnsafeCell, + fmt, hash, + mem::MaybeUninit, + ptr, + sync::atomic::{AtomicUsize, Ordering}, +}; + +/// A statically allocated single producer single consumer queue with a capacity of `N - 1` elements +/// +/// *IMPORTANT*: To get better performance use a capacity that is a power of 2 (e.g. `16`, `32`, +/// etc.). 
+pub struct Queue { + // this is from where we dequeue items + pub(crate) head: AtomicUsize, + + // this is where we enqueue new items + pub(crate) tail: AtomicUsize, + + pub(crate) buffer: [UnsafeCell>; N], +} + +impl Queue { + const INIT: UnsafeCell> = UnsafeCell::new(MaybeUninit::uninit()); + + #[inline] + fn increment(val: usize) -> usize { + (val + 1) % N + } + + /// Creates an empty queue with a fixed capacity of `N - 1` + pub const fn new() -> Self { + Queue { + head: AtomicUsize::new(0), + tail: AtomicUsize::new(0), + buffer: [Self::INIT; N], + } + } + + /// Returns the maximum number of elements the queue can hold + #[inline] + pub const fn capacity(&self) -> usize { + N - 1 + } + + /// Returns the number of elements in the queue + #[inline] + pub fn len(&self) -> usize { + let current_head = self.head.load(Ordering::Relaxed); + let current_tail = self.tail.load(Ordering::Relaxed); + + current_tail.wrapping_sub(current_head).wrapping_add(N) % N + } + + /// Returns `true` if the queue is empty + #[inline] + pub fn is_empty(&self) -> bool { + self.head.load(Ordering::Relaxed) == self.tail.load(Ordering::Relaxed) + } + + /// Returns `true` if the queue is full + #[inline] + pub fn is_full(&self) -> bool { + Self::increment(self.tail.load(Ordering::Relaxed)) == self.head.load(Ordering::Relaxed) + } + + /// Iterates from the front of the queue to the back + pub fn iter(&self) -> Iter<'_, T, N> { + Iter { + rb: self, + index: 0, + len: self.len(), + } + } + + /// Returns an iterator that allows modifying each value + pub fn iter_mut(&mut self) -> IterMut<'_, T, N> { + let len = self.len(); + IterMut { + rb: self, + index: 0, + len, + } + } + + /// Adds an `item` to the end of the queue + /// + /// Returns back the `item` if the queue is full + #[inline] + pub fn enqueue(&mut self, val: T) -> Result<(), T> { + unsafe { self.inner_enqueue(val) } + } + + /// Returns the item in the front of the queue, or `None` if the queue is empty + #[inline] + pub fn 
dequeue(&mut self) -> Option { + unsafe { self.inner_dequeue() } + } + + /// Returns a reference to the item in the front of the queue without dequeuing, or + /// `None` if the queue is empty. + /// + /// # Examples + /// ``` + /// use heapless::spsc::Queue; + /// + /// let mut queue: Queue = Queue::new(); + /// let (mut producer, mut consumer) = queue.split(); + /// assert_eq!(None, consumer.peek()); + /// producer.enqueue(1); + /// assert_eq!(Some(&1), consumer.peek()); + /// assert_eq!(Some(1), consumer.dequeue()); + /// assert_eq!(None, consumer.peek()); + /// ``` + pub fn peek(&self) -> Option<&T> { + if !self.is_empty() { + let head = self.head.load(Ordering::Relaxed); + Some(unsafe { &*(self.buffer.get_unchecked(head).get() as *const T) }) + } else { + None + } + } + + // The memory for enqueueing is "owned" by the tail pointer. + // NOTE: This internal function uses internal mutability to allow the [`Producer`] to enqueue + // items without doing pointer arithmetic and accessing internal fields of this type. + unsafe fn inner_enqueue(&self, val: T) -> Result<(), T> { + let current_tail = self.tail.load(Ordering::Relaxed); + let next_tail = Self::increment(current_tail); + + if next_tail != self.head.load(Ordering::Acquire) { + (self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val)); + self.tail.store(next_tail, Ordering::Release); + + Ok(()) + } else { + Err(val) + } + } + + /// # Unsafety + /// + /// If the queue is full this operation will leak a value (T's destructor won't run on + /// the value that got overwritten by `item`), *and* will allow the `dequeue` operation + /// to create a copy of `item`, which could result in `T`'s destructor running on `item` + /// twice. 
+ unsafe fn enqueue_unchecked(&mut self, val: T) { + let current_tail = self.tail.load(Ordering::Relaxed); + + (self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val)); + self.tail + .store(Self::increment(current_tail), Ordering::Release); + } + + // The memory for dequeuing is "owned" by the head pointer,. + // NOTE: This internal function uses internal mutability to allow the [`Consumer`] to dequeue + // items without doing pointer arithmetic and accessing internal fields of this type. + unsafe fn inner_dequeue(&self) -> Option { + let current_head = self.head.load(Ordering::Relaxed); + + if current_head == self.tail.load(Ordering::Acquire) { + None + } else { + let v = (self.buffer.get_unchecked(current_head).get() as *const T).read(); + + self.head + .store(Self::increment(current_head), Ordering::Release); + + Some(v) + } + } + + /// Splits a queue into producer and consumer endpoints + pub fn split(&mut self) -> (Producer<'_, T, N>, Consumer<'_, T, N>) { + (Producer { rb: self }, Consumer { rb: self }) + } +} + +impl Clone for Queue +where + T: Clone, +{ + fn clone(&self) -> Self { + let mut new: Queue = Queue::new(); + + for s in self.iter() { + unsafe { + // NOTE(unsafe) new.capacity() == self.capacity() <= self.len() + // no overflow possible + new.enqueue_unchecked(s.clone()); + } + } + + new + } +} + +impl PartialEq> for Queue +where + T: PartialEq, +{ + fn eq(&self, other: &Queue) -> bool { + self.len() == other.len() && self.iter().zip(other.iter()).all(|(v1, v2)| v1 == v2) + } +} + +impl Eq for Queue where T: Eq {} + +/// An iterator over the items of a queue +pub struct Iter<'a, T, const N: usize> { + rb: &'a Queue, + index: usize, + len: usize, +} + +impl<'a, T, const N: usize> Clone for Iter<'a, T, N> { + fn clone(&self) -> Self { + Self { + rb: self.rb, + index: self.index, + len: self.len, + } + } +} + +/// A mutable iterator over the items of a queue +pub struct IterMut<'a, T, const N: usize> { + rb: &'a mut Queue, + index: 
usize, + len: usize, +} + +impl<'a, T, const N: usize> Iterator for Iter<'a, T, N> { + type Item = &'a T; + + fn next(&mut self) -> Option { + if self.index < self.len { + let head = self.rb.head.load(Ordering::Relaxed); + + let i = (head + self.index) % N; + self.index += 1; + + Some(unsafe { &*(self.rb.buffer.get_unchecked(i).get() as *const T) }) + } else { + None + } + } +} + +impl<'a, T, const N: usize> Iterator for IterMut<'a, T, N> { + type Item = &'a mut T; + + fn next(&mut self) -> Option { + if self.index < self.len { + let head = self.rb.head.load(Ordering::Relaxed); + + let i = (head + self.index) % N; + self.index += 1; + + Some(unsafe { &mut *(self.rb.buffer.get_unchecked(i).get() as *mut T) }) + } else { + None + } + } +} + +impl<'a, T, const N: usize> DoubleEndedIterator for Iter<'a, T, N> { + fn next_back(&mut self) -> Option { + if self.index < self.len { + let head = self.rb.head.load(Ordering::Relaxed); + + // self.len > 0, since it's larger than self.index > 0 + let i = (head + self.len - 1) % N; + self.len -= 1; + Some(unsafe { &*(self.rb.buffer.get_unchecked(i).get() as *const T) }) + } else { + None + } + } +} + +impl<'a, T, const N: usize> DoubleEndedIterator for IterMut<'a, T, N> { + fn next_back(&mut self) -> Option { + if self.index < self.len { + let head = self.rb.head.load(Ordering::Relaxed); + + // self.len > 0, since it's larger than self.index > 0 + let i = (head + self.len - 1) % N; + self.len -= 1; + Some(unsafe { &mut *(self.rb.buffer.get_unchecked(i).get() as *mut T) }) + } else { + None + } + } +} + +impl Drop for Queue { + fn drop(&mut self) { + for item in self { + unsafe { + ptr::drop_in_place(item); + } + } + } +} + +impl fmt::Debug for Queue +where + T: fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_list().entries(self.iter()).finish() + } +} + +impl hash::Hash for Queue +where + T: hash::Hash, +{ + fn hash(&self, state: &mut H) { + // iterate over self in order + for t in 
self.iter() { + hash::Hash::hash(t, state); + } + } +} + +impl hash32::Hash for Queue +where + T: hash32::Hash, +{ + fn hash(&self, state: &mut H) { + // iterate over self in order + for t in self.iter() { + hash32::Hash::hash(t, state); + } + } +} + +impl<'a, T, const N: usize> IntoIterator for &'a Queue { + type Item = &'a T; + type IntoIter = Iter<'a, T, N>; + + fn into_iter(self) -> Self::IntoIter { + self.iter() + } +} + +impl<'a, T, const N: usize> IntoIterator for &'a mut Queue { + type Item = &'a mut T; + type IntoIter = IterMut<'a, T, N>; + + fn into_iter(self) -> Self::IntoIter { + self.iter_mut() + } +} + +/// A queue "consumer"; it can dequeue items from the queue +/// NOTE the consumer semantically owns the `head` pointer of the queue +pub struct Consumer<'a, T, const N: usize> { + rb: &'a Queue, +} + +unsafe impl<'a, T, const N: usize> Send for Consumer<'a, T, N> where T: Send {} + +/// A queue "producer"; it can enqueue items into the queue +/// NOTE the producer semantically owns the `tail` pointer of the queue +pub struct Producer<'a, T, const N: usize> { + rb: &'a Queue, +} + +unsafe impl<'a, T, const N: usize> Send for Producer<'a, T, N> where T: Send {} + +impl<'a, T, const N: usize> Consumer<'a, T, N> { + /// Returns the item in the front of the queue, or `None` if the queue is empty + #[inline] + pub fn dequeue(&mut self) -> Option { + unsafe { self.rb.inner_dequeue() } + } + + /// Returns if there are any items to dequeue. 
When this returns `true`, at least the + /// first subsequent dequeue will succeed + #[inline] + pub fn ready(&self) -> bool { + !self.rb.is_empty() + } + + /// Returns the number of elements in the queue + #[inline] + pub fn len(&self) -> usize { + self.rb.len() + } + + /// Returns the maximum number of elements the queue can hold + #[inline] + pub fn capacity(&self) -> usize { + self.rb.capacity() + } + + /// Returns the item in the front of the queue without dequeuing, or `None` if the queue is + /// empty + /// + /// # Examples + /// ``` + /// use heapless::spsc::Queue; + /// + /// let mut queue: Queue = Queue::new(); + /// let (mut producer, mut consumer) = queue.split(); + /// assert_eq!(None, consumer.peek()); + /// producer.enqueue(1); + /// assert_eq!(Some(&1), consumer.peek()); + /// assert_eq!(Some(1), consumer.dequeue()); + /// assert_eq!(None, consumer.peek()); + /// ``` + #[inline] + pub fn peek(&self) -> Option<&T> { + self.rb.peek() + } +} + +impl<'a, T, const N: usize> Producer<'a, T, N> { + /// Adds an `item` to the end of the queue + /// + /// Returns back the `item` if the queue is full + #[inline] + pub fn enqueue(&mut self, val: T) -> Result<(), T> { + unsafe { self.rb.inner_enqueue(val) } + } + + /// Returns if there is any space to enqueue a new item. When this returns true, at + /// least the first subsequent enqueue will succeed. 
+ #[inline] + pub fn ready(&self) -> bool { + !self.rb.is_full() + } + + /// Returns the number of elements in the queue + #[inline] + pub fn len(&self) -> usize { + self.rb.len() + } + + /// Returns the maximum number of elements the queue can hold + #[inline] + pub fn capacity(&self) -> usize { + self.rb.capacity() + } +} + +#[cfg(test)] +mod tests { + use crate::spsc::Queue; + use hash32::Hasher; + + #[test] + fn full() { + let mut rb: Queue = Queue::new(); + + assert_eq!(rb.is_full(), false); + + rb.enqueue(1).unwrap(); + assert_eq!(rb.is_full(), false); + + rb.enqueue(2).unwrap(); + assert_eq!(rb.is_full(), true); + } + + #[test] + fn empty() { + let mut rb: Queue = Queue::new(); + + assert_eq!(rb.is_empty(), true); + + rb.enqueue(1).unwrap(); + assert_eq!(rb.is_empty(), false); + + rb.enqueue(2).unwrap(); + assert_eq!(rb.is_empty(), false); + } + + #[test] + fn len() { + let mut rb: Queue = Queue::new(); + + assert_eq!(rb.len(), 0); + + rb.enqueue(1).unwrap(); + assert_eq!(rb.len(), 1); + + rb.enqueue(2).unwrap(); + assert_eq!(rb.len(), 2); + + for _ in 0..1_000_000 { + let v = rb.dequeue().unwrap(); + println!("{}", v); + rb.enqueue(v).unwrap(); + assert_eq!(rb.len(), 2); + } + } + + #[test] + fn try_overflow() { + const N: usize = 23; + let mut rb: Queue = Queue::new(); + + for i in 0..N as i32 - 1 { + rb.enqueue(i).unwrap(); + } + + for _ in 0..1_000_000 { + for i in 0..N as i32 - 1 { + let d = rb.dequeue().unwrap(); + assert_eq!(d, i); + rb.enqueue(i).unwrap(); + } + } + } + + #[test] + fn sanity() { + let mut rb: Queue = Queue::new(); + + let (mut p, mut c) = rb.split(); + + assert_eq!(p.ready(), true); + + assert_eq!(c.ready(), false); + + assert_eq!(c.dequeue(), None); + + p.enqueue(0).unwrap(); + + assert_eq!(c.dequeue(), Some(0)); + } + + #[test] + fn static_new() { + static mut _Q: Queue = Queue::new(); + } + + #[test] + fn drop() { + struct Droppable; + impl Droppable { + fn new() -> Self { + unsafe { + COUNT += 1; + } + Droppable + } + } + + impl 
Drop for Droppable { + fn drop(&mut self) { + unsafe { + COUNT -= 1; + } + } + } + + static mut COUNT: i32 = 0; + + { + let mut v: Queue = Queue::new(); + v.enqueue(Droppable::new()).ok().unwrap(); + v.enqueue(Droppable::new()).ok().unwrap(); + v.dequeue().unwrap(); + } + + assert_eq!(unsafe { COUNT }, 0); + + { + let mut v: Queue = Queue::new(); + v.enqueue(Droppable::new()).ok().unwrap(); + v.enqueue(Droppable::new()).ok().unwrap(); + } + + assert_eq!(unsafe { COUNT }, 0); + } + + #[test] + fn iter() { + let mut rb: Queue = Queue::new(); + + rb.enqueue(0).unwrap(); + rb.dequeue().unwrap(); + rb.enqueue(1).unwrap(); + rb.enqueue(2).unwrap(); + rb.enqueue(3).unwrap(); + + let mut items = rb.iter(); + + // assert_eq!(items.next(), Some(&0)); + assert_eq!(items.next(), Some(&1)); + assert_eq!(items.next(), Some(&2)); + assert_eq!(items.next(), Some(&3)); + assert_eq!(items.next(), None); + } + + #[test] + fn iter_double_ended() { + let mut rb: Queue = Queue::new(); + + rb.enqueue(0).unwrap(); + rb.enqueue(1).unwrap(); + rb.enqueue(2).unwrap(); + + let mut items = rb.iter(); + + assert_eq!(items.next(), Some(&0)); + assert_eq!(items.next_back(), Some(&2)); + assert_eq!(items.next(), Some(&1)); + assert_eq!(items.next(), None); + assert_eq!(items.next_back(), None); + } + + #[test] + fn iter_mut() { + let mut rb: Queue = Queue::new(); + + rb.enqueue(0).unwrap(); + rb.enqueue(1).unwrap(); + rb.enqueue(2).unwrap(); + + let mut items = rb.iter_mut(); + + assert_eq!(items.next(), Some(&mut 0)); + assert_eq!(items.next(), Some(&mut 1)); + assert_eq!(items.next(), Some(&mut 2)); + assert_eq!(items.next(), None); + } + + #[test] + fn iter_mut_double_ended() { + let mut rb: Queue = Queue::new(); + + rb.enqueue(0).unwrap(); + rb.enqueue(1).unwrap(); + rb.enqueue(2).unwrap(); + + let mut items = rb.iter_mut(); + + assert_eq!(items.next(), Some(&mut 0)); + assert_eq!(items.next_back(), Some(&mut 2)); + assert_eq!(items.next(), Some(&mut 1)); + assert_eq!(items.next(), None); + 
assert_eq!(items.next_back(), None); + } + + #[test] + fn wrap_around() { + let mut rb: Queue = Queue::new(); + + rb.enqueue(0).unwrap(); + rb.enqueue(1).unwrap(); + rb.enqueue(2).unwrap(); + rb.dequeue().unwrap(); + rb.dequeue().unwrap(); + rb.dequeue().unwrap(); + rb.enqueue(3).unwrap(); + rb.enqueue(4).unwrap(); + + assert_eq!(rb.len(), 2); + } + + #[test] + fn ready_flag() { + let mut rb: Queue = Queue::new(); + let (mut p, mut c) = rb.split(); + assert_eq!(c.ready(), false); + assert_eq!(p.ready(), true); + + p.enqueue(0).unwrap(); + + assert_eq!(c.ready(), true); + assert_eq!(p.ready(), true); + + p.enqueue(1).unwrap(); + + assert_eq!(c.ready(), true); + assert_eq!(p.ready(), false); + + c.dequeue().unwrap(); + + assert_eq!(c.ready(), true); + assert_eq!(p.ready(), true); + + c.dequeue().unwrap(); + + assert_eq!(c.ready(), false); + assert_eq!(p.ready(), true); + } + + #[test] + fn clone() { + let mut rb1: Queue = Queue::new(); + rb1.enqueue(0).unwrap(); + rb1.enqueue(0).unwrap(); + rb1.dequeue().unwrap(); + rb1.enqueue(0).unwrap(); + let rb2 = rb1.clone(); + assert_eq!(rb1.capacity(), rb2.capacity()); + assert_eq!(rb1.len(), rb2.len()); + assert!(rb1.iter().zip(rb2.iter()).all(|(v1, v2)| v1 == v2)); + } + + #[test] + fn eq() { + // generate two queues with same content + // but different buffer alignment + let mut rb1: Queue = Queue::new(); + rb1.enqueue(0).unwrap(); + rb1.enqueue(0).unwrap(); + rb1.dequeue().unwrap(); + rb1.enqueue(0).unwrap(); + let mut rb2: Queue = Queue::new(); + rb2.enqueue(0).unwrap(); + rb2.enqueue(0).unwrap(); + assert!(rb1 == rb2); + // test for symmetry + assert!(rb2 == rb1); + // test for changes in content + rb1.enqueue(0).unwrap(); + assert!(rb1 != rb2); + rb2.enqueue(1).unwrap(); + assert!(rb1 != rb2); + // test for refexive relation + assert!(rb1 == rb1); + assert!(rb2 == rb2); + } + + #[test] + fn hash_equality() { + // generate two queues with same content + // but different buffer alignment + let rb1 = { + let mut rb1: 
Queue = Queue::new(); + rb1.enqueue(0).unwrap(); + rb1.enqueue(0).unwrap(); + rb1.dequeue().unwrap(); + rb1.enqueue(0).unwrap(); + rb1 + }; + let rb2 = { + let mut rb2: Queue = Queue::new(); + rb2.enqueue(0).unwrap(); + rb2.enqueue(0).unwrap(); + rb2 + }; + let hash1 = { + let mut hasher1 = hash32::FnvHasher::default(); + hash32::Hash::hash(&rb1, &mut hasher1); + let hash1 = hasher1.finish(); + hash1 + }; + let hash2 = { + let mut hasher2 = hash32::FnvHasher::default(); + hash32::Hash::hash(&rb2, &mut hasher2); + let hash2 = hasher2.finish(); + hash2 + }; + assert_eq!(hash1, hash2); + } +} diff --git a/src/spsc/mod.rs b/src/spsc_old/mod.rs similarity index 100% rename from src/spsc/mod.rs rename to src/spsc_old/mod.rs diff --git a/src/spsc/split.rs b/src/spsc_old/split.rs similarity index 100% rename from src/spsc/split.rs rename to src/spsc_old/split.rs diff --git a/tests/cpass.rs b/tests/cpass.rs index f525ee5688..95f0b7847b 100644 --- a/tests/cpass.rs +++ b/tests/cpass.rs @@ -17,9 +17,9 @@ fn send() { { } - is_send::>(); - is_send::>(); - is_send::>(); + is_send::>(); + is_send::>(); + is_send::>(); is_send::>(); is_send::>(); } diff --git a/tests/tsan.rs b/tests/tsan.rs index c3d6638460..f29b9303f3 100644 --- a/tests/tsan.rs +++ b/tests/tsan.rs @@ -9,7 +9,7 @@ use scoped_threadpool::Pool; #[test] fn once() { - static mut RB: spsc::Queue = spsc::Queue::new(); + static mut RB: spsc::Queue = spsc::Queue::new(); let rb = unsafe { &mut RB }; @@ -30,7 +30,7 @@ fn once() { #[test] fn twice() { - static mut RB: spsc::Queue = spsc::Queue::new(); + static mut RB: spsc::Queue = spsc::Queue::new(); let rb = unsafe { &mut RB }; @@ -52,7 +52,7 @@ fn twice() { #[test] fn scoped() { - let mut rb: spsc::Queue = spsc::Queue::new(); + let mut rb: spsc::Queue = spsc::Queue::new(); rb.enqueue(0).unwrap(); @@ -77,7 +77,7 @@ fn scoped() { fn contention() { const N: usize = 1024; - let mut rb: spsc::Queue = spsc::Queue::new(); + let mut rb: spsc::Queue = spsc::Queue::new(); { let (mut 
p, mut c) = rb.split(); @@ -164,9 +164,9 @@ fn mpmc_contention() { fn unchecked() { const N: usize = 1024; - let mut rb: spsc::Queue = spsc::Queue::new(); + let mut rb: spsc::Queue = spsc::Queue::new(); - for _ in 0..N / 2 { + for _ in 0..N / 2 - 1 { rb.enqueue(1).unwrap(); } @@ -175,32 +175,30 @@ fn unchecked() { Pool::new(2).scoped(move |scope| { scope.execute(move || { - for _ in 0..N / 2 { - unsafe { - p.enqueue_unchecked(2); - } + for _ in 0..N / 2 - 1 { + p.enqueue(2).unwrap(); } }); scope.execute(move || { let mut sum: usize = 0; - for _ in 0..N / 2 { - sum = sum.wrapping_add(usize::from(unsafe { c.dequeue_unchecked() })); + for _ in 0..N / 2 - 1 { + sum = sum.wrapping_add(usize::from(c.dequeue().unwrap())); } - assert_eq!(sum, N / 2); + assert_eq!(sum, N / 2 - 1); }); }); } - assert_eq!(rb.len(), N / 2); + assert_eq!(rb.len(), N / 2 - 1); } #[test] fn len_properly_wraps() { - const N: usize = 3; - let mut rb: spsc::Queue = spsc::Queue::new(); + const N: usize = 4; + let mut rb: spsc::Queue = spsc::Queue::new(); rb.enqueue(1).unwrap(); assert_eq!(rb.len(), 1); @@ -216,8 +214,8 @@ fn len_properly_wraps() { #[test] fn iterator_properly_wraps() { - const N: usize = 3; - let mut rb: spsc::Queue = spsc::Queue::new(); + const N: usize = 4; + let mut rb: spsc::Queue = spsc::Queue::new(); rb.enqueue(1).unwrap(); rb.dequeue(); From 58cb279aeca29222f773526b92fab18e5c34b4b7 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Mon, 19 Apr 2021 21:31:47 +0200 Subject: [PATCH 35/37] IndexMap::new() is now a const-fn --- CHANGELOG.md | 1 + Cargo.toml | 2 +- src/indexmap.rs | 22 ++++------------------ src/string.rs | 32 -------------------------------- 4 files changed, 6 insertions(+), 51 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index c1c72846ec..2d2d052730 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). 
- `Pool` and `MPMC` now works on `thumbv6m` - [breaking-change] `String` has had `utf8` related methods removed as this can be done via `str` - [breaking-change] No data structures implement `AsSlice` traits any more, now using `AsRef` and `AsMut` +- `IndexMap::new()` is now a `const-fn` ## [v0.6.1] - 2021-03-02 diff --git a/Cargo.toml b/Cargo.toml index 312d3cce87..f4c66eb04a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,7 +36,7 @@ scoped_threadpool = "0.1.8" atomic-polyfill = "0.1.2" [dependencies] -hash32 = "0.1.0" +hash32 = "0.2.1" [dependencies.serde] version = "1" diff --git a/src/indexmap.rs b/src/indexmap.rs index 1a3d2e1405..100d700903 100644 --- a/src/indexmap.rs +++ b/src/indexmap.rs @@ -360,25 +360,16 @@ where /// println!("{}: \"{}\"", book, review); /// } /// ``` -pub struct IndexMap -where - K: Eq + Hash, -{ +pub struct IndexMap { core: CoreMap, build_hasher: S, } -impl IndexMap, N> -where - K: Eq + Hash, - S: Default + Hasher, -{ +impl IndexMap, N> { /// Creates an empty `IndexMap`. 
- /// - /// **NOTE** This constructor will become a `const fn` in the future - pub fn new() -> Self { + pub const fn new() -> Self { IndexMap { - build_hasher: BuildHasherDefault::default(), + build_hasher: BuildHasherDefault::new(), core: CoreMap::new(), } } @@ -737,7 +728,6 @@ where K: Eq + Hash + Borrow, Q: ?Sized + Eq + Hash, S: BuildHasher, - // N: ArrayLength> + ArrayLength>, { type Output = V; @@ -751,7 +741,6 @@ where K: Eq + Hash + Borrow, Q: ?Sized + Eq + Hash, S: BuildHasher, - // N: ArrayLength> + ArrayLength>, { fn index_mut(&mut self, key: &Q) -> &mut V { self.get_mut(key).expect("key not found") @@ -763,7 +752,6 @@ where K: Eq + Hash + Clone, V: Clone, S: Clone, - // N: ArrayLength> + ArrayLength>, { fn clone(&self) -> Self { Self { @@ -778,7 +766,6 @@ where K: Eq + Hash + fmt::Debug, V: fmt::Debug, S: BuildHasher, - // N: ArrayLength> + ArrayLength>, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() @@ -789,7 +776,6 @@ impl Default for IndexMap where K: Eq + Hash, S: BuildHasher + Default, - // N: ArrayLength> + ArrayLength>, { fn default() -> Self { IndexMap { diff --git a/src/string.rs b/src/string.rs index 23735fcea3..5848095d0b 100644 --- a/src/string.rs +++ b/src/string.rs @@ -389,38 +389,6 @@ impl PartialEq> for String { } } -// macro_rules! 
impl_eq { -// ($lhs:ty, $rhs:ty) => { -// impl<'a, 'b, N> PartialEq<$rhs> for $lhs -// where -// N: ArrayLength, -// { -// #[inline] -// fn eq(&self, other: &$rhs) -> bool { -// str::eq(&self[..], &other[..]) -// } -// #[inline] -// fn ne(&self, other: &$rhs) -> bool { -// str::ne(&self[..], &other[..]) -// } -// } - -// impl<'a, 'b, N> PartialEq<$lhs> for $rhs -// where -// N: ArrayLength, -// { -// #[inline] -// fn eq(&self, other: &$lhs) -> bool { -// str::eq(&self[..], &other[..]) -// } -// #[inline] -// fn ne(&self, other: &$lhs) -> bool { -// str::ne(&self[..], &other[..]) -// } -// } -// }; -// } - // String == str impl PartialEq for String { #[inline] From bf6728ef6ffd4bee61821ede1c2b432805b28e64 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Tue, 20 Apr 2021 08:59:10 +0200 Subject: [PATCH 36/37] Clarification in changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d2d052730..d25d920d8f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/). - [breaking-change] `HistoryBuffer::as_slice` and others now only return initialized values - [breaking-change] `MultiCore`/`SingleCore` is now removed from `spsc::Queue` - [breaking-change] `spsc::Queue` is now `usize` only -- [breaking-change] `spsc::Queue` now sacrifices one element for correctness (see issue #207) +- [breaking-change] `spsc::Queue` now sacrifices one element for correctness (see issue #207), i.e. 
it creates an `N - 1` sized queue instead of the old that generated an size `N` queue - `Pool` and `MPMC` now works on `thumbv6m` - [breaking-change] `String` has had `utf8` related methods removed as this can be done via `str` - [breaking-change] No data structures implement `AsSlice` traits any more, now using `AsRef` and `AsMut` From 8754c3dfd2754dcf8d38ab26ed24ec5441af4062 Mon Sep 17 00:00:00 2001 From: Emil Fresk Date: Tue, 20 Apr 2021 16:19:00 +0200 Subject: [PATCH 37/37] Added missing `{enqueue, dequeue}_unchecked` interfaces --- src/spsc.rs | 65 +++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 56 insertions(+), 9 deletions(-) diff --git a/src/spsc.rs b/src/spsc.rs index 76ecc3005f..df46ef69c7 100644 --- a/src/spsc.rs +++ b/src/spsc.rs @@ -224,18 +224,27 @@ impl Queue { } } + // The memory for enqueueing is "owned" by the tail pointer. + // NOTE: This internal function uses internal mutability to allow the [`Producer`] to enqueue + // items without doing pointer arithmetic and accessing internal fields of this type. + unsafe fn inner_enqueue_unchecked(&self, val: T) { + let current_tail = self.tail.load(Ordering::Relaxed); + + (self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val)); + self.tail + .store(Self::increment(current_tail), Ordering::Release); + } + + /// Adds an `item` to the end of the queue, without checking if it's full + /// /// # Unsafety /// /// If the queue is full this operation will leak a value (T's destructor won't run on /// the value that got overwritten by `item`), *and* will allow the `dequeue` operation /// to create a copy of `item`, which could result in `T`'s destructor running on `item` /// twice. 
- unsafe fn enqueue_unchecked(&mut self, val: T) { - let current_tail = self.tail.load(Ordering::Relaxed); - - (self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val)); - self.tail - .store(Self::increment(current_tail), Ordering::Release); + pub unsafe fn enqueue_unchecked(&mut self, val: T) { + self.inner_enqueue_unchecked(val) } // The memory for dequeuing is "owned" by the head pointer,. @@ -256,6 +265,29 @@ impl Queue { } } + // The memory for dequeuing is "owned" by the head pointer,. + // NOTE: This internal function uses internal mutability to allow the [`Consumer`] to dequeue + // items without doing pointer arithmetic and accessing internal fields of this type. + unsafe fn inner_dequeue_unchecked(&self) -> T { + let current_head = self.head.load(Ordering::Relaxed); + let v = (self.buffer.get_unchecked(current_head).get() as *const T).read(); + + self.head + .store(Self::increment(current_head), Ordering::Release); + + v + } + + /// Returns the item in the front of the queue, without checking if there is something in the + /// queue + /// + /// # Unsafety + /// + /// If the queue is empty this operation will return uninitialized memory. + pub unsafe fn dequeue_unchecked(&mut self) -> T { + self.inner_dequeue_unchecked() + } + /// Splits a queue into producer and consumer endpoints pub fn split(&mut self) -> (Producer<'_, T, N>, Consumer<'_, T, N>) { (Producer { rb: self }, Consumer { rb: self }) @@ -464,6 +496,15 @@ impl<'a, T, const N: usize> Consumer<'a, T, N> { unsafe { self.rb.inner_dequeue() } } + /// Returns the item in the front of the queue, without checking if there are elements in the + /// queue + /// + /// See [`Queue::dequeue_unchecked`] for safety + #[inline] + pub unsafe fn dequeue_unchecked(&mut self) -> T { + self.rb.inner_dequeue_unchecked() + } + /// Returns if there are any items to dequeue. 
When this returns `true`, at least the /// first subsequent dequeue will succeed #[inline] @@ -505,14 +546,20 @@ impl<'a, T, const N: usize> Consumer<'a, T, N> { } impl<'a, T, const N: usize> Producer<'a, T, N> { - /// Adds an `item` to the end of the queue - /// - /// Returns back the `item` if the queue is full + /// Adds an `item` to the end of the queue, returns back the `item` if the queue is full #[inline] pub fn enqueue(&mut self, val: T) -> Result<(), T> { unsafe { self.rb.inner_enqueue(val) } } + /// Adds an `item` to the end of the queue, without checking if the queue is full + /// + /// See [`Queue::enqueue_unchecked`] for safety + #[inline] + pub unsafe fn enqueue_unchecked(&mut self, val: T) { + self.rb.inner_enqueue_unchecked(val) + } + /// Returns if there is any space to enqueue a new item. When this returns true, at /// least the first subsequent enqueue will succeed. #[inline]