Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Don't cache head/tail index in Consumer/Producer #48

Merged
merged 2 commits into from
Dec 9, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 13 additions & 8 deletions src/chunks.rs
Original file line number Diff line number Diff line change
Expand Up @@ -238,7 +238,8 @@ impl<T> Producer<T> {
/// For a safe alternative that provides mutable slices of [`Default`]-initialized slots,
/// see [`Producer::write_chunk()`].
pub fn write_chunk_uninit(&mut self, n: usize) -> Result<WriteChunkUninit<'_, T>, ChunkError> {
let tail = self.cached_tail.get();
// "tail" is only ever written by the producer thread, "Relaxed" is enough
let tail = self.buffer.tail.load(Ordering::Relaxed);

// Check if the queue has *possibly* not enough slots.
if self.buffer.capacity - self.buffer.distance(self.cached_head.get(), tail) < n {
Expand Down Expand Up @@ -287,7 +288,8 @@ impl<T> Consumer<T> {
///
/// See the documentation of the [`chunks`](crate::chunks#examples) module.
pub fn read_chunk(&mut self, n: usize) -> Result<ReadChunk<'_, T>, ChunkError> {
let head = self.cached_head.get();
// "head" is only ever written by the consumer thread, "Relaxed" is enough
let head = self.buffer.head.load(Ordering::Relaxed);

// Check if the queue has *possibly* not enough slots.
if self.buffer.distance(head, self.cached_tail.get()) < n {
Expand Down Expand Up @@ -499,9 +501,10 @@ impl<T> WriteChunkUninit<'_, T> {

unsafe fn commit_unchecked(self, n: usize) -> usize {
let p = self.producer;
let tail = p.buffer.increment(p.cached_tail.get(), n);
// "tail" is only ever written by the producer thread, "Relaxed" is enough
let tail = p.buffer.tail.load(Ordering::Relaxed);
let tail = p.buffer.increment(tail, n);
p.buffer.tail.store(tail, Ordering::Release);
p.cached_tail.set(tail);
n
}

Expand Down Expand Up @@ -735,9 +738,10 @@ impl<T> ReadChunk<'_, T> {
self.second_ptr.add(i).drop_in_place();
}
let c = self.consumer;
let head = c.buffer.increment(c.cached_head.get(), n);
// "head" is only ever written by the consumer thread, "Relaxed" is enough
let head = c.buffer.head.load(Ordering::Relaxed);
let head = c.buffer.increment(head, n);
c.buffer.head.store(head, Ordering::Release);
c.cached_head.set(head);
n
}

Expand Down Expand Up @@ -789,9 +793,10 @@ impl<'a, T> Drop for ReadChunkIntoIter<'a, T> {
/// Non-iterated items remain in the ring buffer and are *not* dropped.
fn drop(&mut self) {
let c = &self.chunk.consumer;
let head = c.buffer.increment(c.cached_head.get(), self.iterated);
// "head" is only ever written by the consumer thread, "Relaxed" is enough
let head = c.buffer.head.load(Ordering::Relaxed);
let head = c.buffer.increment(head, self.iterated);
c.buffer.head.store(head, Ordering::Release);
c.cached_head.set(head);
}
}

Expand Down
28 changes: 10 additions & 18 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -127,11 +127,9 @@ impl<T> RingBuffer<T> {
let p = Producer {
buffer: buffer.clone(),
cached_head: Cell::new(0),
cached_tail: Cell::new(0),
};
let c = Consumer {
buffer,
cached_head: Cell::new(0),
cached_tail: Cell::new(0),
};
(p, c)
Expand Down Expand Up @@ -281,11 +279,6 @@ pub struct Producer<T> {
///
/// This value can be stale and sometimes needs to be resynchronized with `buffer.head`.
cached_head: Cell<usize>,

/// A copy of `buffer.tail` for quick access.
///
/// This value is always in sync with `buffer.tail`.
cached_tail: Cell<usize>,
}

unsafe impl<T: Send> Send for Producer<T> {}
Expand Down Expand Up @@ -317,7 +310,6 @@ impl<T> Producer<T> {
}
let tail = self.buffer.increment1(tail);
self.buffer.tail.store(tail, Ordering::Release);
self.cached_tail.set(tail);
Ok(())
} else {
Err(PushError::Full(value))
Expand Down Expand Up @@ -345,7 +337,9 @@ impl<T> Producer<T> {
pub fn slots(&self) -> usize {
let head = self.buffer.head.load(Ordering::Acquire);
self.cached_head.set(head);
self.buffer.capacity - self.buffer.distance(head, self.cached_tail.get())
// "tail" is only ever written by the producer thread, "Relaxed" is enough
let tail = self.buffer.tail.load(Ordering::Relaxed);
self.buffer.capacity - self.buffer.distance(head, tail)
}

/// Returns `true` if there are currently no slots available for writing.
Expand Down Expand Up @@ -440,7 +434,8 @@ impl<T> Producer<T> {
/// This is a strict subset of the functionality implemented in `write_chunk_uninit()`.
/// For performance, this special case is implemented separately.
fn next_tail(&self) -> Option<usize> {
let tail = self.cached_tail.get();
// "tail" is only ever written by the producer thread, "Relaxed" is enough
let tail = self.buffer.tail.load(Ordering::Relaxed);

// Check if the queue is *possibly* full.
if self.buffer.distance(self.cached_head.get(), tail) == self.buffer.capacity {
Expand Down Expand Up @@ -482,11 +477,6 @@ pub struct Consumer<T> {
/// A reference to the ring buffer.
buffer: Arc<RingBuffer<T>>,

/// A copy of `buffer.head` for quick access.
///
/// This value is always in sync with `buffer.head`.
cached_head: Cell<usize>,

/// A copy of `buffer.tail` for quick access.
///
/// This value can be stale and sometimes needs to be resynchronized with `buffer.tail`.
Expand Down Expand Up @@ -530,7 +520,6 @@ impl<T> Consumer<T> {
let value = unsafe { self.buffer.slot_ptr(head).read() };
let head = self.buffer.increment1(head);
self.buffer.head.store(head, Ordering::Release);
self.cached_head.set(head);
Ok(value)
} else {
Err(PopError::Empty)
Expand Down Expand Up @@ -584,7 +573,9 @@ impl<T> Consumer<T> {
pub fn slots(&self) -> usize {
let tail = self.buffer.tail.load(Ordering::Acquire);
self.cached_tail.set(tail);
self.buffer.distance(self.cached_head.get(), tail)
// "head" is only ever written by the consumer thread, "Relaxed" is enough
let head = self.buffer.head.load(Ordering::Relaxed);
self.buffer.distance(head, tail)
}

/// Returns `true` if there are currently no slots available for reading.
Expand Down Expand Up @@ -678,7 +669,8 @@ impl<T> Consumer<T> {
/// This is a strict subset of the functionality implemented in `read_chunk()`.
/// For performance, this special case is implemented separately.
fn next_head(&self) -> Option<usize> {
let head = self.cached_head.get();
// "head" is only ever written by the consumer thread, "Relaxed" is enough
let head = self.buffer.head.load(Ordering::Relaxed);

// Check if the queue is *possibly* empty.
if head == self.cached_tail.get() {
Expand Down