From 394f6e47d9244fc21fc0b5d45c6561bffde8b269 Mon Sep 17 00:00:00 2001
From: cdellacqua
Date: Fri, 31 Jan 2025 15:24:53 +0100
Subject: [PATCH 1/4] feat: provide specialized methods to copy to and from
 slices + `set_len` method that mimics `Vec::set_len`

---
 src/lib.rs                         | 553 +++++++++++++++++++++++++++++
 src/ringbuffer_trait.rs            | 132 +++++++
 src/set_len_trait.rs               |  29 ++
 src/with_alloc/alloc_ringbuffer.rs |   8 +-
 src/with_alloc/vecdeque.rs         |  62 ++++
 src/with_const_generics.rs         |   9 +-
 6 files changed, 789 insertions(+), 4 deletions(-)
 create mode 100644 src/set_len_trait.rs

diff --git a/src/lib.rs b/src/lib.rs
index fc70842..2452212 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -21,6 +21,9 @@
 pub(crate) mod ringbuffer_trait;
 pub use ringbuffer_trait::RingBuffer;
 
+mod set_len_trait;
+pub use set_len_trait::SetLen;
+
 #[cfg(feature = "alloc")]
 mod with_alloc;
 #[cfg(feature = "alloc")]
@@ -1449,4 +1452,554 @@ mod tests {
         test_clone!(GrowableAllocRingBuffer::<_>::new());
         test_clone!(AllocRingBuffer::<_>::new(4));
     }
+
+    #[test]
+    fn test_copy_from_slice_power_of_two() {
+        macro_rules! test_concrete {
+            ($rb_init: expr) => {
+                // same-sized slice
+                let mut rb = $rb_init([1, 2, 3, 4]);
+                rb.copy_from_slice(0, &[5, 6, 7, 8]);
+                assert_eq!(rb.to_vec(), alloc::vec![5, 6, 7, 8]);
+
+                // same-sized slice after a push
+                let mut rb = $rb_init([1, 2, 3, 4]);
+                let initial_len = rb.len();
+                let _ = rb.enqueue(0);
+                if rb.len() > initial_len {
+                    let _ = rb.dequeue();
+                }
+                rb.copy_from_slice(0, &[5, 6, 7, 8]);
+                assert_eq!(rb.to_vec(), alloc::vec![5, 6, 7, 8]);
+
+                // same-sized slice after a roundtrip
+                let mut rb = $rb_init([1, 2, 3, 4]);
+                let initial_len = rb.len();
+                for _ in 0..rb.len() {
+                    let _ = rb.enqueue(0);
+                    if rb.len() > initial_len {
+                        let _ = rb.dequeue();
+                    }
+                }
+                rb.copy_from_slice(0, &[5, 6, 7, 8]);
+                assert_eq!(rb.to_vec(), alloc::vec![5, 6, 7, 8]);
+
+                // from offset
+                let mut rb = $rb_init([1, 2, 3, 4]);
+                rb.copy_from_slice(2, &[5, 6]);
+                assert_eq!(rb.to_vec(), alloc::vec![1, 2, 5, 6]);
+
+                // from offset after a push
+                let mut rb = $rb_init([1, 2, 3, 4]);
+                let initial_len = rb.len();
+                let _ = rb.enqueue(0);
+                if rb.len() > initial_len {
+                    let _ = rb.dequeue();
+                }
+                rb.copy_from_slice(2, &[5, 6]);
+                assert_eq!(rb.to_vec(), alloc::vec![2, 3, 5, 6]);
+
+                // from offset after a roundtrip
+                let mut rb = $rb_init([1, 2, 3, 4]);
+                let initial_len = rb.len();
+                for _ in 0..rb.len() {
+                    let _ = rb.enqueue(0);
+                    if rb.len() > initial_len {
+                        let _ = rb.dequeue();
+                    }
+                }
+                rb.copy_from_slice(2, &[5, 6]);
+                assert_eq!(rb.to_vec(), alloc::vec![0, 0, 5, 6]);
+            };
+        }
+
+        test_concrete!(|values: [i32; 4]| ConstGenericRingBuffer::<_, 4>::from(values));
+        test_concrete!(|values: [i32; 4]| GrowableAllocRingBuffer::<_>::from(values));
+        test_concrete!(|values: [i32; 4]| AllocRingBuffer::<_>::from(values));
+    }
+
+    #[test]
+    fn test_copy_from_slice_capacity_smaller_than_size() {
+        macro_rules! test_concrete {
+            ($rb_init: expr) => {
+                // same-sized slice
+                let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]);
+                rb.copy_from_slice(0, &[8, 9, 10, 11, 12, 13, 14]);
+                assert_eq!(rb.to_vec(), alloc::vec![8, 9, 10, 11, 12, 13, 14]);
+
+                // same-sized slice after a push
+                let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]);
+                let initial_len = rb.len();
+                let _ = rb.enqueue(0);
+                if rb.len() > initial_len {
+                    let _ = rb.dequeue();
+                }
+                rb.copy_from_slice(0, &[8, 9, 10, 11, 12, 13, 14]);
+                assert_eq!(rb.to_vec(), alloc::vec![8, 9, 10, 11, 12, 13, 14]);
+
+                // same-sized slice after a roundtrip
+                let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]);
+                let initial_len = rb.len();
+                for _ in 0..rb.len() {
+                    let _ = rb.enqueue(0);
+                    if rb.len() > initial_len {
+                        let _ = rb.dequeue();
+                    }
+                }
+                rb.copy_from_slice(0, &[8, 9, 10, 11, 12, 13, 14]);
+                assert_eq!(rb.to_vec(), alloc::vec![8, 9, 10, 11, 12, 13, 14]);
+
+                // from offset
+                let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]);
+                rb.copy_from_slice(2, &[8, 9, 10, 11, 12]);
+                assert_eq!(rb.to_vec(), alloc::vec![1, 2, 8, 9, 10, 11, 12]);
+
+                // from offset after a push
+                let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]);
+                let initial_len = rb.len();
+                let _ = rb.enqueue(0);
+                if rb.len() > initial_len {
+                    let _ = rb.dequeue();
+                }
+                rb.copy_from_slice(2, &[8, 9, 10, 11, 12]);
+                assert_eq!(rb.to_vec(), alloc::vec![2, 3, 8, 9, 10, 11, 12]);
+
+                // from offset after a roundtrip
+                let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]);
+                let initial_len = rb.len();
+                for _ in 0..rb.len() {
+                    let _ = rb.enqueue(0);
+                    if rb.len() > initial_len {
+                        let _ = rb.dequeue();
+                    }
+                }
+                rb.copy_from_slice(2, &[8, 9, 10, 11, 12]);
+                assert_eq!(rb.to_vec(), alloc::vec![0, 0, 8, 9, 10, 11, 12]);
+            };
+        }
+
+        test_concrete!(|values: [i32; 7]| ConstGenericRingBuffer::<_, 7>::from(values));
+        test_concrete!(|values: [i32; 7]| GrowableAllocRingBuffer::<_>::from(values));
+        test_concrete!(|values: [i32; 7]| AllocRingBuffer::<_>::from(values));
+    }
+
+    #[test]
+    fn test_copy_from_slice_non_full_rb() {
+        macro_rules! test_concrete {
+            ($rb_init: expr) => {
+                let mut rb = $rb_init(&[3, 2, 1]);
+                assert_eq!(rb.capacity(), 7);
+                // we have some space left
+                assert!(rb.len() < rb.capacity());
+
+                // copy preserves length
+                rb.copy_from_slice(0, &[1, 2, 3]);
+                assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3]);
+
+                let _ = rb.enqueue(4);
+                let _ = rb.enqueue(5);
+                let _ = rb.enqueue(6);
+                assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 4, 5, 6]);
+
+                // still preserving length
+                rb.copy_from_slice(0, &[6, 5, 4, 3, 2, 1]);
+                assert_eq!(rb.to_vec(), alloc::vec![6, 5, 4, 3, 2, 1]);
+
+                // making sure the read/write ptrs have traversed the ring
+                for i in 0..6 {
+                    let _ = rb.enqueue(i + 1);
+                    let _ = rb.dequeue();
+                }
+
+                // sanity check
+                assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 4, 5, 6]);
+                // copy from offset
+                rb.copy_from_slice(3, &[3, 2, 1]);
+                assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 3, 2, 1]);
+                // copy again
+                rb.copy_from_slice(0, &[6, 5, 4, 1, 2, 3]);
+                assert_eq!(rb.to_vec(), alloc::vec![6, 5, 4, 1, 2, 3]);
+            };
+        }
+
+        test_concrete!(|values: &[i32]| {
+            let mut rb = ConstGenericRingBuffer::<_, 7>::new();
+            rb.extend(values.iter().copied());
+            rb
+        });
+        test_concrete!(|values: &[i32]| {
+            let mut rb = GrowableAllocRingBuffer::<_>::with_capacity(7);
+            rb.extend(values.iter().copied());
+            rb
+        });
+        test_concrete!(|values: &[i32]| {
+            let mut rb = AllocRingBuffer::<_>::new(7);
+            rb.extend(values.iter().copied());
+            rb
+        });
+    }
+
+    #[test]
+    fn test_copy_from_slice_empty() {
+        macro_rules! test_concrete {
+            ($rb_init: expr) => {
+                let mut rb = $rb_init();
+                rb.copy_from_slice(0, &[0; 0]);
+                assert_eq!(rb.to_vec(), alloc::vec![]);
+            };
+        }
+
+        test_concrete!(ConstGenericRingBuffer::<i32, 1>::new);
+        test_concrete!(|| GrowableAllocRingBuffer::<i32>::with_capacity(1));
+        test_concrete!(|| AllocRingBuffer::<i32>::new(1));
+    }
+
+    #[test]
+    fn test_copy_to_slice_power_of_two() {
+        macro_rules! test_concrete {
+            ($rb_init: expr) => {
+                // same-sized slice
+                let rb = $rb_init([1, 2, 3, 4]);
+                let mut slice = [0; 4];
+                rb.copy_to_slice(0, &mut slice);
+                assert_eq!(slice.as_slice(), &[1, 2, 3, 4]);
+
+                // same-sized slice after a push
+                let mut rb = $rb_init([1, 2, 3, 4]);
+                let initial_len = rb.len();
+                let _ = rb.enqueue(0);
+                if rb.len() > initial_len {
+                    let _ = rb.dequeue();
+                }
+                let mut slice = [0; 4];
+                rb.copy_to_slice(0, &mut slice);
+                assert_eq!(slice.as_slice(), &[2, 3, 4, 0]);
+
+                // same-sized slice after a roundtrip
+                let mut rb = $rb_init([4, 3, 2, 1]);
+                let initial_len = rb.len();
+                for i in 0..rb.len() {
+                    let _ = rb.enqueue((i + 1).try_into().unwrap());
+                    if rb.len() > initial_len {
+                        let _ = rb.dequeue();
+                    }
+                }
+                let mut slice = [0; 4];
+                rb.copy_to_slice(0, &mut slice);
+                assert_eq!(slice.as_slice(), &[1, 2, 3, 4]);
+
+                // from offset
+                let rb = $rb_init([1, 2, 3, 4]);
+                let mut slice = [0; 2];
+                rb.copy_to_slice(2, &mut slice);
+                assert_eq!(slice.as_slice(), &[3, 4]);
+
+                // from offset after a push
+                let mut rb = $rb_init([1, 2, 3, 4]);
+                let initial_len = rb.len();
+                let _ = rb.enqueue(0);
+                if rb.len() > initial_len {
+                    let _ = rb.dequeue();
+                }
+                let mut slice = [0; 2];
+                rb.copy_to_slice(2, &mut slice);
+                assert_eq!(slice.as_slice(), &[4, 0]);
+
+                // from offset after a roundtrip
+                let mut rb = $rb_init([4, 3, 2, 1]);
+                let initial_len = rb.len();
+                for i in 0..rb.len() {
+                    let _ = rb.enqueue((i + 1).try_into().unwrap());
+                    if rb.len() > initial_len {
+                        let _ = rb.dequeue();
+                    }
+                }
+                let mut slice = [0; 2];
+                rb.copy_to_slice(2, &mut slice);
+                assert_eq!(slice.as_slice(), &[3, 4]);
+            };
+        }
+
+        test_concrete!(|values: [i32; 4]| ConstGenericRingBuffer::<_, 4>::from(values));
+        test_concrete!(|values: [i32; 4]| GrowableAllocRingBuffer::<_>::from(values));
+        test_concrete!(|values: [i32; 4]| AllocRingBuffer::<_>::from(values));
+    }
+
+    #[test]
+    fn test_copy_to_slice_capacity_smaller_than_size() {
+        macro_rules! test_concrete {
+            ($rb_init: expr) => {
+                // same-sized slice
+                let rb = $rb_init([1, 2, 3, 4, 5, 6, 7]);
+                let mut slice = [0; 7];
+                rb.copy_to_slice(0, &mut slice);
+                assert_eq!(slice.as_slice(), &[1, 2, 3, 4, 5, 6, 7]);
+
+                // same-sized slice after a push
+                let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]);
+                let initial_len = rb.len();
+                let _ = rb.enqueue(0);
+                if rb.len() > initial_len {
+                    let _ = rb.dequeue();
+                }
+                let mut slice = [0; 7];
+                rb.copy_to_slice(0, &mut slice);
+                assert_eq!(slice.as_slice(), &[2, 3, 4, 5, 6, 7, 0]);
+
+                // same-sized slice after a roundtrip
+                let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]);
+                let initial_len = rb.len();
+                for i in 0..rb.len() {
+                    let _ = rb.enqueue((i + 1).try_into().unwrap());
+                    if rb.len() > initial_len {
+                        let _ = rb.dequeue();
+                    }
+                }
+                let mut slice = [0; 7];
+                rb.copy_to_slice(0, &mut slice);
+                assert_eq!(slice.as_slice(), &[1, 2, 3, 4, 5, 6, 7]);
+
+                // from offset
+                let rb = $rb_init([1, 2, 3, 4, 5, 6, 7]);
+                let mut slice = [0; 5];
+                rb.copy_to_slice(2, &mut slice);
+                assert_eq!(slice.as_slice(), &[3, 4, 5, 6, 7]);
+
+                // from offset after a push
+                let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]);
+                let initial_len = rb.len();
+                let _ = rb.enqueue(0);
+                if rb.len() > initial_len {
+                    let _ = rb.dequeue();
+                }
+                let mut slice = [0; 5];
+                rb.copy_to_slice(2, &mut slice);
+                assert_eq!(slice.as_slice(), &[4, 5, 6, 7, 0]);
+
+                // from offset after a roundtrip
+                let mut rb = $rb_init([1, 2, 3, 4, 5, 6, 7]);
+                let initial_len = rb.len();
+                for i in 0..rb.len() {
+                    let _ = rb.enqueue((i + 1).try_into().unwrap());
+                    if rb.len() > initial_len {
+                        let _ = rb.dequeue();
+                    }
+                }
+                let mut slice = [0; 5];
+                rb.copy_to_slice(2, &mut slice);
+                assert_eq!(slice.as_slice(), &[3, 4, 5, 6, 7]);
+            };
+        }
+
+        test_concrete!(|values: [i32; 7]| ConstGenericRingBuffer::<_, 7>::from(values));
+        test_concrete!(|values: [i32; 7]| GrowableAllocRingBuffer::<_>::from(values));
+        test_concrete!(|values: [i32; 7]| AllocRingBuffer::<_>::from(values));
+    }
+
+    #[test]
+    fn test_copy_to_slice_non_full_rb() {
+        macro_rules! test_concrete {
+            ($rb_init: expr) => {
+                let mut rb = $rb_init(&[1, 2, 3]);
+                assert_eq!(rb.capacity(), 7);
+                // we have some space left
+                assert!(rb.len() < rb.capacity());
+
+                // copy based on length
+                let mut slice = [0; 3];
+                rb.copy_to_slice(0, &mut slice);
+                assert_eq!(slice.as_slice(), &[1, 2, 3]);
+
+                let _ = rb.enqueue(4);
+                let _ = rb.enqueue(5);
+                let _ = rb.enqueue(6);
+                // still based on length
+                let mut slice = [0; 6];
+                rb.copy_to_slice(0, &mut slice);
+                assert_eq!(slice.as_slice(), &[1, 2, 3, 4, 5, 6]);
+
+                // making sure the read/write ptrs have traversed the ring
+                for i in 0..6 {
+                    let _ = rb.enqueue(i + 1);
+                    let _ = rb.dequeue();
+                }
+
+                // sanity check
+                assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 4, 5, 6]);
+                // copy again
+                let mut slice = [0; 6];
+                rb.copy_to_slice(0, &mut slice);
+                assert_eq!(slice.as_slice(), &[1, 2, 3, 4, 5, 6]);
+            };
+        }
+
+        test_concrete!(|values: &[i32]| {
+            let mut rb = ConstGenericRingBuffer::<_, 7>::new();
+            rb.extend(values.iter().copied());
+            rb
+        });
+        test_concrete!(|values: &[i32]| {
+            let mut rb = GrowableAllocRingBuffer::<_>::with_capacity(7);
+            rb.extend(values.iter().copied());
+            rb
+        });
+        test_concrete!(|values: &[i32]| {
+            let mut rb = AllocRingBuffer::<_>::new(7);
+            rb.extend(values.iter().copied());
+            rb
+        });
+    }
+
+    #[test]
+    fn test_copy_to_slice_empty() {
+        macro_rules! test_concrete {
+            ($rb_init: expr) => {
+                let rb = $rb_init();
+                let mut slice = [];
+                rb.copy_to_slice(0, &mut slice);
+                assert_eq!(slice.as_slice(), &[0; 0]);
+            };
+        }
+
+        test_concrete!(ConstGenericRingBuffer::<i32, 1>::new);
+        test_concrete!(|| GrowableAllocRingBuffer::<i32>::with_capacity(1));
+        test_concrete!(|| AllocRingBuffer::<i32>::new(1));
+    }
+
+    #[test]
+    fn test_set_len_primitive() {
+        use crate::SetLen;
+
+        let values = [1, 2, 3, 4, 5, 6, 7, 8];
+
+        macro_rules! test_concrete {
+            ($rb_init: expr) => {
+                let mut rb = $rb_init();
+                let initial_capacity = rb.capacity();
+                unsafe { rb.set_len(4) };
+                assert_eq!(rb.capacity(), initial_capacity);
+                assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 4]);
+                unsafe { rb.set_len(8) };
+                assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 4, 5, 6, 7, 8]);
+            };
+        }
+
+        test_concrete!(|| ConstGenericRingBuffer::<i32, 8>::from(values));
+        test_concrete!(|| AllocRingBuffer::<i32>::from(values));
+    }
+
+    #[test]
+    fn test_set_len_leak() {
+        use crate::SetLen;
+
+        #[derive(Default, Clone)]
+        struct Droppable {
+            dropped: bool,
+        }
+        impl Drop for Droppable {
+            fn drop(&mut self) {
+                self.dropped = true;
+            }
+        }
+
+        let values = (0..8).map(|_| Droppable::default()).collect::<alloc::vec::Vec<_>>();
+
+        macro_rules! test_concrete {
+            ($rb_init: expr) => {
+                let mut rb = $rb_init();
+                let initial_capacity = rb.capacity();
+                unsafe { rb.set_len(4) };
+                assert_eq!(rb.capacity(), initial_capacity);
+                assert!(rb.to_vec().iter().all(|item| !item.dropped));
+                unsafe { rb.set_len(8) };
+                assert!(rb.to_vec().iter().all(|item| !item.dropped));
+                rb.clear();
+                assert!(rb.to_vec().iter().all(|item| item.dropped));
+            };
+        }
+
+        test_concrete!(|| ConstGenericRingBuffer::<Droppable, 8>::from(values.clone()));
+        test_concrete!(|| AllocRingBuffer::<Droppable>::from(values));
+    }
+
+    #[test]
+    fn test_set_len_uninit_primitive() {
+        use crate::SetLen;
+
+        macro_rules! test_concrete {
+            ($rb_init: expr) => {
+                let mut rb = $rb_init();
+                assert_eq!(rb.len(), 0);
+                unsafe { rb.set_len(4) };
+                assert_eq!(rb.len(), 4);
+                assert_eq!(rb.to_vec(), alloc::vec![1, 2, 3, 4]);
+            };
+        }
+
+        test_concrete!(|| {
+            let mut rb = ConstGenericRingBuffer::<i32, 4>::new();
+            let _ = rb.buf[0].write(1);
+            let _ = rb.buf[1].write(2);
+            let _ = rb.buf[2].write(3);
+            let _ = rb.buf[3].write(4);
+            rb
+        });
+        test_concrete!(|| {
+            let rb = AllocRingBuffer::<i32>::with_capacity_power_of_2(3);
+            // use `ptr::write` instead of `*ptr = ...` so no (uninitialized)
+            // previous value is dropped
+            unsafe {
+                rb.buf.write(1);
+                rb.buf.add(1).write(2);
+                rb.buf.add(2).write(3);
+                rb.buf.add(3).write(4);
+            }
+            rb
+        });
+    }
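+
+    // A minimal round-trip sketch tying the two copy methods together
+    // (illustrative only; assumes a small i32 buffer):
+    #[test]
+    fn test_copy_round_trip() {
+        let mut rb = ConstGenericRingBuffer::<i32, 4>::from([0, 0, 0, 0]);
+        rb.copy_from_slice(0, &[1, 2, 3, 4]);
+        let mut out = [0; 4];
+        rb.copy_to_slice(0, &mut out);
+        assert_eq!(out, [1, 2, 3, 4]);
+    }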
+
+    #[test]
+    fn test_set_len_uninit_droppable() {
+        use crate::SetLen;
+
+        #[derive(Default, Clone)]
+        struct Droppable {
+            dropped: bool,
+        }
+        impl Drop for Droppable {
+            fn drop(&mut self) {
+                self.dropped = true;
+            }
+        }
+
+        macro_rules! test_concrete {
+            ($rb_init: expr) => {
+                let mut rb = $rb_init();
+                assert_eq!(rb.len(), 0);
+                assert!(rb.to_vec().iter().all(|item| !item.dropped));
+                unsafe { rb.set_len(4) };
+                assert_eq!(rb.len(), 4);
+                assert!(rb.to_vec().iter().all(|item| !item.dropped));
+                rb.clear();
+                assert!(rb.to_vec().iter().all(|item| item.dropped));
+            };
+        }
+
+        test_concrete!(|| {
+            let mut rb = ConstGenericRingBuffer::<Droppable, 4>::new();
+            let _ = rb.buf[0].write(Droppable::default());
+            let _ = rb.buf[1].write(Droppable::default());
+            let _ = rb.buf[2].write(Droppable::default());
+            let _ = rb.buf[3].write(Droppable::default());
+            rb
+        });
+        test_concrete!(|| {
+            let rb = AllocRingBuffer::<Droppable>::with_capacity_power_of_2(3);
+            // use `ptr::write` instead of `*ptr = ...`: plain assignment would
+            // drop the uninitialized previous value, which is undefined behavior
+            unsafe {
+                rb.buf.write(Droppable::default());
+                rb.buf.add(1).write(Droppable::default());
+                rb.buf.add(2).write(Droppable::default());
+                rb.buf.add(3).write(Droppable::default());
+            }
+            rb
+        });
+    }
 }

diff --git a/src/ringbuffer_trait.rs b/src/ringbuffer_trait.rs
index 1a549fe..62e2ee8 100644
--- a/src/ringbuffer_trait.rs
+++ b/src/ringbuffer_trait.rs
@@ -253,6 +253,49 @@ pub unsafe trait RingBuffer<T>:
     {
         self.iter().any(|i| i == elem)
     }
+
+    /// Efficiently copy items from the ringbuffer to a target slice.
+    ///
+    /// # Panics
+    /// Panics if the buffer length minus the offset is NOT equal to `dst.len()`.
+    ///
+    /// # Safety
+    /// ONLY SAFE WHEN `rb` is a `*const` to an implementor of `RingBuffer`
+    unsafe fn ptr_copy_to_slice(rb: *const Self, offset: usize, dst: &mut [T])
+    where
+        T: Copy;
+
+    /// Efficiently copy items from the ringbuffer to a target slice.
+    ///
+    /// # Panics
+    /// Panics if the buffer length minus the offset is NOT equal to `dst.len()`.
+    fn copy_to_slice(&self, offset: usize, dst: &mut [T])
+    where
+        T: Copy,
+    {
+        unsafe { Self::ptr_copy_to_slice(self, offset, dst) }
+    }
+
+    /// Efficiently copy items from a slice to the ringbuffer.
+    ///
+    /// # Panics
+    /// Panics if the buffer length minus the offset is NOT equal to `src.len()`.
+    ///
+    /// # Safety
+    /// ONLY SAFE WHEN `rb` is a `*mut` to an implementor of `RingBuffer`
+    unsafe fn ptr_copy_from_slice(rb: *mut Self, offset: usize, src: &[T])
+    where
+        T: Copy;
+
+    /// Efficiently copy items from a slice to the ringbuffer.
+    ///
+    /// # Panics
+    /// Panics if the buffer length minus the offset is NOT equal to `src.len()`.
+    fn copy_from_slice(&mut self, offset: usize, src: &[T])
+    where
+        T: Copy,
+    {
+        unsafe { Self::ptr_copy_from_slice(self, offset, src) }
+    }
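+
+    // A minimal sketch of the two safe wrappers together (assuming an `i32`
+    // buffer whose length matches the slices involved):
+    //
+    //     let mut rb = AllocRingBuffer::new(4);
+    //     rb.extend([1, 2, 3, 4]);
+    //     let mut out = [0; 4];
+    //     rb.copy_to_slice(0, &mut out);        // out == [1, 2, 3, 4]
+    //     rb.copy_from_slice(0, &[5, 6, 7, 8]); // rb  == [5, 6, 7, 8]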
 }
 
 mod iter {
@@ -537,5 +580,94 @@ macro_rules! impl_ringbuffer_ext {
             self.$readptr = 0;
             self.$writeptr = 0;
         }
+
+        unsafe fn ptr_copy_to_slice(rb: *const Self, offset: usize, dst: &mut [T])
+        where
+            T: Copy,
+        {
+            let len = Self::ptr_len(rb);
+            let dst_len = dst.len();
+            assert!(
+                (offset == 0 && len == 0) || offset < len,
+                "offset ({offset}) is out of bounds for the current buffer length ({len})"
+            );
+            assert!(len - offset == dst_len, "destination slice length ({dst_len}) doesn't match buffer length ({len}) when considering the specified offset ({offset})");
+
+            if dst_len == 0 {
+                return;
+            }
+
+            let base: *const T = $get_unchecked(rb, 0);
+            let size = Self::ptr_buffer_size(rb);
+            let offset_readptr = (*rb).$readptr + offset;
+
+            let from_idx = $mask(size, offset_readptr);
+            let to_idx = $mask(size, offset_readptr + dst_len);
+
+            if from_idx < to_idx {
+                dst.copy_from_slice(unsafe {
+                    // SAFETY: the masked index is within the allocation bounds
+                    core::slice::from_raw_parts(base.add(from_idx), dst_len)
+                });
+            } else {
+                dst[..size - from_idx].copy_from_slice(unsafe {
+                    // SAFETY: the masked index is within the allocation bounds
+                    core::slice::from_raw_parts(base.add(from_idx), size - from_idx)
+                });
+                dst[size - from_idx..].copy_from_slice(unsafe {
+                    // SAFETY: the masked index is within the allocation bounds
+                    core::slice::from_raw_parts(base, to_idx)
+                });
+            }
+        }
+
+        unsafe fn ptr_copy_from_slice(rb: *mut Self, offset: usize, src: &[T])
+        where
+            T: Copy,
+        {
+            let len = Self::ptr_len(rb);
+            let src_len = src.len();
+            assert!(
+                (offset == 0 && len == 0) || offset < len,
+                "offset ({offset}) is out of bounds for the current buffer length ({len})"
+            );
+            assert!(len - offset == src_len, "source slice length ({src_len}) doesn't match buffer length ({len}) when considering the specified offset ({offset})");
+
+            if src_len == 0 {
+                return;
+            }
+
+            let base: *mut T = $get_unchecked_mut(rb, 0);
+            let size = Self::ptr_buffer_size(rb);
+            let offset_readptr = (*rb).$readptr + offset;
+
+            let from_idx = $mask(size, offset_readptr);
+            let to_idx = $mask(size, offset_readptr + src_len);
+
+            if from_idx < to_idx {
+                unsafe {
+                    // SAFETY: the masked index is within the allocation bounds
+                    core::slice::from_raw_parts_mut(base.add(from_idx), src_len)
+                }
+                .copy_from_slice(src);
+            } else {
+                unsafe {
+                    // SAFETY: the masked index is within the allocation bounds
+                    core::slice::from_raw_parts_mut(base.add(from_idx), size - from_idx)
+                }
+                .copy_from_slice(&src[..size - from_idx]);
+                unsafe {
+                    // SAFETY: the masked index is within the allocation bounds
+                    core::slice::from_raw_parts_mut(base, to_idx)
+                }
+                .copy_from_slice(&src[size - from_idx..]);
+            }
+        }
     };
 }

diff --git a/src/set_len_trait.rs b/src/set_len_trait.rs
new file mode 100644
index 0000000..91c00fc
--- /dev/null
+++ b/src/set_len_trait.rs
@@ -0,0 +1,29 @@
+/// `SetLen` is a trait defining the unsafe `set_len` method
+/// on ringbuffers that support the operation.
+pub trait SetLen {
+    /// Force the length of the ringbuffer to `new_len`.
+    ///
+    /// Note that downsizing will not call `Drop` on the elements at
+    /// `new_len..old_len`, potentially causing a memory leak.
+    ///
+    /// # Panics
+    /// Panics if `new_len` is greater than the ringbuffer capacity.
+    ///
+    /// # Safety
+    /// - Safe when `new_len <= old_len`.
+    /// - Safe when `new_len > old_len` and all the elements at `old_len..new_len` are already initialized.
+    unsafe fn set_len(&mut self, new_len: usize);
+}
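+
+// A minimal usage sketch for `SetLen` (assuming an `AllocRingBuffer<i32>`
+// whose first 8 slots are initialized):
+//
+//     use ringbuffer::{AllocRingBuffer, RingBuffer, SetLen};
+//
+//     let mut rb = AllocRingBuffer::from([1, 2, 3, 4, 5, 6, 7, 8]);
+//     unsafe { rb.set_len(4) }; // keep only the first 4 items; 5..=8 are not dropped
+//     assert_eq!(rb.to_vec(), vec![1, 2, 3, 4]);
+//     unsafe { rb.set_len(8) }; // safe here: the tail is still initialized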
+
+/// Implement `set_len` given a `readptr` and a `writeptr`.
+#[macro_export]
+macro_rules! impl_ring_buffer_set_len {
+    ($readptr: ident, $writeptr: ident) => {
+        #[inline]
+        unsafe fn set_len(&mut self, new_len: usize) {
+            let cap = self.capacity();
+            assert!(new_len <= cap, "Cannot set a length of {new_len} on a ringbuffer with capacity for {cap} items");
+            self.$writeptr = self.$readptr + new_len;
+        }
+    };
+}

diff --git a/src/with_alloc/alloc_ringbuffer.rs b/src/with_alloc/alloc_ringbuffer.rs
index f21769e..6c918a9 100644
--- a/src/with_alloc/alloc_ringbuffer.rs
+++ b/src/with_alloc/alloc_ringbuffer.rs
@@ -7,7 +7,7 @@ use crate::ringbuffer_trait::{
 
 extern crate alloc; // We need boxes, so depend on alloc
 
-use crate::{mask_and, GrowableAllocRingBuffer};
+use crate::{impl_ring_buffer_set_len, mask_and, GrowableAllocRingBuffer, SetLen};
 use core::ptr;
 
 /// The `AllocRingBuffer` is a `RingBuffer` which is based on a Vec. This means it allocates at runtime
@@ -38,7 +38,7 @@ use core::ptr;
 /// ```
 #[derive(Debug)]
 pub struct AllocRingBuffer<T> {
-    buf: *mut T,
+    pub(crate) buf: *mut T,
 
     // the size of the allocation. Next power of 2 up from the capacity
     size: usize,
@@ -369,6 +369,10 @@ impl<T> IndexMut<usize> for AllocRingBuffer<T> {
     }
 }
 
+impl<T> SetLen for AllocRingBuffer<T> {
+    impl_ring_buffer_set_len!(readptr, writeptr);
+}
+
 #[cfg(test)]
 mod tests {
     use crate::{AllocRingBuffer, RingBuffer};

diff --git a/src/with_alloc/vecdeque.rs b/src/with_alloc/vecdeque.rs
index 978bbe5..c195150 100644
--- a/src/with_alloc/vecdeque.rs
+++ b/src/with_alloc/vecdeque.rs
@@ -255,6 +255,68 @@ unsafe impl<T> RingBuffer<T> for GrowableAllocRingBuffer<T> {
         }
         .map(|i| i as *mut T)
     }
+
+    unsafe fn ptr_copy_to_slice(rb: *const Self, offset: usize, dst: &mut [T])
+    where
+        T: Copy,
+    {
+        let len = Self::ptr_len(rb);
+        let dst_len = dst.len();
+        assert!(
+            (offset == 0 && len == 0) || offset < len,
+            "offset ({offset}) is out of bounds for the current buffer length ({len})"
+        );
+        assert!(len - offset == dst_len, "destination slice length ({dst_len}) doesn't match buffer length ({len}) when considering the specified offset ({offset})");
+
+        if dst_len == 0 {
+            return;
+        }
+
+        let (front, back) = (*rb).0.as_slices();
+        let first_len = front.len();
+
+        if offset < first_len {
+            let n_in_first = first_len - offset;
+            dst[..n_in_first].copy_from_slice(&front[offset..]);
+
+            if n_in_first < dst_len {
+                dst[n_in_first..].copy_from_slice(&back[..dst_len - n_in_first]);
+            }
+        } else {
+            dst.copy_from_slice(&back[offset - first_len..]);
+        }
+    }
+
+    unsafe fn ptr_copy_from_slice(rb: *mut Self, offset: usize, src: &[T])
+    where
+        T: Copy,
+    {
+        let len = Self::ptr_len(rb);
+        let src_len = src.len();
+        assert!(
+            (offset == 0 && len == 0) || offset < len,
+            "offset ({offset}) is out of bounds for the current buffer length ({len})"
+        );
+        assert!(len - offset == src_len, "source slice length ({src_len}) doesn't match buffer length ({len}) when considering the specified offset ({offset})");
+
+        if src_len == 0 {
+            return;
+        }
+
+        let (front, back) = (*rb).0.as_mut_slices();
+        let first_len = front.len();
+
+        if offset < first_len {
+            let n_in_first = first_len - offset;
+            front[offset..].copy_from_slice(&src[..n_in_first]);
+
+            if n_in_first < src_len {
+                back[..src_len - n_in_first].copy_from_slice(&src[n_in_first..]);
+            }
+        } else {
+            back[offset - first_len..].copy_from_slice(src);
+        }
+    }
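+
+    // How `VecDeque::as_slices` partitions the deque for the two copies above
+    // (a sketch; `front` and `back` preserve element order, front first):
+    //
+    //     use std::collections::VecDeque;
+    //     let mut dq: VecDeque<i32> = (1..=4).collect();
+    //     dq.pop_front();
+    //     dq.push_back(5); // the backing storage may wrap here
+    //     let (front, back) = dq.as_slices();
+    //     assert_eq!(front.len() + back.len(), dq.len());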
 }
 
 impl<T> Extend<T> for GrowableAllocRingBuffer<T> {

diff --git a/src/with_const_generics.rs b/src/with_const_generics.rs
index fc9857d..0db112a 100644
--- a/src/with_const_generics.rs
+++ b/src/with_const_generics.rs
@@ -1,5 +1,5 @@
 use crate::ringbuffer_trait::{RingBufferIntoIterator, RingBufferIterator, RingBufferMutIterator};
-use crate::RingBuffer;
+use crate::{impl_ring_buffer_set_len, RingBuffer, SetLen};
 use core::iter::FromIterator;
 use core::mem::MaybeUninit;
 use core::mem::{self, ManuallyDrop};
@@ -35,7 +35,7 @@ use core::ops::{Index, IndexMut};
 /// ```
 #[derive(Debug)]
 pub struct ConstGenericRingBuffer<T, const CAP: usize> {
-    buf: [MaybeUninit<T>; CAP],
+    pub(crate) buf: [MaybeUninit<T>; CAP],
     readptr: usize,
     writeptr: usize,
 }
@@ -356,6 +356,11 @@ impl<T, const CAP: usize> IndexMut<usize> for ConstGenericRingBuffer<T, CAP> {
     }
 }
 
+
+impl<T, const CAP: usize> SetLen for ConstGenericRingBuffer<T, CAP> {
+    impl_ring_buffer_set_len!(readptr, writeptr);
+}
+
 #[cfg(test)]
 mod tests {
     use crate::{AllocRingBuffer, ConstGenericRingBuffer, GrowableAllocRingBuffer, RingBuffer};

From 00e654ce30702c6498c6ad4f55417c27941f61ce Mon Sep 17 00:00:00 2001
From: cdellacqua
Date: Fri, 31 Jan 2025 15:25:18 +0100
Subject: [PATCH 2/4] test: add benchmarks

---
 Cargo.toml       |   2 +-
 benches/bench.rs | 217 ++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 216 insertions(+), 3 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 54f5b71..28e92a1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -14,7 +14,7 @@ categories = ["data-structures"]
 license = "MIT"
 
 [dev-dependencies]
-criterion = "0.4.0"
+criterion = { version = "0.4.0", features = ["html_reports"] }
 compiletest_rs = "0.10.0"
 
 [features]

diff --git a/benches/bench.rs b/benches/bench.rs
index 0f03df0..c005be9 100644
--- a/benches/bench.rs
+++ b/benches/bench.rs
@@ -1,6 +1,7 @@
-#![no_coverage]
+#![feature(coverage_attribute)]
+#![coverage(off)]
 use criterion::{black_box, criterion_group, criterion_main, Bencher, Criterion};
-use ringbuffer::{AllocRingBuffer, ConstGenericRingBuffer, RingBuffer};
+use ringbuffer::{AllocRingBuffer, ConstGenericRingBuffer, RingBuffer, SetLen};
 
 fn benchmark_push<T: RingBuffer<i32>, F: Fn() -> T>(b: &mut Bencher, new: F) {
     b.iter(|| {
@@ -63,6 +64,89 @@ fn benchmark_various<T: RingBuffer<i32>, F: Fn() -> T>(b: &mut Bencher, new: F)
     })
 }
 
+fn benchmark_copy_to_slice_vs_extend<T: RingBuffer<i32>, F: Fn() -> T>(
+    rb_size: usize,
+    rb_type: &str,
+    fn_name: &str,
+    c: &mut Criterion,
+    new: F,
+) {
+    let mut group = c.benchmark_group(format!("{fn_name}({rb_type}, {rb_size})"));
+    let mut output = vec![0; rb_size];
+    group.bench_function(format!("CopyTo({rb_type}; {rb_size})"), |b| {
+        let mut rb = new();
+        rb.fill(9);
+        // making sure the read/write pointers wrap around
+        for _ in 0..rb_size / 2 {
+            let _ = rb.dequeue();
+            let _ = rb.enqueue(9);
+        }
+        b.iter(|| {
+            rb.copy_to_slice(0, &mut output);
+            assert_eq!(output[output.len() / 2], 9);
+            assert_eq!(output.len(), rb_size);
+        })
+    });
+    let mut output: Vec<i32> = Vec::with_capacity(rb_size);
+    group.bench_function(format!("ExtendVec({rb_type}; {rb_size})"), |b| {
+        let mut rb = new();
+        rb.fill(9);
+        // making sure the read/write pointers wrap around
+        for _ in 0..rb_size / 2 {
+            let _ = rb.dequeue();
+            let _ = rb.enqueue(9);
+        }
+        b.iter(|| {
+            unsafe { output.set_len(0) };
+            output.extend(rb.iter());
+            assert_eq!(output[output.len() / 2], 9);
+            assert_eq!(output.len(), rb_size);
+        })
+    });
+    group.finish();
+}
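+
+// The pair of benchmarks below mirrors the pair above: `CopyFrom` exercises
+// the (at most) two segment copies behind `copy_from_slice`, while `ExtendRb`
+// rebuilds the buffer element by element. A sketch of the wrap-around case in
+// the copy-to direction, assuming a full buffer whose masked read index is
+// `from_idx`:
+//
+//     let first = size - from_idx;
+//     dst[..first].copy_from_slice(&buf[from_idx..]); // tail of the allocation
+//     dst[first..].copy_from_slice(&buf[..dst_len - first]); // wrapped head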
{rb_size})"), |b| { + let mut rb = new(); + rb.fill(0); + // making sure the read/write pointers wrap around + for _ in 0..rb_size / 2 { + let _ = rb.dequeue(); + let _ = rb.enqueue(0); + } + for _ in 0..rb_size / 2 {} + b.iter(|| { + rb.copy_from_slice(0, &input); + assert_eq!(rb[rb.len() / 2], 9); + assert_eq!(rb.len(), rb_size); + }) + }); + group.bench_function(format!("ExtendRb({rb_type}; {rb_size})"), |b| { + let mut rb = new(); + // making sure the read/write pointers wrap around + for _ in 0..rb_size / 2 { + let _ = rb.dequeue(); + let _ = rb.enqueue(0); + } + b.iter(|| { + unsafe { rb.set_len(0) }; + rb.extend(input.iter().copied()); + assert_eq!(rb[rb.len() / 2], 9); + assert_eq!(rb.len(), rb_size); + }) + }); + group.finish(); +} + macro_rules! generate_benches { (called, $c: tt, $rb: tt, $ty: tt, $fn: tt, $bmfunc: tt, $($i:tt),*) => { $( @@ -86,6 +170,22 @@ macro_rules! generate_benches { })); )* }; + + (compare, $c: tt, $rb: tt, $ty: tt, $fn: tt, $bmfunc: tt, $($i:tt),*) => { + $( + $bmfunc($i, stringify!($rb), stringify!($bmfunc), $c, || { + $rb::<$ty>::$fn($i) + }); + )* + }; + + (compare_typed, $c: tt, $rb: tt, $ty: tt, $fn: tt, $bmfunc: tt, $($i:tt),*) => { + $( + $bmfunc($i, stringify!($rb), stringify!($bmfunc), $c, || { + $rb::<$ty, $i>::$fn() + }); + )* + }; } fn criterion_benchmark(c: &mut Criterion) { @@ -180,6 +280,119 @@ fn criterion_benchmark(c: &mut Criterion) { 8192, 8195 ]; + generate_benches![ + compare, + c, + AllocRingBuffer, + i32, + new, + benchmark_copy_to_slice_vs_extend, + 16, + 1024, + 4096, + 8192, + 1_000_000, + 1_048_576 + ]; + generate_benches![ + compare_typed, + c, + ConstGenericRingBuffer, + i32, + new, + benchmark_copy_to_slice_vs_extend, + 16, + 1024, + 4096, + 8192, + 1_000_000, + 1_048_576 + ]; + generate_benches![ + compare, + c, + AllocRingBuffer, + i32, + new, + benchmark_copy_from_slice_vs_extend, + 16, + 1024, + 4096, + 8192, + 1_000_000, + 1_048_576 + ]; + generate_benches![ + compare_typed, + c, + ConstGenericRingBuffer, + i32, + new, + benchmark_copy_from_slice_vs_extend, + 16, + 1024, + 4096, + 8192, + 1_000_000, + 1_048_576 + ]; + + generate_benches![ + compare, + c, + AllocRingBuffer, + i32, + new, + benchmark_copy_to_slice_vs_extend, + 16, + 1024, + 4096, + 8192, + 1_000_000, + 1_048_576 + ]; + generate_benches![ + compare_typed, + c, + ConstGenericRingBuffer, + i32, + new, + benchmark_copy_to_slice_vs_extend, + 16, + 1024, + 4096, + 8192, + 1_000_000, + 1_048_576 + ]; + generate_benches![ + compare, + c, + AllocRingBuffer, + i32, + new, + benchmark_copy_from_slice_vs_extend, + 16, + 1024, + 4096, + 8192, + 1_000_000, + 1_048_576 + ]; + generate_benches![ + compare_typed, + c, + ConstGenericRingBuffer, + i32, + new, + benchmark_copy_from_slice_vs_extend, + 16, + 1024, + 4096, + 8192, + 1_000_000, + 1_048_576 + ]; } criterion_group!(benches, criterion_benchmark); From 70832ae18daad75a317c7854311562e8c0014f2b Mon Sep 17 00:00:00 2001 From: cdellacqua Date: Fri, 31 Jan 2025 17:25:24 +0100 Subject: [PATCH 3/4] chore: rustfmt --- src/with_const_generics.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/src/with_const_generics.rs b/src/with_const_generics.rs index 0db112a..0aa9828 100644 --- a/src/with_const_generics.rs +++ b/src/with_const_generics.rs @@ -356,7 +356,6 @@ impl IndexMut for ConstGenericRingBuffer { } } - impl SetLen for ConstGenericRingBuffer { impl_ring_buffer_set_len!(readptr, writeptr); } From 3a36251f7c437e579893f8d84969147cb99712d3 Mon Sep 17 00:00:00 2001 From: cdellacqua Date: Mon, 3 Feb 2025 19:29:45 +0100 
Subject: [PATCH 4/4] fix: add a dedicated function to get the base of the
 buffer #141

---
 src/ringbuffer_trait.rs            |  6 +++---
 src/with_alloc/alloc_ringbuffer.rs | 12 ++++++++++++
 src/with_const_generics.rs         | 12 ++++++++++++
 3 files changed, 27 insertions(+), 3 deletions(-)

diff --git a/src/ringbuffer_trait.rs b/src/ringbuffer_trait.rs
index 62e2ee8..a788d3a 100644
--- a/src/ringbuffer_trait.rs
+++ b/src/ringbuffer_trait.rs
@@ -498,7 +498,7 @@ macro_rules! impl_ringbuffer {
 /// Implement various functions on implementors of [`RingBuffer`].
 /// This is to avoid duplicate code.
 macro_rules! impl_ringbuffer_ext {
-    ($get_unchecked: ident, $get_unchecked_mut: ident, $readptr: ident, $writeptr: ident, $mask: expr) => {
+    ($get_base_ptr: ident, $get_base_mut_ptr: ident, $get_unchecked: ident, $get_unchecked_mut: ident, $readptr: ident, $writeptr: ident, $mask: expr) => {
         #[inline]
         fn get_signed(&self, index: isize) -> Option<&T> {
             use core::ops::Not;
@@ -597,7 +597,7 @@ macro_rules! impl_ringbuffer_ext {
                 return;
             }
 
-            let base: *const T = $get_unchecked(rb, 0);
+            let base: *const T = $get_base_ptr(rb);
             let size = Self::ptr_buffer_size(rb);
             let offset_readptr = (*rb).$readptr + offset;
 
@@ -640,7 +640,7 @@ macro_rules! impl_ringbuffer_ext {
                 return;
             }
 
-            let base: *mut T = $get_unchecked_mut(rb, 0);
+            let base: *mut T = $get_base_mut_ptr(rb);
             let size = Self::ptr_buffer_size(rb);
             let offset_readptr = (*rb).$readptr + offset;
 
diff --git a/src/with_alloc/alloc_ringbuffer.rs b/src/with_alloc/alloc_ringbuffer.rs
index 6c918a9..c64ecbd 100644
--- a/src/with_alloc/alloc_ringbuffer.rs
+++ b/src/with_alloc/alloc_ringbuffer.rs
@@ -274,6 +274,8 @@ unsafe impl<T> RingBuffer<T> for AllocRingBuffer<T> {
 }
 
 impl_ringbuffer_ext!(
+    get_base_ptr,
+    get_base_mut_ptr,
     get_unchecked,
     get_unchecked_mut,
     readptr,
@@ -332,6 +334,16 @@ impl<T> AllocRingBuffer<T> {
     }
 }
 
+/// Get a const pointer to the buffer
+unsafe fn get_base_ptr<T>(rb: *const AllocRingBuffer<T>) -> *const T {
+    (*rb).buf.cast()
+}
+
+/// Get a mut pointer to the buffer
+unsafe fn get_base_mut_ptr<T>(rb: *mut AllocRingBuffer<T>) -> *mut T {
+    (*rb).buf
+}
+
 /// Get a reference from the buffer without checking it is initialized.
 ///
 /// Caller must be sure the index is in bounds, or this will panic.

diff --git a/src/with_const_generics.rs b/src/with_const_generics.rs
index 0aa9828..1ab5240 100644
--- a/src/with_const_generics.rs
+++ b/src/with_const_generics.rs
@@ -189,6 +189,16 @@ impl<T, const CAP: usize> ConstGenericRingBuffer<T, CAP> {
     }
 }
 
+/// Get a const pointer to the buffer
+unsafe fn get_base_ptr<T, const CAP: usize>(rb: *const ConstGenericRingBuffer<T, CAP>) -> *const T {
+    (*rb).buf.as_ptr().cast()
+}
+
+/// Get a mut pointer to the buffer
+unsafe fn get_base_mut_ptr<T, const CAP: usize>(rb: *mut ConstGenericRingBuffer<T, CAP>) -> *mut T {
+    (*rb).buf.as_mut_ptr().cast()
+}
+
 /// Get a reference from the buffer without checking it is initialized
 /// Caller MUST be sure this index is initialized, or undefined behavior will happen
 unsafe fn get_unchecked<'a, T, const N: usize>(
     rb: *const ConstGenericRingBuffer<T, N>,
@@ -305,6 +315,8 @@ unsafe impl<T, const CAP: usize> RingBuffer<T> for ConstGenericRingBuffer<T, CAP>