From 0f9a946b1380bb07e47c87c1555ba60a5c9841f8 Mon Sep 17 00:00:00 2001 From: Paolo Barbolini Date: Sat, 30 Nov 2024 16:46:01 +0000 Subject: [PATCH 1/5] drop dead code --- src/decoding/ringbuffer.rs | 200 ------------------------------------- 1 file changed, 200 deletions(-) diff --git a/src/decoding/ringbuffer.rs b/src/decoding/ringbuffer.rs index 9af2a69f..026b20db 100644 --- a/src/decoding/ringbuffer.rs +++ b/src/decoding/ringbuffer.rs @@ -465,66 +465,6 @@ impl RingBuffer { self.tail = (self.tail + len) % self.cap; } - - #[allow(dead_code)] - /// This function is functionally the same as [RingBuffer::extend_from_within_unchecked], - /// but it does not contain any branching operations. - /// - /// SAFETY: - /// Needs start + len <= self.len() - /// And more then len reserved space - pub unsafe fn extend_from_within_unchecked_branchless(&mut self, start: usize, len: usize) { - // data slices in raw parts - let ((s1_ptr, s1_len), (s2_ptr, s2_len)) = self.data_slice_parts(); - - debug_assert!(len <= s1_len + s2_len, "{} > {} + {}", len, s1_len, s2_len); - - // calc the actually wanted slices in raw parts - let start_in_s1 = usize::min(s1_len, start); - let end_in_s1 = usize::min(s1_len, start + len); - let m1_ptr = s1_ptr.add(start_in_s1); - let m1_len = end_in_s1 - start_in_s1; - - debug_assert!(end_in_s1 <= s1_len); - debug_assert!(start_in_s1 <= s1_len); - - let start_in_s2 = start.saturating_sub(s1_len); - let end_in_s2 = start_in_s2 + (len - m1_len); - let m2_ptr = s2_ptr.add(start_in_s2); - let m2_len = end_in_s2 - start_in_s2; - - debug_assert!(start_in_s2 <= s2_len); - debug_assert!(end_in_s2 <= s2_len); - - debug_assert_eq!(len, m1_len + m2_len); - - // the free slices, must hold: f1_len + f2_len >= m1_len + m2_len - let ((f1_ptr, f1_len), (f2_ptr, f2_len)) = self.free_slice_parts(); - - debug_assert!(f1_len + f2_len >= m1_len + m2_len); - - // calc how many from where bytes go where - let m1_in_f1 = usize::min(m1_len, f1_len); - let m1_in_f2 = m1_len - m1_in_f1; - let m2_in_f1 = usize::min(f1_len - m1_in_f1, m2_len); - let m2_in_f2 = m2_len - m2_in_f1; - - debug_assert_eq!(m1_len, m1_in_f1 + m1_in_f2); - debug_assert_eq!(m2_len, m2_in_f1 + m2_in_f2); - debug_assert!(f1_len >= m1_in_f1 + m2_in_f1); - debug_assert!(f2_len >= m1_in_f2 + m2_in_f2); - debug_assert_eq!(len, m1_in_f1 + m2_in_f1 + m1_in_f2 + m2_in_f2); - - debug_assert!(self.buf.as_ptr().add(self.cap) > f1_ptr.add(m1_in_f1 + m2_in_f1)); - debug_assert!(self.buf.as_ptr().add(self.cap) > f2_ptr.add(m1_in_f2 + m2_in_f2)); - - debug_assert!((m1_in_f2 > 0) ^ (m2_in_f1 > 0) || (m1_in_f2 == 0 && m2_in_f1 == 0)); - - copy_with_checks( - m1_ptr, m2_ptr, f1_ptr, f2_ptr, m1_in_f1, m2_in_f1, m1_in_f2, m2_in_f2, - ); - self.tail = (self.tail + len) % self.cap; - } } impl Drop for RingBuffer { @@ -605,146 +545,6 @@ unsafe fn copy_bytes_overshooting( ); } -#[allow(dead_code)] -#[inline(always)] -#[allow(clippy::too_many_arguments)] -unsafe fn copy_without_checks( - m1_ptr: *const u8, - m2_ptr: *const u8, - f1_ptr: *mut u8, - f2_ptr: *mut u8, - m1_in_f1: usize, - m2_in_f1: usize, - m1_in_f2: usize, - m2_in_f2: usize, -) { - f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1); - f1_ptr - .add(m1_in_f1) - .copy_from_nonoverlapping(m2_ptr, m2_in_f1); - - f2_ptr.copy_from_nonoverlapping(m1_ptr.add(m1_in_f1), m1_in_f2); - f2_ptr - .add(m1_in_f2) - .copy_from_nonoverlapping(m2_ptr.add(m2_in_f1), m2_in_f2); -} - -#[allow(dead_code)] -#[inline(always)] -#[allow(clippy::too_many_arguments)] -unsafe fn copy_with_checks( - m1_ptr: *const u8, - 
m2_ptr: *const u8, - f1_ptr: *mut u8, - f2_ptr: *mut u8, - m1_in_f1: usize, - m2_in_f1: usize, - m1_in_f2: usize, - m2_in_f2: usize, -) { - if m1_in_f1 != 0 { - f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1); - } - if m2_in_f1 != 0 { - f1_ptr - .add(m1_in_f1) - .copy_from_nonoverlapping(m2_ptr, m2_in_f1); - } - - if m1_in_f2 != 0 { - f2_ptr.copy_from_nonoverlapping(m1_ptr.add(m1_in_f1), m1_in_f2); - } - if m2_in_f2 != 0 { - f2_ptr - .add(m1_in_f2) - .copy_from_nonoverlapping(m2_ptr.add(m2_in_f1), m2_in_f2); - } -} - -#[allow(dead_code)] -#[inline(always)] -#[allow(clippy::too_many_arguments)] -unsafe fn copy_with_nobranch_check( - m1_ptr: *const u8, - m2_ptr: *const u8, - f1_ptr: *mut u8, - f2_ptr: *mut u8, - m1_in_f1: usize, - m2_in_f1: usize, - m1_in_f2: usize, - m2_in_f2: usize, -) { - let case = (m1_in_f1 > 0) as usize - | (((m2_in_f1 > 0) as usize) << 1) - | (((m1_in_f2 > 0) as usize) << 2) - | (((m2_in_f2 > 0) as usize) << 3); - - match case { - 0 => {} - - // one bit set - 1 => { - f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1); - } - 2 => { - f1_ptr.copy_from_nonoverlapping(m2_ptr, m2_in_f1); - } - 4 => { - f2_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f2); - } - 8 => { - f2_ptr.copy_from_nonoverlapping(m2_ptr, m2_in_f2); - } - - // two bit set - 3 => { - f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1); - f1_ptr - .add(m1_in_f1) - .copy_from_nonoverlapping(m2_ptr, m2_in_f1); - } - 5 => { - f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1); - f2_ptr.copy_from_nonoverlapping(m1_ptr.add(m1_in_f1), m1_in_f2); - } - 6 => core::hint::unreachable_unchecked(), - 7 => core::hint::unreachable_unchecked(), - 9 => { - f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1); - f2_ptr.copy_from_nonoverlapping(m2_ptr, m2_in_f2); - } - 10 => { - f1_ptr.copy_from_nonoverlapping(m2_ptr, m2_in_f1); - f2_ptr.copy_from_nonoverlapping(m2_ptr.add(m2_in_f1), m2_in_f2); - } - 12 => { - f2_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f2); - f2_ptr - .add(m1_in_f2) - .copy_from_nonoverlapping(m2_ptr, m2_in_f2); - } - - // three bit set - 11 => { - f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1); - f1_ptr - .add(m1_in_f1) - .copy_from_nonoverlapping(m2_ptr, m2_in_f1); - f2_ptr.copy_from_nonoverlapping(m2_ptr.add(m2_in_f1), m2_in_f2); - } - 13 => { - f1_ptr.copy_from_nonoverlapping(m1_ptr, m1_in_f1); - f2_ptr.copy_from_nonoverlapping(m1_ptr.add(m1_in_f1), m1_in_f2); - f2_ptr - .add(m1_in_f2) - .copy_from_nonoverlapping(m2_ptr, m2_in_f2); - } - 14 => core::hint::unreachable_unchecked(), - 15 => core::hint::unreachable_unchecked(), - _ => core::hint::unreachable_unchecked(), - } -} - #[cfg(test)] mod tests { use super::RingBuffer; From 64018eaca1d4a6ed2268138bb27193d9fa725d67 Mon Sep 17 00:00:00 2001 From: Paolo Barbolini Date: Sat, 30 Nov 2024 17:35:11 +0000 Subject: [PATCH 2/5] NonNull -> Box --- src/decoding/ringbuffer.rs | 269 ++++++++++++++++--------------------- 1 file changed, 118 insertions(+), 151 deletions(-) diff --git a/src/decoding/ringbuffer.rs b/src/decoding/ringbuffer.rs index 026b20db..e82e641a 100644 --- a/src/decoding/ringbuffer.rs +++ b/src/decoding/ringbuffer.rs @@ -1,37 +1,25 @@ -use alloc::alloc::{alloc, dealloc}; -use core::{alloc::Layout, ptr::NonNull, slice}; +use alloc::boxed::Box; +use core::{mem::MaybeUninit, slice}; pub struct RingBuffer { // Safety invariants: // - // 1. - // a.`buf` must be a valid allocation of capacity `cap` - // b. ...unless `cap=0`, in which case it is dangling - // 2. If tail≥head + // 1. If tail≥head // a. `head..tail` must contain initialized memory. 
// b. Else, `head..` and `..tail` must be initialized - // 3. `head` and `tail` are in bounds (≥ 0 and < cap) - // 4. `tail` is never `cap` except for a full buffer, and instead uses the value `0`. In other words, `tail` always points to the place + // 2. `head` and `tail` are in bounds (≥ 0 and < cap) + // 3. `tail` is never `cap` except for a full buffer, and instead uses the value `0`. In other words, `tail` always points to the place // where the next element would go (if there is space) - buf: NonNull<u8>, - cap: usize, + buf: Box<[MaybeUninit<u8>]>, head: usize, tail: usize, } -// SAFETY: RingBuffer does not hold any thread specific values -> it can be sent to another thread -> RingBuffer is Send -unsafe impl Send for RingBuffer {} - -// SAFETY: Ringbuffer does not provide unsynchronized interior mutability which makes &RingBuffer Send -> RingBuffer is Sync -unsafe impl Sync for RingBuffer {} - impl RingBuffer { pub fn new() -> Self { RingBuffer { - // SAFETY: Upholds invariant 1a as stated - buf: NonNull::dangling(), - cap: 0, - // SAFETY: Upholds invariant 2-4 + buf: Box::new_uninit_slice(0), + // SAFETY: Upholds invariant 1-3 head: 0, tail: 0, } } @@ -43,6 +31,11 @@ impl RingBuffer { x + y } + /// Return the total capacity in the buffer + pub fn capacity(&self) -> usize { + self.buf.len() + } + /// Return the amount of available space (in bytes) of the buffer. pub fn free(&self) -> usize { let (x, y) = self.free_slice_lengths(); @@ -51,8 +44,8 @@ impl RingBuffer { /// Empty the buffer and reset the head and tail. pub fn clear(&mut self) { - // SAFETY: Upholds invariant 2, trivially - // SAFETY: Upholds invariant 3; 0 is always valid + // SAFETY: Upholds invariant 1, trivially + // SAFETY: Upholds invariant 2; 0 is always valid self.head = 0; self.tail = 0; } @@ -75,65 +68,44 @@ impl RingBuffer { #[inline(never)] #[cold] fn reserve_amortized(&mut self, amount: usize) { - // SAFETY: if we were successfully able to construct this layout when we allocated then it's also valid to do so now - let current_layout = unsafe { Layout::array::<u8>(self.cap).unwrap_unchecked() }; - // Always have at least 1 unused element as the sentinel. let new_cap = usize::max( - self.cap.next_power_of_two(), - (self.cap + amount).next_power_of_two(), + self.capacity().next_power_of_two(), + (self.capacity() + amount).next_power_of_two(), ) + 1; - // Check that the capacity isn't bigger than isize::MAX, which is the max allowed by LLVM, or that - // we are on a >= 64 bit system which will never allow that much memory to be allocated - #[allow(clippy::assertions_on_constants)] - { - debug_assert!(usize::BITS >= 64 || new_cap < isize::MAX as usize); - } - - let new_layout = Layout::array::<u8>(new_cap) - .unwrap_or_else(|_| panic!("Could not create layout for u8 array of size {}", new_cap)); - - // alloc the new memory region and panic if alloc fails - // TODO maybe rework this to generate an error?
- let new_buf = unsafe { - let new_buf = alloc(new_layout); - - NonNull::new(new_buf).expect("Allocating new space for the ringbuffer failed") - }; + let mut new_buf = Box::new_uninit_slice(new_cap); // If we had data before, copy it over to the newly alloced memory region - if self.cap > 0 { - let ((s1_ptr, s1_len), (s2_ptr, s2_len)) = self.data_slice_parts(); + if self.capacity() > 0 { + let (a, b) = self.as_slices(); + let new_buf_ptr = new_buf.as_mut_ptr().cast::<u8>(); unsafe { - // SAFETY: Upholds invariant 2, we end up populating (0..(len₁ + len₂)) - new_buf.as_ptr().copy_from_nonoverlapping(s1_ptr, s1_len); - new_buf - .as_ptr() - .add(s1_len) - .copy_from_nonoverlapping(s2_ptr, s2_len); - dealloc(self.buf.as_ptr(), current_layout); + // SAFETY: Upholds invariant 1, we end up populating (0..(len₁ + len₂)) + new_buf_ptr.copy_from_nonoverlapping(a.as_ptr(), a.len()); + new_buf_ptr + .add(a.len()) + .copy_from_nonoverlapping(b.as_ptr(), b.len()); } - // SAFETY: Upholds invariant 3, head is 0 and in bounds, tail is only ever `cap` if the buffer + // SAFETY: Upholds invariant 2, head is 0 and in bounds, tail is only ever `cap` if the buffer // is entirely full - self.tail = s1_len + s2_len; + self.tail = a.len() + b.len(); self.head = 0; } - // SAFETY: Upholds invariant 1: the buffer was just allocated correctly + self.buf = new_buf; - self.cap = new_cap; } #[allow(dead_code)] pub fn push_back(&mut self, byte: u8) { self.reserve(1); - // SAFETY: Upholds invariant 2 by writing initialized memory - unsafe { self.buf.as_ptr().add(self.tail).write(byte) }; - // SAFETY: Upholds invariant 3 by wrapping `tail` around - self.tail = (self.tail + 1) % self.cap; + // SAFETY: Upholds invariant 1 by writing initialized memory + unsafe { *self.buf.get_unchecked_mut(self.tail) = MaybeUninit::new(byte) } + // SAFETY: Upholds invariant 2 by wrapping `tail` around + self.tail = (self.tail + 1) % self.capacity(); } /// Fetch the byte stored at the selected index from the buffer, returning it, or /// `None` if the index is out of bounds. #[allow(dead_code)] pub fn get(&self, idx: usize) -> Option<u8> { if idx < self.len() { // SAFETY: Establishes invariants on memory being initialized and the range being in-bounds - // (Invariants 2 & 3) - let idx = (self.head + idx) % self.cap; - Some(unsafe { self.buf.as_ptr().add(idx).read() }) + // (Invariants 1 & 2) + let idx = (self.head + idx) % self.capacity(); + Some(unsafe { self.buf.get_unchecked(idx).assume_init_read() }) } else { None } } + /// Append the provided data to the end of `self`.
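The grow path above copies both occupied halves to the front of the fresh allocation, so `head` restarts at 0 and the contents become contiguous. A minimal safe sketch of that copy-over step (illustrative only; `a` and `b` stand for the two halves returned by `as_slices`):

    fn linearize(a: &[u8], b: &[u8], new_cap: usize) -> alloc::boxed::Box<[u8]> {
        assert!(new_cap >= a.len() + b.len());
        // Zero-initialized here for simplicity; the patch itself keeps the
        // spare capacity uninitialized via `MaybeUninit`.
        let mut new_buf = alloc::vec![0u8; new_cap].into_boxed_slice();
        new_buf[..a.len()].copy_from_slice(a);
        new_buf[a.len()..a.len() + b.len()].copy_from_slice(b);
        new_buf
    }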
pub fn extend(&mut self, data: &[u8]) { - let len = data.len(); - let ptr = data.as_ptr(); - if len == 0 { + if data.is_empty() { return; } - self.reserve(len); - - debug_assert!(self.len() + len <= self.cap - 1); - debug_assert!(self.free() >= len, "free: {} len: {}", self.free(), len); - - let ((f1_ptr, f1_len), (f2_ptr, f2_len)) = self.free_slice_parts(); - debug_assert!(f1_len + f2_len >= len, "{} + {} < {}", f1_len, f2_len, len); - - let in_f1 = usize::min(len, f1_len); - - let in_f2 = len - in_f1; + self.reserve(data.len()); - debug_assert!(in_f1 + in_f2 == len); + let (a, b) = self.free_slice_parts(); + if let Some((src1, src2)) = data.split_at_checked(a.len()) { + debug_assert!( + src1.len() <= a.len(), + "{} does not fit {}", + src1.len(), + a.len() + ); + debug_assert!( + src2.len() <= b.len(), + "{} does not fit {}", + src2.len(), + b.len() + ); - unsafe { // SAFETY: `src1.len() + src2.len() == data.len()`, so this writes `data.len()` bytes total - // upholding invariant 2 - if in_f1 > 0 { - f1_ptr.copy_from_nonoverlapping(ptr, in_f1); + // upholding invariant 1 + unsafe { + a.as_mut_ptr() + .cast::<u8>() + .copy_from_nonoverlapping(src1.as_ptr(), src1.len()); + b.as_mut_ptr() + .cast::<u8>() + .copy_from_nonoverlapping(src2.as_ptr(), src2.len()); } - if in_f2 > 0 { - f2_ptr.copy_from_nonoverlapping(ptr.add(in_f1), in_f2); + } else { + debug_assert!( + data.len() <= a.len(), + "{} does not fit {}", + data.len(), + a.len() + ); + + // SAFETY: all of `data` fits `a`, so this writes `data.len()` bytes total + // upholding invariant 1 + unsafe { + a.as_mut_ptr() + .cast::<u8>() + .copy_from_nonoverlapping(data.as_ptr(), data.len()); } } + // SAFETY: Upholds invariant 3 by wrapping `tail` around. - self.tail = (self.tail + len) % self.cap; + self.tail = (self.tail + data.len()) % self.capacity(); } /// Advance head past `amount` elements, effectively removing /// them from the buffer. pub fn drop_first_n(&mut self, amount: usize) { debug_assert!(amount <= self.len()); let amount = usize::min(amount, self.len()); // SAFETY: we maintain invariant 2 here since this will always lead to a smaller buffer // for amount≤len - self.head = (self.head + amount) % self.cap; + self.head = (self.head + amount) % self.capacity(); } /// Return the size of the two contiguous occupied sections of memory used /// by the buffer. // SAFETY: other code relies on this pointing to initialized halves of the buffer only fn data_slice_lengths(&self) -> (usize, usize) { let len_after_head; let len_to_tail; // TODO can we do this branchless? if self.tail >= self.head { len_after_head = self.tail - self.head; len_to_tail = 0; } else { - len_after_head = self.cap - self.head; + len_after_head = self.capacity() - self.head; len_to_tail = self.tail; } (len_after_head, len_to_tail) } - // SAFETY: other code relies on this pointing to initialized halves of the buffer only - /// Return pointers to the head and tail, and the length of each section. - fn data_slice_parts(&self) -> ((*const u8, usize), (*const u8, usize)) { + /// Return references to each part of the ring buffer. + pub fn as_slices(&self) -> (&[u8], &[u8]) { let (len_after_head, len_to_tail) = self.data_slice_lengths(); + let buf_ptr = self.buf.as_ptr().cast::<u8>(); ( - (unsafe { self.buf.as_ptr().add(self.head) }, len_after_head), - (self.buf.as_ptr(), len_to_tail), + unsafe { slice::from_raw_parts(buf_ptr.add(self.head), len_after_head) }, + unsafe { slice::from_raw_parts(buf_ptr, len_to_tail) }, ) } - /// Return references to each part of the ring buffer.
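The new `as_slices` has the same shape as `VecDeque::as_slices`: the first slice runs from `head` to the end of the occupied region, the second holds whatever wrapped around to index 0. A short usage sketch (assuming `rb` is a `RingBuffer` whose logical contents are b"abc"):

    let (a, b) = rb.as_slices();
    let logical: alloc::vec::Vec<u8> = a.iter().chain(b).copied().collect();
    assert_eq!(logical, b"abc");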
pub fn as_slices(&self) -> (&[u8], &[u8]) { - let (s1, s2) = self.data_slice_parts(); - unsafe { - // SAFETY: relies on the behavior of data_slice_parts for producing initialized memory - let s1 = slice::from_raw_parts(s1.0, s1.1); - let s2 = slice::from_raw_parts(s2.0, s2.1); - (s1, s2) - } - } - // SAFETY: other code relies on this producing the lengths of free zones // at the beginning/end of the buffer. Everything else must be initialized /// Returns the size of the two unoccupied sections of memory used by the buffer. fn free_slice_lengths(&self) -> (usize, usize) { let len_to_head; let len_after_tail; // TODO can we do this branchless? if self.tail < self.head { len_after_tail = self.head - self.tail; len_to_head = 0; } else { - len_after_tail = self.cap - self.tail; + len_after_tail = self.capacity() - self.tail; len_to_head = self.head; } (len_to_head, len_after_tail) } /// Returns mutable references to the available space and the size of that available space, /// for the two sections in the buffer. // SAFETY: Other code relies on this pointing to the free zones, data after the first and before the second must // be valid - fn free_slice_parts(&self) -> ((*mut u8, usize), (*mut u8, usize)) { + fn free_slice_parts(&mut self) -> (&mut [MaybeUninit<u8>], &mut [MaybeUninit<u8>]) { let (len_to_head, len_after_tail) = self.free_slice_lengths(); + let buf_ptr = self.buf.as_mut_ptr(); ( - (unsafe { self.buf.as_ptr().add(self.tail) }, len_after_tail), - (self.buf.as_ptr(), len_to_head), + unsafe { slice::from_raw_parts_mut(buf_ptr.add(self.tail), len_after_tail) }, + unsafe { slice::from_raw_parts_mut(buf_ptr, len_to_head) }, ) } /// Copies elements from the provided range to the end of the buffer. #[allow(dead_code)] pub fn extend_from_within(&mut self, start: usize, len: usize) { if start + len > self.len() { panic!( "Calls to this function must respect start ({}) + len ({}) <= self.len() ({})!", start, len, self.len() ); } self.reserve(len); // SAFETY: Requirements checked: - // 1. explicitly checked above, resulting in a panic if it does not hold - // 2. explicitly reserved enough memory + // 2. explicitly checked above, resulting in a panic if it does not hold + // 3. explicitly reserved enough memory unsafe { self.extend_from_within_unchecked(start, len) } } /// Copies data from the provided range to the end of the buffer, without /// first verifying that the unoccupied capacity is available. /// /// SAFETY: /// For this to be safe two requirements need to hold: /// 2. start + len <= self.len() so we do not copy uninitialised memory /// 3.
More than len reserved space so we do not write out-of-bounds #[warn(unsafe_op_in_unsafe_fn)] pub unsafe fn extend_from_within_unchecked(&mut self, start: usize, len: usize) { debug_assert!(start + len <= self.len()); debug_assert!(self.free() >= len); + let capacity = self.capacity(); + let buf_ptr = self.buf.as_mut_ptr().cast::<u8>(); + if self.head < self.tail { // Continuous source section and possibly non continuous write section: // // H T // Read: ____XXXXSSSSXXXX________ // Write: ________________DDDD____ // // H: Head position (first readable byte) // T: Tail position (first writable byte) // X: Uninvolved bytes in the readable section // S: Source bytes, to be copied to D bytes // D: Destination bytes, going to be copied from S bytes // _: Uninvolved bytes in the writable section let after_tail = usize::min(len, capacity - self.tail); let src = ( // SAFETY: `len <= isize::MAX` and fits the memory range of `buf` - unsafe { self.buf.as_ptr().add(self.head + start) }.cast_const(), + unsafe { buf_ptr.add(self.head + start) }.cast_const(), // Src length (see above diagram) self.tail - self.head - start, ); let dst = ( // SAFETY: `len <= isize::MAX` and fits the memory range of `buf` - unsafe { self.buf.as_ptr().add(self.tail) }, + unsafe { buf_ptr.add(self.tail) }, // Dst length (see above diagram) - self.cap - self.tail, + capacity - self.tail, ); // SAFETY: `src` points at initialized data, `dst` points to writable memory // and does not overlap `src`. unsafe { copy_bytes_overshooting(src, dst, after_tail) } if after_tail < len { // The write section was not continuous: // // H T // Read: ____XXXXSSSSXXXX__ // Write: DD______________DD // // H: Head position (first readable byte) // T: Tail position (first writable byte) // X: Uninvolved bytes in the readable section // S: Source bytes, to be copied to D bytes // D: Destination bytes, going to be copied from S bytes // _: Uninvolved bytes in the writable section let src = ( // SAFETY: we are still within the memory range of `buf` unsafe { src.0.add(after_tail) }, // Src length (see above diagram) src.1 - after_tail, ); let dst = ( - self.buf.as_ptr(), - // Dst length overflowing (see above diagram) + buf_ptr, // Dst length overflowing (see above diagram) self.head, ); // SAFETY: `src` points at initialized data, `dst` points to writable memory // and does not overlap `src`. unsafe { copy_bytes_overshooting(src, dst, len - after_tail) } } } else { - if self.head + start > self.cap { + if self.head + start > capacity { // Continuous read section and destination section: // // T H // Read: XXSSSSXXXX____________XX // Write: __________DDDD__________ // // H: Head position (first readable byte) // T: Tail position (first writable byte) // X: Uninvolved bytes in the readable section // S: Source bytes, to be copied to D bytes // D: Destination bytes, going to be copied from S bytes // _: Uninvolved bytes in the writable section let start = (self.head + start) % capacity; let src = ( // SAFETY: `len <= isize::MAX` and fits the memory range of `buf` - unsafe { self.buf.as_ptr().add(start) }.cast_const(), + unsafe { buf_ptr.add(start) }.cast_const(), // Src length (see above diagram) self.tail - start, ); let dst = ( // SAFETY: `len <= isize::MAX` and fits the memory range of `buf` - unsafe { self.buf.as_ptr().add(self.tail) }, // Dst length (see above diagram) + unsafe { buf_ptr.add(self.tail) }, // Dst length (see above diagram) self.head - self.tail, ); // SAFETY: `src` points at initialized data, `dst` points to writable memory // and does not overlap `src`. unsafe { copy_bytes_overshooting(src, dst, len) } } else { // Possibly non continuous read section and continuous destination section: // // T H // Read: XXXX____________XXSSSSXX // Write: ____DDDD________________ // // H: Head position (first readable byte) // T: Tail position (first writable byte) // X: Uninvolved bytes in the readable section // S: Source bytes, to be copied to D bytes // D: Destination bytes, going to be copied from S bytes // _: Uninvolved bytes in the writable section let after_start = usize::min(len, capacity - self.head - start); let src = ( // SAFETY: `len <= isize::MAX` and fits the memory range of `buf` - unsafe { self.buf.as_ptr().add(self.head + start) }.cast_const(), + unsafe { buf_ptr.add(self.head + start) }.cast_const(), // Src length - chunk 1 (see above diagram on the right) - self.cap - self.head - start, + capacity - self.head - start, ); let dst = ( // SAFETY: `len <= isize::MAX` and fits the memory range of `buf` - unsafe { self.buf.as_ptr().add(self.tail) }, + unsafe { buf_ptr.add(self.tail) }, // Dst length (see above diagram) self.head - self.tail, ); // SAFETY: `src` points at initialized data, `dst` points to writable memory // and does not overlap `src`. unsafe { copy_bytes_overshooting(src, dst, after_start) } if after_start < len { // The read section was not continuous: // // T H // Read: SSXXXXXX____________XXSS // Write: ________DDDD____________ // // H: Head position (first readable byte) // T: Tail position (first writable byte) // X: Uninvolved bytes in the readable section // S: Source bytes, to be copied to D bytes // D: Destination bytes, going to be copied from S bytes // _: Uninvolved bytes in the writable section let src = ( - self.buf.as_ptr().cast_const(), + buf_ptr.cast_const(), // Src
length - chunk 2 (see above diagram on the left) self.tail, ); let dst = ( // SAFETY: we are still within the memory range of `buf` unsafe { dst.0.add(after_start) }, // Dst length (see above diagram) dst.1 - after_start, ); // SAFETY: `src` points at initialized data, `dst` points to writable memory // and does not overlap `src`. unsafe { copy_bytes_overshooting(src, dst, len - after_start) } } } - self.tail = (self.tail + len) % self.cap; + self.tail = (self.tail + len) % capacity; } } -impl Drop for RingBuffer { - fn drop(&mut self) { - if self.cap == 0 { - return; - } - - // SAFETY: if we were successfully able to construct this layout when we allocated then it's also valid to do so now - // Relies on / establishes invariant 1 - let current_layout = unsafe { Layout::array::<u8>(self.cap).unwrap_unchecked() }; - - unsafe { - dealloc(self.buf.as_ptr(), current_layout); - } - } -} #[cfg(test)] mod tests { use super::RingBuffer; let mut rb = RingBuffer::new(); rb.reserve(15); - assert_eq!(17, rb.cap); + assert_eq!(17, rb.capacity()); rb.extend(b"0123456789"); assert_eq!(rb.len(), 10); // Fill exactly, then empty then fill again let mut rb = RingBuffer::new(); rb.reserve(16); - assert_eq!(17, rb.cap); + assert_eq!(17, rb.capacity()); rb.extend(b"0123456789012345"); - assert_eq!(17, rb.cap); + assert_eq!(17, rb.capacity()); assert_eq!(16, rb.len()); assert_eq!(0, rb.free()); rb.drop_first_n(16); rb.extend(b"0123456789012345"); assert_eq!(16, rb.len()); assert_eq!(0, rb.free()); - assert_eq!(17, rb.cap); + assert_eq!(17, rb.capacity()); assert_eq!(1, rb.as_slices().0.len()); assert_eq!(15, rb.as_slices().1.len()); rb.extend(b"67890123"); assert_eq!(16, rb.len()); assert_eq!(0, rb.free()); - assert_eq!(17, rb.cap); + assert_eq!(17, rb.capacity()); assert_eq!(9, rb.as_slices().0.len()); assert_eq!(7, rb.as_slices().1.len()); rb.reserve(1); assert_eq!(16, rb.len()); assert_eq!(16, rb.free()); - assert_eq!(33, rb.cap); + assert_eq!(33, rb.capacity()); assert_eq!(16, rb.as_slices().0.len()); assert_eq!(0, rb.as_slices().1.len()); rb.extend_from_within(0, 16); assert_eq!(32, rb.len()); assert_eq!(0, rb.free()); - assert_eq!(33, rb.cap); + assert_eq!(33, rb.capacity()); assert_eq!(32, rb.as_slices().0.len()); assert_eq!(0, rb.as_slices().1.len()); From d0fbe211b762e445d39cf475a9792b503272a387 Mon Sep 17 00:00:00 2001 From: Paolo Barbolini Date: Fri, 29 Nov 2024 12:10:45 +0100 Subject: [PATCH 3/5] vecdeque --- src/decoding/ringbuffer.rs | 650 +++++++++++++------------------------ 1 file changed, 218 insertions(+), 432 deletions(-) diff --git a/src/decoding/ringbuffer.rs b/src/decoding/ringbuffer.rs index e82e641a..6a302140 100644 --- a/src/decoding/ringbuffer.rs +++ b/src/decoding/ringbuffer.rs @@ -1,250 +1,103 @@ -use alloc::boxed::Box; -use core::{mem::MaybeUninit, slice}; +use alloc::collections::VecDeque; +use core::{cmp, hint::unreachable_unchecked, mem::MaybeUninit, slice}; pub struct RingBuffer { - // Safety invariants: - // - // 1. If tail≥head - // a. `head..tail` must contain initialized memory. - // b. Else, `head..` and `..tail` must be initialized - // 2. `head` and `tail` are in bounds (≥ 0 and < cap) - // 3. `tail` is never `cap` except for a full buffer, and instead uses the value `0`. In other words, `tail` always points to the place - // where the next element would go (if there is space) - buf: Box<[MaybeUninit<u8>]>, - head: usize, - tail: usize, + buf: VecDeque<MaybeUninit<u8>>, } impl RingBuffer { pub fn new() -> Self { RingBuffer { - buf: Box::new_uninit_slice(0), - // SAFETY: Upholds invariant 1-3 - head: 0, - tail: 0, + buf: VecDeque::new(), } } /// Return the number of bytes in the buffer.
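From this patch on, `VecDeque` owns the head/tail bookkeeping, the growth policy, and the two-slice view, and the wrapper keeps only the zstd-specific operations. The same delegation pattern on a plain `VecDeque<u8>` (illustration only, without the `MaybeUninit` element type used here):

    use std::collections::VecDeque;

    let mut dq: VecDeque<u8> = VecDeque::with_capacity(16);
    dq.extend(b"0123456789".iter().copied());
    dq.drain(..5); // the equivalent of drop_first_n(5)
    let (a, b) = dq.as_slices(); // contents may be split across the wrap point
    assert_eq!(a.len() + b.len(), 5);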
pub fn len(&self) -> usize { - let (x, y) = self.data_slice_lengths(); - x + y + self.buf.len() } /// Return the total capacity in the buffer + #[cfg(test)] pub fn capacity(&self) -> usize { - self.buf.len() + self.buf.capacity() } /// Return the amount of available space (in bytes) of the buffer. pub fn free(&self) -> usize { - let (x, y) = self.free_slice_lengths(); - (x + y).saturating_sub(1) + let len = self.buf.len(); + let capacity = self.buf.capacity(); + if len > capacity { + unsafe { unreachable_unchecked() } + } + + capacity - len } /// Empty the buffer and reset the head and tail. pub fn clear(&mut self) { - // SAFETY: Upholds invariant 1, trivially - // SAFETY: Upholds invariant 2; 0 is always valid - self.head = 0; - self.tail = 0; + self.buf.clear(); } /// Whether the buffer is empty pub fn is_empty(&self) -> bool { - self.head == self.tail + self.buf.is_empty() } /// Ensure that there's space for `additional` elements in the buffer. - pub fn reserve(&mut self, amount: usize) { - let free = self.free(); - if free >= amount { - return; + pub fn reserve(&mut self, additional: usize) { + if self.free() < additional { + self.reserve_amortized(additional); } - self.reserve_amortized(amount - free); + if self.free() < additional { + unsafe { unreachable_unchecked() } + } } #[inline(never)] #[cold] - fn reserve_amortized(&mut self, amount: usize) { - // Always have at least 1 unused element as the sentinel. - let new_cap = usize::max( - self.capacity().next_power_of_two(), - (self.capacity() + amount).next_power_of_two(), - ) + 1; - - let mut new_buf = Box::new_uninit_slice(new_cap); - - // If we had data before, copy it over to the newly alloced memory region - if self.capacity() > 0 { - let (a, b) = self.as_slices(); - - let new_buf_ptr = new_buf.as_mut_ptr().cast::<u8>(); - unsafe { - // SAFETY: Upholds invariant 1, we end up populating (0..(len₁ + len₂)) - new_buf_ptr.copy_from_nonoverlapping(a.as_ptr(), a.len()); - new_buf_ptr - .add(a.len()) - .copy_from_nonoverlapping(b.as_ptr(), b.len()); - } - - // SAFETY: Upholds invariant 2, head is 0 and in bounds, tail is only ever `cap` if the buffer - // is entirely full - self.tail = a.len() + b.len(); - self.head = 0; - } - - self.buf = new_buf; + fn reserve_amortized(&mut self, additional: usize) { + self.buf.reserve(additional); } #[allow(dead_code)] pub fn push_back(&mut self, byte: u8) { self.reserve(1); - - // SAFETY: Upholds invariant 1 by writing initialized memory - unsafe { *self.buf.get_unchecked_mut(self.tail) = MaybeUninit::new(byte) } - // SAFETY: Upholds invariant 2 by wrapping `tail` around - self.tail = (self.tail + 1) % self.capacity(); + self.buf.push_back(MaybeUninit::new(byte)); } /// Fetch the byte stored at the selected index from the buffer, returning it, or /// `None` if the index is out of bounds. #[allow(dead_code)] pub fn get(&self, idx: usize) -> Option<u8> { - if idx < self.len() { - // SAFETY: Establishes invariants on memory being initialized and the range being in-bounds - // (Invariants 1 & 2) - let idx = (self.head + idx) % self.capacity(); - Some(unsafe { self.buf.get_unchecked(idx).assume_init_read() }) - } else { - None - } + self.buf + .get(idx) + .map(|&byte| unsafe { MaybeUninit::assume_init(byte) }) } /// Append the provided data to the end of `self`.
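`get` may call `assume_init` because the buffer only ever holds initialized bytes below `len`: `push_back` stores `MaybeUninit::new(byte)` and `extend` copies from an already initialized `&[u8]`. A minimal, self-contained illustration of why that read is sound:

    use core::mem::MaybeUninit;

    let slot = MaybeUninit::new(42u8);
    // Sound: the slot was initialized on the line above.
    let byte = unsafe { slot.assume_init() };
    assert_eq!(byte, 42);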
pub fn extend(&mut self, data: &[u8]) { - if data.is_empty() { - return; - } - - self.reserve(data.len()); - - let (a, b) = self.free_slice_parts(); - if let Some((src1, src2)) = data.split_at_checked(a.len()) { - debug_assert!( - src1.len() <= a.len(), - "{} does not fit {}", - src1.len(), - a.len() - ); - debug_assert!( - src2.len() <= b.len(), - "{} does not fit {}", - src2.len(), - b.len() - ); - - // SAFETY: `src1.len() + src2.len() == data.len()`, so this writes `data.len()` bytes total - // upholding invariant 1 - unsafe { - a.as_mut_ptr() - .cast::<u8>() - .copy_from_nonoverlapping(src1.as_ptr(), src1.len()); - b.as_mut_ptr() - .cast::<u8>() - .copy_from_nonoverlapping(src2.as_ptr(), src2.len()); - } - } else { - debug_assert!( - data.len() <= a.len(), - "{} does not fit {}", - data.len(), - a.len() - ); - - // SAFETY: all of `data` fits `a`, so this writes `data.len()` bytes total - // upholding invariant 1 - unsafe { - a.as_mut_ptr() - .cast::<u8>() - .copy_from_nonoverlapping(data.as_ptr(), data.len()); - } - } - - // SAFETY: Upholds invariant 3 by wrapping `tail` around. - self.tail = (self.tail + data.len()) % self.capacity(); + let len = data.len(); + let data = data.as_ptr().cast::<MaybeUninit<u8>>(); + let data = unsafe { slice::from_raw_parts(data, len) }; + self.buf.extend(data); } /// Advance head past `amount` elements, effectively removing /// them from the buffer. pub fn drop_first_n(&mut self, amount: usize) { debug_assert!(amount <= self.len()); - let amount = usize::min(amount, self.len()); - // SAFETY: we maintain invariant 2 here since this will always lead to a smaller buffer - // for amount≤len - self.head = (self.head + amount) % self.capacity(); + self.buf.drain(..amount); } - /// Return the size of the two contiguous occupied sections of memory used - /// by the buffer. - // SAFETY: other code relies on this pointing to initialized halves of the buffer only - fn data_slice_lengths(&self) -> (usize, usize) { - let len_after_head; - let len_to_tail; - - // TODO can we do this branchless? - if self.tail >= self.head { - len_after_head = self.tail - self.head; - len_to_tail = 0; - } else { - len_after_head = self.capacity() - self.head; - len_to_tail = self.tail; - } - (len_after_head, len_to_tail) - } /// Return references to each part of the ring buffer. pub fn as_slices(&self) -> (&[u8], &[u8]) { - let (len_after_head, len_to_tail) = self.data_slice_lengths(); - - let buf_ptr = self.buf.as_ptr().cast::<u8>(); - ( - unsafe { slice::from_raw_parts(buf_ptr.add(self.head), len_after_head) }, - unsafe { slice::from_raw_parts(buf_ptr, len_to_tail) }, - ) - } - - // SAFETY: other code relies on this producing the lengths of free zones - // at the beginning/end of the buffer. Everything else must be initialized - /// Returns the size of the two unoccupied sections of memory used by the buffer. - fn free_slice_lengths(&self) -> (usize, usize) { - let len_to_head; - let len_after_tail; - - // TODO can we do this branchless? - if self.tail < self.head { - len_after_tail = self.head - self.tail; - len_to_head = 0; - } else { - len_after_tail = self.capacity() - self.tail; - len_to_head = self.head; - } - (len_to_head, len_after_tail) - } - - /// Returns mutable references to the available space and the size of that available space, - /// for the two sections in the buffer.
- // SAFETY: Other code relies on this pointing to the free zones, data after the first and before the second must - // be valid - fn free_slice_parts(&mut self) -> (&mut [MaybeUninit<u8>], &mut [MaybeUninit<u8>]) { - let (len_to_head, len_after_tail) = self.free_slice_lengths(); + let (a, b) = self.buf.as_slices(); - let buf_ptr = self.buf.as_mut_ptr(); - ( - unsafe { slice::from_raw_parts_mut(buf_ptr.add(self.tail), len_after_tail) }, - unsafe { slice::from_raw_parts_mut(buf_ptr, len_to_head) }, - ) + (unsafe { slice_assume_init_ref_polyfill(a) }, unsafe { + slice_assume_init_ref_polyfill(b) + }) } /// Copies elements from the provided range to the end of the buffer. #[allow(dead_code)] pub fn extend_from_within(&mut self, start: usize, len: usize) { if start + len > self.len() { panic!( "Calls to this function must respect start ({}) + len ({}) <= self.len() ({})!", start, len, self.len() ); } self.reserve(len); // SAFETY: Requirements checked: // 2. explicitly checked above, resulting in a panic if it does not hold // 3. explicitly reserved enough memory unsafe { self.extend_from_within_unchecked(start, len) } } /// Copies data from the provided range to the end of the buffer, without /// first verifying that the unoccupied capacity is available. /// /// SAFETY: /// For this to be safe two requirements need to hold: /// 2. start + len <= self.len() so we do not copy uninitialised memory /// 3. More than len reserved space so we do not write out-of-bounds #[warn(unsafe_op_in_unsafe_fn)] - pub unsafe fn extend_from_within_unchecked(&mut self, start: usize, len: usize) { + pub unsafe fn extend_from_within_unchecked(&mut self, mut start: usize, len: usize) { debug_assert!(start + len <= self.len()); debug_assert!(self.free() >= len); - let capacity = self.capacity(); - let buf_ptr = self.buf.as_mut_ptr().cast::<u8>(); - - if self.head < self.tail { - // Continuous source section and possibly non continuous write section: - // - // H T - // Read: ____XXXXSSSSXXXX________ - // Write: ________________DDDD____ - // - // H: Head position (first readable byte) - // T: Tail position (first writable byte) - // X: Uninvolved bytes in the readable section - // S: Source bytes, to be copied to D bytes - // D: Destination bytes, going to be copied from S bytes - // _: Uninvolved bytes in the writable section - let after_tail = usize::min(len, capacity - self.tail); + if self.free() < len { + unsafe { unreachable_unchecked() } + } - let src = ( - // SAFETY: `len <= isize::MAX` and fits the memory range of `buf` - unsafe { buf_ptr.add(self.head + start) }.cast_const(), - // Src length (see above diagram) - self.tail - self.head - start, - ); + let original_len = self.len(); + let mut intermediate = { + IntermediateRingBuffer { + this: self, + original_len, + disarmed: false, + } + }; - let dst = ( - // SAFETY: `len <= isize::MAX` and fits the memory range of `buf` - unsafe { buf_ptr.add(self.tail) }, - // Dst length (see above diagram) - capacity - self.tail, - ); + intermediate + .this + .buf + .extend((0..len).map(|_| MaybeUninit::uninit())); + debug_assert_eq!(intermediate.this.buf.len(), original_len + len); - // SAFETY: `src` points at initialized data, `dst` points to writable memory - // and does not overlap `src`. - unsafe { copy_bytes_overshooting(src, dst, after_tail) } - - if after_tail < len { - // The write section was not continuous: - // - // H T - // Read: ____XXXXSSSSXXXX__ - // Write: DD______________DD - // - // H: Head position (first readable byte) - // T: Tail position (first writable byte) - // X: Uninvolved bytes in the readable section - // S: Source bytes, to be copied to D bytes - // D: Destination bytes, going to be copied from S bytes - // _: Uninvolved bytes in the writable section - - let src = ( - // SAFETY: we are still within the memory range of `buf` - unsafe { src.0.add(after_tail) }, - // Src length (see above diagram) - src.1 - after_tail, - ); - let dst = ( - buf_ptr, // Dst length overflowing (see above diagram) - self.head, - ); + let (a, b, a_spare, b_spare) = intermediate.as_slices_spare_mut(); + debug_assert_eq!(a_spare.len() + b_spare.len(), len); + + let skip = cmp::min(a.len(), start); + start -= skip; + let a = &a[skip..]; + let b = unsafe { b.get_unchecked(start..) }; + + let mut remaining_copy_len = len; + + // A -> A Spare + let copy_at_least = cmp::min(cmp::min(a.len(), a_spare.len()), remaining_copy_len); + copy_bytes_overshooting(a, a_spare, copy_at_least); + remaining_copy_len -= copy_at_least; + + if remaining_copy_len == 0 { + intermediate.disarmed = true; + return; + } + + let a = &a[copy_at_least..]; + let a_spare = &mut a_spare[copy_at_least..]; + + // A -> B Spare + let copy_at_least = cmp::min(cmp::min(a.len(), b_spare.len()), remaining_copy_len); + copy_bytes_overshooting(a, b_spare, copy_at_least); + remaining_copy_len -= copy_at_least; + + if remaining_copy_len == 0 { + intermediate.disarmed = true; + return; + } + + let b_spare = &mut b_spare[copy_at_least..]; + + // B -> A Spare + let copy_at_least = cmp::min(cmp::min(b.len(), a_spare.len()), remaining_copy_len); + copy_bytes_overshooting(b, a_spare, copy_at_least); + remaining_copy_len -= copy_at_least; + + if remaining_copy_len == 0 { + intermediate.disarmed = true; + return; + } - // SAFETY: `src` points at initialized data, `dst` points to writable memory - // and does not overlap `src`.
- unsafe { copy_bytes_overshooting(src, dst, len - after_tail) } - } - } else { - if self.head + start > capacity { - // Continuous read section and destination section: - // - // T H - // Read: XXSSSSXXXX____________XX - // Write: __________DDDD__________ - // - // H: Head position (first readable byte) - // T: Tail position (first writable byte) - // X: Uninvolved bytes in the readable section - // S: Source bytes, to be copied to D bytes - // D: Destination bytes, going to be copied from S bytes - // _: Uninvolved bytes in the writable section - - let start = (self.head + start) % capacity; - - let src = ( - // SAFETY: `len <= isize::MAX` and fits the memory range of `buf` - unsafe { buf_ptr.add(start) }.cast_const(), - // Src length (see above diagram) - self.tail - start, - ); - - let dst = ( - // SAFETY: `len <= isize::MAX` and fits the memory range of `buf` - unsafe { buf_ptr.add(self.tail) }, // Dst length (see above diagram) - // Dst length (see above diagram) - self.head - self.tail, - ); - - // SAFETY: `src` points at initialized data, `dst` points to writable memory - // and does not overlap `src`. - unsafe { copy_bytes_overshooting(src, dst, len) } - } else { - // Possibly non continuous read section and continuous destination section: - // - // T H - // Read: XXXX____________XXSSSSXX - // Write: ____DDDD________________ - // - // H: Head position (first readable byte) - // T: Tail position (first writable byte) - // X: Uninvolved bytes in the readable section - // S: Source bytes, to be copied to D bytes - // D: Destination bytes, going to be copied from S bytes - // _: Uninvolved bytes in the writable section - - let after_start = usize::min(len, capacity - self.head - start); - - let src = ( - // SAFETY: `len <= isize::MAX` and fits the memory range of `buf` - unsafe { buf_ptr.add(self.head + start) }.cast_const(), - // Src length - chunk 1 (see above diagram on the right) - capacity - self.head - start, - ); - - let dst = ( - // SAFETY: `len <= isize::MAX` and fits the memory range of `buf` - unsafe { buf_ptr.add(self.tail) }, - // Dst length (see above diagram) - self.head - self.tail, - ); - - // SAFETY: `src` points at initialized data, `dst` points to writable memory - // and does not overlap `src`. - unsafe { copy_bytes_overshooting(src, dst, after_start) } - - if after_start < len { - // The read section was not continuous: - // - // T H - // Read: SSXXXXXX____________XXSS - // Write: ________DDDD____________ - // - // H: Head position (first readable byte) - // T: Tail position (first writable byte) - // X: Uninvolved bytes in the readable section - // S: Source bytes, to be copied to D bytes - // D: Destination bytes, going to be copied from S bytes - // _: Uninvolved bytes in the writable section - - let src = ( - buf_ptr.cast_const(), - // Src length - chunk 2 (see above diagram on the left) - self.tail, - ); - - let dst = ( - // SAFETY: we are still within the memory range of `buf` - unsafe { dst.0.add(after_start) }, - // Dst length (see above diagram) - dst.1 - after_start, - ); - - // SAFETY: `src` points at initialized data, `dst` points to writable memory - // and does not overlap `src`. 
- unsafe { copy_bytes_overshooting(src, dst, len - after_start) } - } } } - self.tail = (self.tail + len) % capacity; + let b = &b[copy_at_least..]; + + // B -> B Spare + let copy_at_least = cmp::min(cmp::min(b.len(), b_spare.len()), remaining_copy_len); + copy_bytes_overshooting(b, b_spare, copy_at_least); + remaining_copy_len -= copy_at_least; + + debug_assert_eq!(remaining_copy_len, 0); + + intermediate.disarmed = true; + } +} + +struct IntermediateRingBuffer<'a> { + this: &'a mut RingBuffer, + original_len: usize, + disarmed: bool, +} + +impl<'a> IntermediateRingBuffer<'a> { + // inspired by `Vec::split_at_spare_mut` + fn as_slices_spare_mut( + &mut self, + ) -> (&[u8], &[u8], &mut [MaybeUninit<u8>], &mut [MaybeUninit<u8>]) { + let (a, b) = self.this.buf.as_mut_slices(); + debug_assert!(a.len() + b.len() >= self.original_len); + + let mut remaining_init_len = self.original_len; + let a_mid = cmp::min(a.len(), remaining_init_len); + remaining_init_len -= a_mid; + let b_mid = remaining_init_len; + debug_assert!(b.len() >= b_mid); + + let (a, a_spare) = unsafe { a.split_at_mut_unchecked(a_mid) }; + let (b, b_spare) = unsafe { b.split_at_mut_unchecked(b_mid) }; + debug_assert!(a_spare.is_empty() || b.is_empty()); + + ( + unsafe { slice_assume_init_ref_polyfill(a) }, + unsafe { slice_assume_init_ref_polyfill(b) }, + a_spare, + b_spare, + ) + } +} + +impl<'a> Drop for IntermediateRingBuffer<'a> { + fn drop(&mut self) { + if self.disarmed { + return; + } + + self.this.buf.truncate(self.original_len); } } /// The chunk size is not part of the contract and may change depending on the target platform.
/// /// If that isn't possible we just fall back to ptr::copy_nonoverlapping -#[inline(always)] -unsafe fn copy_bytes_overshooting( - src: (*const u8, usize), - dst: (*mut u8, usize), - copy_at_least: usize, -) { - // By default use usize as the copy size - #[cfg(all(not(target_feature = "sse2"), not(target_feature = "neon")))] - type CopyType = usize; +fn copy_bytes_overshooting(src: &[u8], dst: &mut [MaybeUninit<u8>], copy_at_least: usize) { + // this assert is required for this function to be safe + // the optimizer should be able to remove it given how the caller + // has somehow to figure out `copy_at_least <= src.len() && copy_at_least <= dst.len()` + assert!(src.len() >= copy_at_least && dst.len() >= copy_at_least); - // Use u128 if we detect a simd feature - #[cfg(target_feature = "neon")] - type CopyType = u128; - #[cfg(target_feature = "sse2")] - type CopyType = u128; + type CopyType = usize; const COPY_AT_ONCE_SIZE: usize = core::mem::size_of::<CopyType>(); - let min_buffer_size = usize::min(src.1, dst.1); + let min_buffer_size = usize::min(src.len(), dst.len()); + + // this check should be removed by the optimizer thanks to the above assert + // if `src.len() >= copy_at_least && dst.len() >= copy_at_least` then `min_buffer_size >= copy_at_least` + assert!(min_buffer_size >= copy_at_least); + + // these bounds checks are removed because this is guaranteed: + // `min_buffer_size <= src.len() && min_buffer_size <= dst.len()` + let src = &src[..min_buffer_size]; + let dst = &mut dst[..min_buffer_size]; // Can copy in just one read+write, very common case if min_buffer_size >= COPY_AT_ONCE_SIZE && copy_at_least <= COPY_AT_ONCE_SIZE { - dst.0 - .cast::<CopyType>() - .write_unaligned(src.0.cast::<CopyType>().read_unaligned()) + let chunk = unsafe { src.as_ptr().cast::<CopyType>().read_unaligned() }; + unsafe { dst.as_mut_ptr().cast::<CopyType>().write_unaligned(chunk) }; } else { - let copy_multiple = copy_at_least.next_multiple_of(COPY_AT_ONCE_SIZE); - // Can copy in multiple simple instructions - if min_buffer_size >= copy_multiple { - let mut src_ptr = src.0.cast::<CopyType>(); - let src_ptr_end = src.0.add(copy_multiple).cast::<CopyType>(); - let mut dst_ptr = dst.0.cast::<CopyType>(); - - while src_ptr < src_ptr_end { - dst_ptr.write_unaligned(src_ptr.read_unaligned()); - src_ptr = src_ptr.add(1); - dst_ptr = dst_ptr.add(1); - } - } else { - // Fall back to standard memcopy - dst.0.copy_from_nonoverlapping(src.0, copy_at_least); - } + unsafe { + dst.as_mut_ptr() + .cast::<u8>() + .copy_from_nonoverlapping(src.as_ptr(), copy_at_least) + }; } - debug_assert_eq!( - slice::from_raw_parts(src.0, copy_at_least), - slice::from_raw_parts(dst.0, copy_at_least) - ); + debug_assert_eq!(&src[..copy_at_least], unsafe { + slice_assume_init_ref_polyfill(&dst[..copy_at_least]) + }); +} + +#[inline(always)] +unsafe fn slice_assume_init_ref_polyfill(slice: &[MaybeUninit<u8>]) -> &[u8] { + let len = slice.len(); + let data = slice.as_ptr().cast::<u8>(); + slice::from_raw_parts(data, len) } #[cfg(test)] mod tests { + use std::vec::Vec; + use super::RingBuffer; #[test] let mut rb = RingBuffer::new(); rb.reserve(15); - assert_eq!(17, rb.capacity()); + assert!(rb.capacity() >= 15); rb.extend(b"0123456789"); assert_eq!(rb.len(), 10); - assert_eq!(rb.as_slices().0, b"0123456789"); - assert_eq!(rb.as_slices().1, b""); + assert_eq!(contents(&rb), b"0123456789"); rb.drop_first_n(5); assert_eq!(rb.len(), 5); - assert_eq!(rb.as_slices().0, b"56789"); - assert_eq!(rb.as_slices().1, b""); + assert_eq!(contents(&rb), b"56789"); rb.extend_from_within(2, 3);
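// extend_from_within(2, 3) appends the bytes at logical indices 2..5 of the
// current contents (b"789" out of b"56789"), the self-overlapping copy that
// backs zstd match decoding; it behaves like:
//
//     for i in 2..5 {
//         let byte = rb.get(i).unwrap();
//         rb.push_back(byte);
//     }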
assert_eq!(rb.len(), 8); - assert_eq!(rb.as_slices().0, b"56789789"); - assert_eq!(rb.as_slices().1, b""); + assert_eq!(contents(&rb), b"56789789"); rb.extend_from_within(0, 3); assert_eq!(rb.len(), 11); - assert_eq!(rb.as_slices().0, b"56789789567"); - assert_eq!(rb.as_slices().1, b""); + assert_eq!(contents(&rb), b"56789789567"); rb.extend_from_within(0, 2); assert_eq!(rb.len(), 13); - assert_eq!(rb.as_slices().0, b"567897895675"); - assert_eq!(rb.as_slices().1, b"6"); + assert_eq!(contents(&rb), b"5678978956756"); rb.drop_first_n(11); assert_eq!(rb.len(), 2); - assert_eq!(rb.as_slices().0, b"5"); - assert_eq!(rb.as_slices().1, b"6"); + assert_eq!(contents(&rb), b"56"); rb.extend(b"0123456789"); assert_eq!(rb.len(), 12); - assert_eq!(rb.as_slices().0, b"5"); - assert_eq!(rb.as_slices().1, b"60123456789"); + assert_eq!(contents(&rb), b"560123456789"); rb.drop_first_n(11); assert_eq!(rb.len(), 1); - assert_eq!(rb.as_slices().0, b"9"); - assert_eq!(rb.as_slices().1, b""); + assert_eq!(contents(&rb), b"9"); rb.extend(b"0123456789"); assert_eq!(rb.len(), 11); - assert_eq!(rb.as_slices().0, b"9012345"); - assert_eq!(rb.as_slices().1, b"6789"); + assert_eq!(contents(&rb), b"90123456789"); } #[test] @@ -574,9 +364,10 @@ mod tests { // Fill exactly, then empty then fill again let mut rb = RingBuffer::new(); rb.reserve(16); - assert_eq!(17, rb.capacity()); + let prev_capacity = rb.capacity(); + assert!(prev_capacity >= 16); rb.extend(b"0123456789012345"); - assert_eq!(17, rb.capacity()); + assert_eq!(prev_capacity, rb.capacity()); assert_eq!(16, rb.len()); assert_eq!(0, rb.free()); rb.drop_first_n(16); @@ -585,9 +376,8 @@ mod tests { rb.extend(b"0123456789012345"); assert_eq!(16, rb.len()); assert_eq!(0, rb.free()); - assert_eq!(17, rb.capacity()); - assert_eq!(1, rb.as_slices().0.len()); - assert_eq!(15, rb.as_slices().1.len()); + assert_eq!(prev_capacity, rb.capacity()); + assert_eq!(16, rb.as_slices().0.len() + rb.as_slices().1.len()); rb.clear(); @@ -597,15 +387,13 @@ mod tests { rb.extend(b"67890123"); assert_eq!(16, rb.len()); assert_eq!(0, rb.free()); - assert_eq!(17, rb.capacity()); - assert_eq!(9, rb.as_slices().0.len()); - assert_eq!(7, rb.as_slices().1.len()); + assert_eq!(prev_capacity, rb.capacity()); + assert_eq!(16, rb.as_slices().0.len() + rb.as_slices().1.len()); rb.reserve(1); assert_eq!(16, rb.len()); assert_eq!(16, rb.free()); - assert_eq!(33, rb.capacity()); - assert_eq!(16, rb.as_slices().0.len()); - assert_eq!(0, rb.as_slices().1.len()); + assert!(rb.capacity() >= 17); + assert_eq!(16, rb.as_slices().0.len() + rb.as_slices().1.len()); rb.clear(); @@ -614,7 +402,7 @@ mod tests { rb.extend_from_within(0, 16); assert_eq!(32, rb.len()); assert_eq!(0, rb.free()); - assert_eq!(33, rb.capacity()); + assert!(rb.capacity() >= 32); assert_eq!(32, rb.as_slices().0.len()); assert_eq!(0, rb.as_slices().1.len()); @@ -624,35 +412,33 @@ mod tests { rb.extend(b"01234567"); rb.drop_first_n(5); rb.extend_from_within(0, 3); - assert_eq!(4, rb.as_slices().0.len()); - assert_eq!(2, rb.as_slices().1.len()); + assert_eq!(6, rb.as_slices().0.len() + rb.as_slices().1.len()); rb.drop_first_n(2); - assert_eq!(2, rb.as_slices().0.len()); - assert_eq!(2, rb.as_slices().1.len()); + assert_eq!(4, rb.as_slices().0.len() + rb.as_slices().1.len()); rb.extend_from_within(0, 4); - assert_eq!(2, rb.as_slices().0.len()); - assert_eq!(6, rb.as_slices().1.len()); + assert_eq!(8, rb.as_slices().0.len() + rb.as_slices().1.len()); rb.drop_first_n(2); - assert_eq!(6, rb.as_slices().0.len()); - assert_eq!(0, 
rb.as_slices().1.len()); rb.drop_first_n(2); assert_eq!(4, rb.as_slices().0.len()); assert_eq!(0, rb.as_slices().1.len()); rb.extend_from_within(0, 4); assert_eq!(8, rb.as_slices().0.len() + rb.as_slices().1.len()); let mut rb = RingBuffer::new(); rb.reserve(8); rb.extend(b"11111111"); rb.drop_first_n(7); rb.extend(b"111"); assert_eq!(4, rb.as_slices().0.len() + rb.as_slices().1.len()); rb.extend_from_within(0, 4); assert_eq!(contents(&rb), b"11111111"); + } + + fn contents(rg: &RingBuffer) -> Vec<u8> { + let (a, b) = rg.as_slices(); + a.iter().chain(b.iter()).copied().collect() } } From 6d61968e235383529c4625e3c98abd5e90681c3f Mon Sep 17 00:00:00 2001 From: Paolo Barbolini Date: Fri, 29 Nov 2024 12:02:55 +0000 Subject: [PATCH 4/5] safe --- src/decoding/decodebuffer.rs | 32 +------- src/decoding/ringbuffer.rs | 151 ++++++----------------------------- src/lib.rs | 1 + 3 files changed, 28 insertions(+), 156 deletions(-) diff --git a/src/decoding/decodebuffer.rs b/src/decoding/decodebuffer.rs index 5805e265..802936db 100644 --- a/src/decoding/decodebuffer.rs +++ b/src/decoding/decodebuffer.rs @@ -107,20 +107,7 @@ impl DecodeBuffer { // We need to copy in chunks. self.repeat_in_chunks(offset, match_length, start_idx); } else { - // can just copy parts of the existing buffer - // SAFETY: Requirements checked: - // 1. start_idx + match_length must be <= self.buffer.len() - // We know that: - // 1. start_idx = self.buffer.len() - offset - // 2. end_idx = start_idx + match_length - // 3. end_idx <= self.buffer.len() - // Thus follows: start_idx + match_length <= self.buffer.len() - // - // 2. explicitly reserved enough memory for the whole match_length - unsafe { - self.buffer - .extend_from_within_unchecked(start_idx, match_length) - }; + self.buffer.extend_from_within(start_idx, match_length); } self.total_output_counter += match_length as u64; @@ -137,22 +124,7 @@ impl DecodeBuffer { while copied_counter_left > 0 { let chunksize = usize::min(offset, copied_counter_left); - // SAFETY: Requirements checked: - // 1. start_idx + chunksize must be <= self.buffer.len() - // We know that: - // 1. start_idx starts at buffer.len() - offset - // 2. chunksize <= offset (== offset for each iteration but the last, and match_length modulo offset in the last iteration) - // 3. the buffer grows by offset many bytes each iteration but the last - // 4. start_idx is increased by the same amount as the buffer grows each iteration - // - // Thus follows: start_idx + chunksize == self.buffer.len() in each iteration but the last, where match_length modulo offset == chunksize < offset - // Meaning: start_idx + chunksize <= self.buffer.len() - // - // 2.
explicitly reserved enough memory for the whole match_length - unsafe { - self.buffer - .extend_from_within_unchecked(start_idx, chunksize) - }; + self.buffer.extend_from_within(start_idx, chunksize); copied_counter_left -= chunksize; start_idx += chunksize; } diff --git a/src/decoding/ringbuffer.rs b/src/decoding/ringbuffer.rs index 6a302140..a40e9315 100644 --- a/src/decoding/ringbuffer.rs +++ b/src/decoding/ringbuffer.rs @@ -1,8 +1,8 @@ use alloc::collections::VecDeque; -use core::{cmp, hint::unreachable_unchecked, mem::MaybeUninit, slice}; +use core::cmp; pub struct RingBuffer { - buf: VecDeque<MaybeUninit<u8>>, + buf: VecDeque<u8>, } impl RingBuffer { pub fn new() -> Self { RingBuffer { buf: VecDeque::new(), } } /// Return the number of bytes in the buffer. pub fn len(&self) -> usize { self.buf.len() } /// Return the total capacity in the buffer #[cfg(test)] pub fn capacity(&self) -> usize { self.buf.capacity() } /// Return the amount of available space (in bytes) of the buffer. + #[cfg(test)] pub fn free(&self) -> usize { let len = self.buf.len(); let capacity = self.buf.capacity(); - if len > capacity { - unsafe { unreachable_unchecked() } - } capacity - len } /// Empty the buffer and reset the head and tail. pub fn clear(&mut self) { self.buf.clear(); } /// Whether the buffer is empty pub fn is_empty(&self) -> bool { self.buf.is_empty() } /// Ensure that there's space for `additional` elements in the buffer. pub fn reserve(&mut self, additional: usize) { - if self.free() < additional { - self.reserve_amortized(additional); - } - - if self.free() < additional { - unsafe { unreachable_unchecked() } - } - } - - #[inline(never)] - #[cold] - fn reserve_amortized(&mut self, additional: usize) { self.buf.reserve(additional); } #[allow(dead_code)] pub fn push_back(&mut self, byte: u8) { - self.reserve(1); - self.buf.push_back(MaybeUninit::new(byte)); + self.buf.push_back(byte); } /// Fetch the byte stored at the selected index from the buffer, returning it, or /// `None` if the index is out of bounds. #[allow(dead_code)] pub fn get(&self, idx: usize) -> Option<u8> { - self.buf - .get(idx) - .map(|&byte| unsafe { MaybeUninit::assume_init(byte) }) + self.buf.get(idx).copied() } /// Append the provided data to the end of `self`. pub fn extend(&mut self, data: &[u8]) { - let len = data.len(); - let data = data.as_ptr().cast::<MaybeUninit<u8>>(); - let data = unsafe { slice::from_raw_parts(data, len) }; self.buf.extend(data); } /// Advance head past `amount` elements, effectively removing /// them from the buffer. pub fn drop_first_n(&mut self, amount: usize) { debug_assert!(amount <= self.len()); self.buf.drain(..amount); } /// Return references to each part of the ring buffer. pub fn as_slices(&self) -> (&[u8], &[u8]) { - let (a, b) = self.buf.as_slices(); - - (unsafe { slice_assume_init_ref_polyfill(a) }, unsafe { - slice_assume_init_ref_polyfill(b) - }) + self.buf.as_slices() } /// Copies elements from the provided range to the end of the buffer. #[allow(dead_code)] - pub fn extend_from_within(&mut self, start: usize, len: usize) { + pub fn extend_from_within(&mut self, mut start: usize, len: usize) { if start + len > self.len() { panic!( "Calls to this function must respect start ({}) + len ({}) <= self.len() ({})!", start, len, self.len() ); } - self.reserve(len); - - // SAFETY: Requirements checked: - // 2. explicitly checked above, resulting in a panic if it does not hold - // 3. explicitly reserved enough memory - unsafe { self.extend_from_within_unchecked(start, len) } - } - - /// Copies data from the provided range to the end of the buffer, without - /// first verifying that the unoccupied capacity is available. - /// - /// SAFETY: - /// For this to be safe two requirements need to hold: - /// 2. start + len <= self.len() so we do not copy uninitialised memory - /// 3.
More then len reserved space so we do not write out-of-bounds - #[warn(unsafe_op_in_unsafe_fn)] - pub unsafe fn extend_from_within_unchecked(&mut self, mut start: usize, len: usize) { - debug_assert!(start + len <= self.len()); - debug_assert!(self.free() >= len); - - if self.free() < len { - unsafe { unreachable_unchecked() } + return; } let original_len = self.len(); @@ -141,14 +104,10 @@ impl RingBuffer { IntermediateRingBuffer { this: self, original_len, - disarmed: false, } }; - intermediate - .this - .buf - .extend((0..len).map(|_| MaybeUninit::uninit())); + intermediate.this.buf.extend((0..len).map(|_| 0)); debug_assert_eq!(intermediate.this.buf.len(), original_len + len); let (a, b, a_spare, b_spare) = intermediate.as_slices_spare_mut(); @@ -157,7 +116,7 @@ impl RingBuffer { let skip = cmp::min(a.len(), start); start -= skip; let a = &a[skip..]; - let b = unsafe { b.get_unchecked(start..) }; + let b = &b[start..]; let mut remaining_copy_len = len; @@ -167,7 +126,6 @@ impl RingBuffer { remaining_copy_len -= copy_at_least; if remaining_copy_len == 0 { - intermediate.disarmed = true; return; } @@ -180,7 +138,6 @@ impl RingBuffer { remaining_copy_len -= copy_at_least; if remaining_copy_len == 0 { - intermediate.disarmed = true; return; } @@ -192,7 +149,6 @@ impl RingBuffer { remaining_copy_len -= copy_at_least; if remaining_copy_len == 0 { - intermediate.disarmed = true; return; } @@ -204,22 +160,17 @@ impl RingBuffer { remaining_copy_len -= copy_at_least; debug_assert_eq!(remaining_copy_len, 0); - - intermediate.disarmed = true; } } struct IntermediateRingBuffer<'a> { this: &'a mut RingBuffer, original_len: usize, - disarmed: bool, } impl<'a> IntermediateRingBuffer<'a> { // inspired by `Vec::split_at_spare_mut` - fn as_slices_spare_mut( - &mut self, - ) -> (&[u8], &[u8], &mut [MaybeUninit], &mut [MaybeUninit]) { + fn as_slices_spare_mut(&mut self) -> (&[u8], &[u8], &mut [u8], &mut [u8]) { let (a, b) = self.this.buf.as_mut_slices(); debug_assert!(a.len() + b.len() >= self.original_len); @@ -229,26 +180,11 @@ impl<'a> IntermediateRingBuffer<'a> { let b_mid = remaining_init_len; debug_assert!(b.len() >= b_mid); - let (a, a_spare) = unsafe { a.split_at_mut_unchecked(a_mid) }; - let (b, b_spare) = unsafe { b.split_at_mut_unchecked(b_mid) }; + let (a, a_spare) = a.split_at_mut(a_mid); + let (b, b_spare) = b.split_at_mut(b_mid); debug_assert!(a_spare.is_empty() || b.is_empty()); - ( - unsafe { slice_assume_init_ref_polyfill(a) }, - unsafe { slice_assume_init_ref_polyfill(b) }, - a_spare, - b_spare, - ) - } -} - -impl<'a> Drop for IntermediateRingBuffer<'a> { - fn drop(&mut self) { - if self.disarmed { - return; - } - - self.this.buf.truncate(self.original_len); + (a, b, a_spare, b_spare) } } @@ -265,48 +201,11 @@ impl<'a> Drop for IntermediateRingBuffer<'a> { /// The chunk size is not part of the contract and may change depending on the target platform. 
 ///
 /// If that isn't possible we just fall back to ptr::copy_nonoverlapping
-fn copy_bytes_overshooting(src: &[u8], dst: &mut [MaybeUninit<u8>], copy_at_least: usize) {
-    // this assert is required for this function to be safe
-    // the optimizer should be able to remove it given how the caller
-    // has somehow to figure out `copy_at_least <= src.len() && copy_at_least <= dst.len()`
-    assert!(src.len() >= copy_at_least && dst.len() >= copy_at_least);
-
-    type CopyType = usize;
-
-    const COPY_AT_ONCE_SIZE: usize = core::mem::size_of::<CopyType>();
-    let min_buffer_size = usize::min(src.len(), dst.len());
-
-    // this check should be removed by the optimizer thanks to the above assert
-    // if `src.len() >= copy_at_least && dst.len() >= copy_at_least` then `min_buffer_size >= copy_at_least`
-    assert!(min_buffer_size >= copy_at_least);
-
-    // these bounds checks are removed because this is guaranteed:
-    // `min_buffer_size <= src.len() && min_buffer_size <= dst.len()`
-    let src = &src[..min_buffer_size];
-    let dst = &mut dst[..min_buffer_size];
-
-    // Can copy in just one read+write, very common case
-    if min_buffer_size >= COPY_AT_ONCE_SIZE && copy_at_least <= COPY_AT_ONCE_SIZE {
-        let chunk = unsafe { src.as_ptr().cast::<CopyType>().read_unaligned() };
-        unsafe { dst.as_mut_ptr().cast::<CopyType>().write_unaligned(chunk) };
-    } else {
-        unsafe {
-            dst.as_mut_ptr()
-                .cast::<u8>()
-                .copy_from_nonoverlapping(src.as_ptr(), copy_at_least)
-        };
-    }
-
-    debug_assert_eq!(&src[..copy_at_least], unsafe {
-        slice_assume_init_ref_polyfill(&dst[..copy_at_least])
-    });
-}
+fn copy_bytes_overshooting(src: &[u8], dst: &mut [u8], copy_at_least: usize) {
+    let src = &src[..copy_at_least];
+    let dst = &mut dst[..copy_at_least];
 
-#[inline(always)]
-unsafe fn slice_assume_init_ref_polyfill(slice: &[MaybeUninit<u8>]) -> &[u8] {
-    let len = slice.len();
-    let data = slice.as_ptr().cast::<u8>();
-    slice::from_raw_parts(data, len)
+    dst.copy_from_slice(src);
 }
 
 #[cfg(test)]
diff --git a/src/lib.rs b/src/lib.rs
index f68e1a20..9940b69a 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -14,6 +14,7 @@
 //! than the original implementation.
 #![no_std]
 #![deny(trivial_casts, trivial_numeric_casts, rust_2018_idioms)]
+#![forbid(unsafe_code)]
 
 #[cfg(feature = "std")]
 extern crate std;
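With the patch above applied, the ring buffer is a plain VecDeque<u8> and the crate builds under
#![forbid(unsafe_code)]: extend_from_within copies through the initialized/spare slice pairs
returned by as_slices_spare_mut, falling back to a byte-at-a-time loop for short copies. The
sketch below is not part of the patch series; it assumes std and illustrative names, and shows
only the core idea of the safe per-byte fallback on a bare VecDeque<u8>. Overlapping matches are
handled by the caller, which feeds the copy in bounded chunks as in the decoder loop at the top
of this patch.

    use std::collections::VecDeque;

    /// Append `len` bytes starting at `start` to the back of the deque,
    /// using only safe indexing and `push_back`.
    fn extend_from_within(buf: &mut VecDeque<u8>, start: usize, len: usize) {
        assert!(start + len <= buf.len());
        buf.reserve(len);
        for i in start..start + len {
            let byte = buf[i];
            buf.push_back(byte);
        }
    }

    fn main() {
        let mut buf: VecDeque<u8> = [1u8, 2, 3].into_iter().collect();
        extend_from_within(&mut buf, 0, 3); // repeat a 3-byte match
        assert_eq!(buf.iter().copied().collect::<Vec<u8>>(), vec![1, 2, 3, 1, 2, 3]);
    }
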
From 9f40a8b6f2934077fc90410aabe9688270ad37d4 Mon Sep 17 00:00:00 2001
From: Paolo Barbolini
Date: Sat, 30 Nov 2024 16:29:50 +0000
Subject: [PATCH 5/5] experiment: go back to unsafe

---
 src/decoding/ringbuffer.rs | 161 +++++++++++--------------------------
 src/lib.rs                 |   1 -
 2 files changed, 45 insertions(+), 117 deletions(-)

diff --git a/src/decoding/ringbuffer.rs b/src/decoding/ringbuffer.rs
index a40e9315..eb42a689 100644
--- a/src/decoding/ringbuffer.rs
+++ b/src/decoding/ringbuffer.rs
@@ -1,5 +1,5 @@
 use alloc::collections::VecDeque;
-use core::cmp;
+use core::{cmp, mem::MaybeUninit};
 
 pub struct RingBuffer {
     buf: VecDeque<u8>,
 }
@@ -78,7 +78,7 @@ impl RingBuffer {
     /// Copies elements from the provided range to the end of the buffer.
     #[allow(dead_code)]
-    pub fn extend_from_within(&mut self, mut start: usize, len: usize) {
+    pub fn extend_from_within(&mut self, start: usize, mut len: usize) {
         if start + len > self.len() {
             panic!(
                 "Calls to this function must respect start ({}) + len ({}) <= self.len() ({})!",
@@ -88,126 +88,55 @@
             );
         }
 
-        // Naive and cheaper implementation (for small lengths)
-        if len <= 12 {
-            self.reserve(len);
-            for i in 0..len {
-                let byte = self.get(start + i).unwrap();
-                self.push_back(byte);
+        self.reserve(len);
+
+        let mut buf = [MaybeUninit::<u8>::uninit(); 2048];
+        let mut start = start;
+        while len > 0 {
+            let round_len = cmp::min(len, buf.len());
+            let mut remaining_len = round_len;
+
+            let (a, b) = self.buf.as_slices();
+            let b = if start < a.len() {
+                let a = &a[start..];
+                let end = cmp::min(a.len(), remaining_len);
+                unsafe {
+                    buf.as_mut_ptr()
+                        .cast::<u8>()
+                        .copy_from_nonoverlapping(a.as_ptr(), end);
+                }
+                remaining_len -= end;
+                b
+            } else {
+                unsafe { b.get_unchecked(start - a.len()..) }
+            };
+
+            if remaining_len > 0 {
+                unsafe {
+                    buf.as_mut_ptr()
+                        .cast::<u8>()
+                        .add(round_len - remaining_len)
+                        .copy_from_nonoverlapping(b.as_ptr(), remaining_len);
+                }
             }
 
-            return;
+            /*
+            let mut i = 0;
+            self.buf.iter().skip(start).take(len).for_each(|&b| unsafe {
+                *buf.get_unchecked_mut(i) = MaybeUninit::new(b);
+                i += 1;
+            });
+            */
+
+            self.buf.extend(unsafe {
+                core::slice::from_raw_parts(buf.as_ptr().cast::<u8>(), round_len)
+            });
+            len -= round_len;
+            start += round_len;
         }
-
-        let original_len = self.len();
-        let mut intermediate = {
-            IntermediateRingBuffer {
-                this: self,
-                original_len,
-            }
-        };
-
-        intermediate.this.buf.extend((0..len).map(|_| 0));
-        debug_assert_eq!(intermediate.this.buf.len(), original_len + len);
-
-        let (a, b, a_spare, b_spare) = intermediate.as_slices_spare_mut();
-        debug_assert_eq!(a_spare.len() + b_spare.len(), len);
-
-        let skip = cmp::min(a.len(), start);
-        start -= skip;
-        let a = &a[skip..];
-        let b = &b[start..];
-
-        let mut remaining_copy_len = len;
-
-        // A -> A Spare
-        let copy_at_least = cmp::min(cmp::min(a.len(), a_spare.len()), remaining_copy_len);
-        copy_bytes_overshooting(a, a_spare, copy_at_least);
-        remaining_copy_len -= copy_at_least;
-
-        if remaining_copy_len == 0 {
-            return;
-        }
-
-        let a = &a[copy_at_least..];
-        let a_spare = &mut a_spare[copy_at_least..];
-
-        // A -> B Spare
-        let copy_at_least = cmp::min(cmp::min(a.len(), b_spare.len()), remaining_copy_len);
-        copy_bytes_overshooting(a, b_spare, copy_at_least);
-        remaining_copy_len -= copy_at_least;
-
-        if remaining_copy_len == 0 {
-            return;
-        }
-
-        let b_spare = &mut b_spare[copy_at_least..];
-
-        // B -> A Spare
-        let copy_at_least = cmp::min(cmp::min(b.len(), a_spare.len()), remaining_copy_len);
-        copy_bytes_overshooting(b, a_spare, copy_at_least);
-        remaining_copy_len -= copy_at_least;
-
-        if remaining_copy_len == 0 {
-            return;
-        }
-
-        let b = &b[copy_at_least..];
-
-        // B -> B Spare
-        let copy_at_least = cmp::min(cmp::min(b.len(), b_spare.len()), remaining_copy_len);
-        copy_bytes_overshooting(b, b_spare, copy_at_least);
-        remaining_copy_len -= copy_at_least;
-
-        debug_assert_eq!(remaining_copy_len, 0);
     }
 }
 
-struct IntermediateRingBuffer<'a> {
-    this: &'a mut RingBuffer,
-    original_len: usize,
-}
-
-impl<'a> IntermediateRingBuffer<'a> {
-    // inspired by `Vec::split_at_spare_mut`
-    fn as_slices_spare_mut(&mut self) -> (&[u8], &[u8], &mut [u8], &mut [u8]) {
-        let (a, b) = self.this.buf.as_mut_slices();
-        debug_assert!(a.len() + b.len() >= self.original_len);
-
-        let mut remaining_init_len = self.original_len;
-        let a_mid = cmp::min(a.len(), remaining_init_len);
-        remaining_init_len -= a_mid;
-        let b_mid = remaining_init_len;
-        debug_assert!(b.len() >= b_mid);
-
-        let (a, a_spare) = a.split_at_mut(a_mid);
-        let (b, b_spare) = b.split_at_mut(b_mid);
-        debug_assert!(a_spare.is_empty() || b.is_empty());
-
-        (a, b, a_spare, b_spare)
-    }
-}
-
-/// Similar to ptr::copy_nonoverlapping
-///
-/// But it might overshoot the desired copy length if deemed useful
-///
-/// src and dst specify the entire length they are eligible for reading/writing respectively
-/// in addition to the desired copy length.
-///
-/// This function will then copy in chunks and might copy up to chunk size - 1 more bytes from src to dst
-/// if that operation does not read/write memory that does not belong to src/dst.
-///
-/// The chunk size is not part of the contract and may change depending on the target platform.
-///
-/// If that isn't possible we just fall back to ptr::copy_nonoverlapping
-fn copy_bytes_overshooting(src: &[u8], dst: &mut [u8], copy_at_least: usize) {
-    let src = &src[..copy_at_least];
-    let dst = &mut dst[..copy_at_least];
-
-    dst.copy_from_slice(src);
-}
-
 #[cfg(test)]
 mod tests {
     use std::vec::Vec;
diff --git a/src/lib.rs b/src/lib.rs
index 9940b69a..f68e1a20 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -14,7 +14,6 @@
 //! than the original implementation.
 #![no_std]
 #![deny(trivial_casts, trivial_numeric_casts, rust_2018_idioms)]
-#![forbid(unsafe_code)]
 
 #[cfg(feature = "std")]
 extern crate std;
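The experiment above trades the spare-slice bookkeeping for a staging buffer: each round copies
up to 2048 bytes out of the (possibly wrapped) source range into a stack array, then appends
that array to the deque, so the shared borrow from as_slices never overlaps the write. The
following is a safe analogue of the same round-based scheme, with illustrative names, std
assumed for brevity, and a zero-initialized staging array standing in for MaybeUninit (a cheap
memset traded for simplicity); it is a sketch of the idea, not the patch itself.

    use std::collections::VecDeque;

    fn extend_from_within_staged(buf: &mut VecDeque<u8>, mut start: usize, mut len: usize) {
        assert!(start + len <= buf.len());
        buf.reserve(len);

        // Fixed-size staging area; the patch uses [MaybeUninit<u8>; 2048] here.
        let mut staging = [0u8; 2048];
        while len > 0 {
            let round_len = len.min(staging.len());

            // Gather one round from the two (possibly wrapped) source slices.
            let (a, b) = buf.as_slices();
            for (i, slot) in staging[..round_len].iter_mut().enumerate() {
                let idx = start + i;
                *slot = if idx < a.len() { a[idx] } else { b[idx - a.len()] };
            }

            // Appending only grows the back, so source indices stay stable.
            buf.extend(staging[..round_len].iter().copied());
            start += round_len;
            len -= round_len;
        }
    }

Since the crate stays #![no_std] (the patch only lifts #![forbid(unsafe_code)]), the staged copy
in the diff must name core::slice::from_raw_parts; std:: paths resolve only when the optional
std feature is enabled via the cfg-gated extern crate std.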