diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 4d6e80eab..51c01557d 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -16,6 +16,9 @@ use crate::bytes::Vtable; #[allow(unused)] use crate::loom::sync::atomic::AtomicMut; use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; +use crate::try_alloc::{ + AllocStrategy, FallibleAllocStrategy, PanickingAllocStrategy, TryReserveError, +}; use crate::{Buf, BufMut, Bytes, TryGetError}; /// A unique reference to a contiguous slice of memory. @@ -599,12 +602,99 @@ impl BytesMut { } // will always succeed - let _ = self.reserve_inner(additional, true); + let _ = self.reserve_inner(additional, true, PanickingAllocStrategy); + } + + /// Reserves capacity for at least `additional` more bytes to be inserted + /// into the given `BytesMut`. + /// + /// More than `additional` bytes may be reserved in order to avoid frequent + /// reallocations. A call to `reserve` may result in an allocation. + /// + /// Before allocating new buffer space, the function will attempt to reclaim + /// space in the existing buffer. If the current handle references a view + /// into a larger original buffer, and all other handles referencing part + /// of the same original buffer have been dropped, then the current view + /// can be copied/shifted to the front of the buffer and the handle can take + /// ownership of the full buffer, provided that the full buffer is large + /// enough to fit the requested additional capacity. + /// + /// This optimization will only happen if shifting the data from the current + /// view to the front of the buffer is not too expensive in terms of the + /// (amortized) time required. The precise condition is subject to change; + /// as of now, the length of the data being shifted needs to be at least as + /// large as the distance that it's shifted by. 
If the current view is empty + /// and the original buffer is large enough to fit the requested additional + /// capacity, then reallocations will never happen. + /// + /// # Examples + /// + /// In the following example, a new buffer is allocated. + /// + /// ``` + /// # fn main() -> Result<(), bytes::TryReserveError> { + /// use bytes::BytesMut; + /// + /// let mut buf = BytesMut::from(&b"hello"[..]); + /// buf.try_reserve(64)?; + /// assert!(buf.capacity() >= 69); + /// # Ok(()) + /// # } + /// ``` + /// + /// In the following example, the existing buffer is reclaimed. + /// + /// ``` + /// # fn main() -> Result<(), bytes::TryReserveError> { + /// use bytes::{BytesMut, BufMut}; + /// + /// let mut buf = BytesMut::with_capacity(128); + /// buf.put(&[0; 64][..]); + /// + /// let ptr = buf.as_ptr(); + /// let other = buf.split(); + /// + /// assert!(buf.is_empty()); + /// assert_eq!(buf.capacity(), 64); + /// + /// drop(other); + /// buf.try_reserve(128)?; + /// + /// assert_eq!(buf.capacity(), 128); + /// assert_eq!(buf.as_ptr(), ptr); + /// # Ok(()) + /// # } + /// ``` + /// + /// # Errors + /// + /// Errors if the new capacity overflows `usize` or the underlying allocator fails. + #[inline] + pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { + let len = self.len(); + let rem = self.capacity() - len; + + if additional <= rem { + // The handle can already store at least `additional` more bytes, so + // there is no further work needed to be done. + return Ok(()); + } + + self.reserve_inner(additional, true, FallibleAllocStrategy) + .map(|_| ()) } // In separate function to allow the short-circuits in `reserve` and `try_reclaim` to // be inline-able. Significantly helps performance. Returns false if it did not succeed. 
- fn reserve_inner(&mut self, additional: usize, allocate: bool) -> bool { + fn reserve_inner<S>( + &mut self, + additional: usize, + allocate: bool, + strategy: S, + ) -> Result<bool, S::Err> + where + S: AllocStrategy, + { let len = self.len(); let kind = self.kind(); @@ -650,13 +740,13 @@ impl BytesMut { self.cap += off; } else { if !allocate { - return false; + return Ok(false); } // Not enough space, or reusing might be too much overhead: // allocate more space! let mut v = ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off)); - v.reserve(additional); + strategy.fallible_reserve(&mut v, additional)?; // Update the info self.ptr = vptr(v.as_mut_ptr().add(off)); @@ -664,7 +754,7 @@ impl BytesMut { debug_assert_eq!(self.len, v.len() - off); } - return true; + return Ok(true); } } @@ -677,8 +767,8 @@ impl BytesMut { // Compute the new capacity let mut new_cap = match len.checked_add(additional) { Some(new_cap) => new_cap, - None if !allocate => return false, - None => panic!("overflow"), + None if !allocate => return Ok(false), + None => return Err(strategy.capacity_overflow()), }; unsafe { @@ -711,7 +801,7 @@ impl BytesMut { self.cap = v.capacity(); } else { if !allocate { - return false; + return Ok(false); } // calculate offset let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize); @@ -720,7 +810,9 @@ impl BytesMut { // `Vec`, so it does not take the offset into account. // // Thus we have to manually add it here. - new_cap = new_cap.checked_add(off).expect("overflow"); + new_cap = new_cap + .checked_add(off) + .ok_or_else(|| strategy.capacity_overflow())?; // The vector capacity is not sufficient. The reserve request is // asking for more than the initial buffer capacity. Allocate more @@ -744,18 +836,19 @@ impl BytesMut { // care about in the unused capacity before calling `reserve`. 
debug_assert!(off + len <= v.capacity()); v.set_len(off + len); - v.reserve(new_cap - v.len()); + let additional = new_cap - v.len(); + strategy.fallible_reserve(v, additional)?; // Update the info self.ptr = vptr(v.as_mut_ptr().add(off)); self.cap = v.capacity() - off; } - return true; + return Ok(true); } } if !allocate { - return false; + return Ok(false); } let original_capacity_repr = unsafe { (*shared).original_capacity_repr }; @@ -779,7 +872,7 @@ impl BytesMut { self.ptr = vptr(v.as_mut_ptr()); self.cap = v.capacity(); debug_assert_eq!(self.len, v.len()); - true + Ok(true) } /// Attempts to cheaply reclaim already allocated capacity for at least `additional` more @@ -839,7 +932,10 @@ impl BytesMut { return true; } - self.reserve_inner(additional, false) + match self.reserve_inner(additional, false, PanickingAllocStrategy) { + Ok(reclaimed) => reclaimed, + Err(never) => match never {}, + } } /// Appends given bytes to this `BytesMut`. diff --git a/src/lib.rs b/src/lib.rs index fb5c506e8..316518ca1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -83,8 +83,10 @@ mod bytes; mod bytes_mut; mod fmt; mod loom; +mod try_alloc; pub use crate::bytes::Bytes; pub use crate::bytes_mut::BytesMut; +pub use crate::try_alloc::TryReserveError; // Optional Serde support #[cfg(feature = "serde")] diff --git a/src/try_alloc.rs b/src/try_alloc.rs new file mode 100644 index 000000000..bccf29eaa --- /dev/null +++ b/src/try_alloc.rs @@ -0,0 +1,76 @@ +use alloc::vec::Vec; +use core::{ + convert::Infallible, + fmt::{self, Display}, +}; +#[cfg(feature = "std")] +use std::error::Error; + +/// The error type for try_reserve methods. 
+#[derive(Debug)] +pub struct TryReserveError(TryReserveErrorInner); + +#[derive(Debug)] +pub(crate) enum TryReserveErrorInner { + Std(alloc::collections::TryReserveError), + Overflow, +} + +impl Display for TryReserveError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match &self.0 { + TryReserveErrorInner::Std(err) => Display::fmt(err, f), + TryReserveErrorInner::Overflow => f.write_str("memory allocation failed because the computed capacity exceeded the collection's maximum"), + } + } +} + +#[cfg(feature = "std")] +impl Error for TryReserveError {} + +/// The allocation strategy +/// +/// # Safety +/// +/// `fallible_reserve` must behave the same as `Vec::reserve` or +/// `Vec::try_reserve`. +pub(crate) unsafe trait AllocStrategy { + type Err; + + fn fallible_reserve(&self, vec: &mut Vec<u8>, additional: usize) -> Result<(), Self::Err>; + + #[track_caller] + fn capacity_overflow(&self) -> Self::Err; +} + +pub(crate) struct PanickingAllocStrategy; + +unsafe impl AllocStrategy for PanickingAllocStrategy { + type Err = Infallible; + + fn fallible_reserve(&self, vec: &mut Vec<u8>, additional: usize) -> Result<(), Self::Err> { + vec.reserve(additional); + Ok(()) + } + + #[track_caller] + fn capacity_overflow(&self) -> Self::Err { + panic!("overflow") + } +} + +pub(crate) struct FallibleAllocStrategy; + +unsafe impl AllocStrategy for FallibleAllocStrategy { + type Err = TryReserveError; + + fn fallible_reserve(&self, vec: &mut Vec<u8>, additional: usize) -> Result<(), Self::Err> { + vec.try_reserve(additional) + .map_err(|err| TryReserveError(TryReserveErrorInner::Std(err))) + } + + #[track_caller] + fn capacity_overflow(&self) -> Self::Err { + TryReserveError(TryReserveErrorInner::Overflow) + } +} diff --git a/tests/test_bytes.rs b/tests/test_bytes.rs index 85c007961..e1524ad37 100644 --- a/tests/test_bytes.rs +++ b/tests/test_bytes.rs @@ -1028,6 +1028,18 @@ fn bytes_reserve_overflow() { bytes.reserve(usize::MAX); } +#[test] +fn bytes_try_reserve_overflow() { 
+ let mut bytes = BytesMut::with_capacity(1024); + bytes.put_slice(b"hello world"); + + let err = bytes.try_reserve(usize::MAX).unwrap_err(); + assert_eq!( + "memory allocation failed because the computed capacity exceeded the collection's maximum", + err.to_string() + ); +} + #[test] fn bytes_with_capacity_but_empty() { // See https://github.com/tokio-rs/bytes/issues/340