From 05e9d5cab95e5ce1d57dd498a9567639de50f841 Mon Sep 17 00:00:00 2001
From: "brian m. carlson"
Date: Tue, 31 Jan 2023 19:04:22 +0000
Subject: [PATCH 01/53] Avoid large reallocations when freezing BytesMut
 (#592)

When we freeze a BytesMut, we turn it into a Vec, and then convert that
to a Bytes. Currently, this happens using Vec::into_boxed_slice, which
reallocates to a slice of the same length as the Vec if the length and
the capacity are not equal. This can pose a performance problem if the
Vec is large or if this happens many times in a loop.

Instead, let's compare the length and capacity, and if they're the
same, continue to handle this using into_boxed_slice. Otherwise, since
we already have a kind of vtable that can track a separate capacity,
the shared vtable, let's turn our Vec into that kind of Bytes. While
this does not avoid allocation altogether, it performs a fixed-size
allocation and avoids any need to memcpy.
---
 src/bytes.rs        | 32 ++++++++++++++++++++++++++++++--
 tests/test_bytes.rs | 45 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 75 insertions(+), 2 deletions(-)

diff --git a/src/bytes.rs b/src/bytes.rs
index b4745a9..0404a72 100644
--- a/src/bytes.rs
+++ b/src/bytes.rs
@@ -807,8 +807,36 @@ impl From<&'static str> for Bytes {
 
 impl From<Vec<u8>> for Bytes {
     fn from(vec: Vec<u8>) -> Bytes {
-        let slice = vec.into_boxed_slice();
-        slice.into()
+        let mut vec = vec;
+        let ptr = vec.as_mut_ptr();
+        let len = vec.len();
+        let cap = vec.capacity();
+
+        // Avoid an extra allocation if possible.
+        if len == cap {
+            return Bytes::from(vec.into_boxed_slice());
+        }
+
+        let shared = Box::new(Shared {
+            buf: ptr,
+            cap,
+            ref_cnt: AtomicUsize::new(1),
+        });
+        mem::forget(vec);
+
+        let shared = Box::into_raw(shared);
+        // The pointer should be aligned, so this assert should
+        // always succeed.
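+        // (A `Box` allocation is at least word-aligned, so the low tag
+        // bits selected by `KIND_MASK` are zero.)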
+        debug_assert!(
+            0 == (shared as usize & KIND_MASK),
+            "internal: Box<Shared> should have an aligned pointer",
+        );
+        Bytes {
+            ptr,
+            len,
+            data: AtomicPtr::new(shared as _),
+            vtable: &SHARED_VTABLE,
+        }
     }
 }

diff --git a/tests/test_bytes.rs b/tests/test_bytes.rs
index 4ddb24d..5ec60a5 100644
--- a/tests/test_bytes.rs
+++ b/tests/test_bytes.rs
@@ -1163,3 +1163,48 @@ fn test_bytes_into_vec_promotable_even() {
     assert_eq!(Vec::from(b2), vec[20..]);
     assert_eq!(Vec::from(b1), vec[..20]);
 }
+
+#[test]
+fn test_bytes_vec_conversion() {
+    let mut vec = Vec::with_capacity(10);
+    vec.extend(b"abcdefg");
+    let b = Bytes::from(vec);
+    let v = Vec::from(b);
+    assert_eq!(v.len(), 7);
+    assert_eq!(v.capacity(), 10);
+
+    let mut b = Bytes::from(v);
+    b.advance(1);
+    let v = Vec::from(b);
+    assert_eq!(v.len(), 6);
+    assert_eq!(v.capacity(), 10);
+    assert_eq!(v.as_slice(), b"bcdefg");
+}
+
+#[test]
+fn test_bytes_mut_conversion() {
+    let mut b1 = BytesMut::with_capacity(10);
+    b1.extend(b"abcdefg");
+    let b2 = Bytes::from(b1);
+    let v = Vec::from(b2);
+    assert_eq!(v.len(), 7);
+    assert_eq!(v.capacity(), 10);
+
+    let mut b = Bytes::from(v);
+    b.advance(1);
+    let v = Vec::from(b);
+    assert_eq!(v.len(), 6);
+    assert_eq!(v.capacity(), 10);
+    assert_eq!(v.as_slice(), b"bcdefg");
+}
+
+#[test]
+fn test_bytes_capacity_len() {
+    for cap in 0..100 {
+        for len in 0..=cap {
+            let mut v = Vec::with_capacity(cap);
+            v.resize(len, 0);
+            let _ = Bytes::from(v);
+        }
+    }
+}

From 21ed3328364716fa30a4bf7502c913bbf0a90f45 Mon Sep 17 00:00:00 2001
From: Alice Ryhl
Date: Tue, 31 Jan 2023 20:38:32 +0100
Subject: [PATCH 02/53] chore: prepare bytes v1.4.0 (#593)

---
 CHANGELOG.md | 15 +++++++++++++++
 Cargo.toml   |  2 +-
 2 files changed, 16 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 760c210..a1bad4a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,18 @@
+# 1.4.0 (January 31, 2023)
+
+### Added
+
+- Make `IntoIter` constructor public (#581)
+
+### Fixed
+
+- Avoid large reallocations when freezing `BytesMut` (#592)
+
+### Documented
+
+- Document which functions require `std` (#591)
+- Fix duplicate "the the" typos (#585)
+
 # 1.3.0 (November 20, 2022)
 
 ### Added

diff --git a/Cargo.toml b/Cargo.toml
index 9efd8a1..4a96ec1 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -4,7 +4,7 @@ name = "bytes"
 # When releasing to crates.io:
 # - Update CHANGELOG.md.
 # - Create "v1.x.y" git tag.
-version = "1.3.0"
+version = "1.4.0"
 license = "MIT"
 authors = [
     "Carl Lerche ",

From 74b04c7aae5fd7e73f4283774eab0ef72a26a8a7 Mon Sep 17 00:00:00 2001
From: Paa Kojo Samanpa
Date: Sat, 4 Feb 2023 14:16:41 -0500
Subject: [PATCH 03/53] Mark BytesMut::extend_from_slice as inline (#595)

This function can be hot in applications that do a lot of encoding.
Ideally would do the same for `::put_slice` and `
Date: Thu, 9 Feb 2023 17:24:23 +0100
Subject: [PATCH 04/53] Add a safe way to create UninitSlice from slices
 (#598)

Introduce UninitSlice::from_slice and UninitSlice::from_uninit_slice,
which safely create an UninitSlice from a provided slice of initialised
or maybe-uninitialised memory. In addition, add `From<&mut [T]>`
implementations (for `T = u8` and `T = MaybeUninit<u8>`) which convert
such a slice to an UninitSlice.
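A minimal sketch of the intended usage (illustrative only, not part of
the patch; the constructor names are the ones introduced here, which
patch 07 later renames to `new`/`uninit`):

```rust
use bytes::buf::UninitSlice;

fn main() {
    // From initialised memory: no `unsafe` needed.
    let mut init = [0u8; 4];
    UninitSlice::from_slice(&mut init[..]).copy_from_slice(b"abcd");
    assert_eq!(&init, b"abcd");

    // From uninitialised memory, e.g. a Vec's spare capacity, via the
    // new `From<&mut [MaybeUninit<u8>]>` impl.
    let mut vec: Vec<u8> = Vec::with_capacity(8);
    let spare: &mut UninitSlice = vec.spare_capacity_mut().into();
    spare[..4].copy_from_slice(b"abcd");
    // SAFETY: the first four bytes were initialised just above.
    unsafe { vec.set_len(4) };
    assert_eq!(vec.as_slice(), b"abcd");
}
```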
Closes: #552 --- src/buf/uninit_slice.rs | 56 ++++++++++++++++++++++++++++++++++++----- src/bytes_mut.rs | 2 +- 2 files changed, 51 insertions(+), 7 deletions(-) diff --git a/src/buf/uninit_slice.rs b/src/buf/uninit_slice.rs index 3161a14..84b1d88 100644 --- a/src/buf/uninit_slice.rs +++ b/src/buf/uninit_slice.rs @@ -22,10 +22,44 @@ use core::ops::{ pub struct UninitSlice([MaybeUninit]); impl UninitSlice { - pub(crate) fn from_slice(slice: &mut [MaybeUninit]) -> &mut UninitSlice { + /// Creates a `&mut UninitSlice` wrapping slice of uninitialised memory. + /// + /// # Examples + /// + /// ``` + /// use bytes::buf::UninitSlice; + /// use core::mem::MaybeUninit; + /// + /// let mut buffer = [MaybeUninit::uninit(); 64]; + /// let slice = UninitSlice::from_uninit_slice(&mut buffer[..]); + /// + /// let mut vec = Vec::with_capacity(1024); + /// let spare: &mut UninitSlice = vec.spare_capacity_mut().into(); + /// ``` + #[inline] + pub fn from_uninit_slice(slice: &mut [MaybeUninit]) -> &mut UninitSlice { unsafe { &mut *(slice as *mut [MaybeUninit] as *mut UninitSlice) } } + fn from_uninit_slice_ref(slice: &[MaybeUninit]) -> &UninitSlice { + unsafe { &*(slice as *const [MaybeUninit] as *const UninitSlice) } + } + + /// Creates a `&mut UninitSlice` wrapping slice of initialised memory. + /// + /// # Examples + /// + /// ``` + /// use bytes::buf::UninitSlice; + /// + /// let mut buffer = [0u8; 64]; + /// let slice = UninitSlice::from_slice(&mut buffer[..]); + /// ``` + #[inline] + pub fn from_slice(slice: &mut [u8]) -> &mut UninitSlice { + unsafe { &mut *(slice as *mut [u8] as *mut [MaybeUninit] as *mut UninitSlice) } + } + /// Create a `&mut UninitSlice` from a pointer and a length. /// /// # Safety @@ -48,7 +82,7 @@ impl UninitSlice { pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut u8, len: usize) -> &'a mut UninitSlice { let maybe_init: &mut [MaybeUninit] = core::slice::from_raw_parts_mut(ptr as *mut _, len); - Self::from_slice(maybe_init) + Self::from_uninit_slice(maybe_init) } /// Write a single byte at the specified offset. @@ -179,6 +213,18 @@ impl fmt::Debug for UninitSlice { } } +impl<'a> From<&'a mut [u8]> for &'a mut UninitSlice { + fn from(slice: &'a mut [u8]) -> Self { + UninitSlice::from_slice(slice) + } +} + +impl<'a> From<&'a mut [MaybeUninit]> for &'a mut UninitSlice { + fn from(slice: &'a mut [MaybeUninit]) -> Self { + UninitSlice::from_uninit_slice(slice) + } +} + macro_rules! impl_index { ($($t:ty),*) => { $( @@ -187,16 +233,14 @@ macro_rules! 
impl_index { #[inline] fn index(&self, index: $t) -> &UninitSlice { - let maybe_uninit: &[MaybeUninit] = &self.0[index]; - unsafe { &*(maybe_uninit as *const [MaybeUninit] as *const UninitSlice) } + UninitSlice::from_uninit_slice_ref(&self.0[index]) } } impl IndexMut<$t> for UninitSlice { #[inline] fn index_mut(&mut self, index: $t) -> &mut UninitSlice { - let maybe_uninit: &mut [MaybeUninit] = &mut self.0[index]; - unsafe { &mut *(maybe_uninit as *mut [MaybeUninit] as *mut UninitSlice) } + UninitSlice::from_uninit_slice(&mut self.0[index]) } } )* diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 450b932..c5c2e52 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -1102,7 +1102,7 @@ unsafe impl BufMut for BytesMut { if self.capacity() == self.len() { self.reserve(64); } - UninitSlice::from_slice(self.spare_capacity_mut()) + self.spare_capacity_mut().into() } // Specialize these methods so they can skip checking `remaining_mut` From b29112ce4484424a0137173310ec8b9f84db27ae Mon Sep 17 00:00:00 2001 From: Michal Nazarewicz Date: Fri, 10 Feb 2023 10:28:24 +0100 Subject: [PATCH 05/53] Implement BufMut for `&mut [MaybeUninit]` (#597) --- src/buf/buf_mut.rs | 35 ++++++++++++ tests/test_buf_mut.rs | 123 +++++++++++++++++++++++++++++++++++++----- 2 files changed, 145 insertions(+), 13 deletions(-) diff --git a/src/buf/buf_mut.rs b/src/buf/buf_mut.rs index 685fcc7..2a3c243 100644 --- a/src/buf/buf_mut.rs +++ b/src/buf/buf_mut.rs @@ -1419,6 +1419,41 @@ unsafe impl BufMut for &mut [u8] { } } +unsafe impl BufMut for &mut [core::mem::MaybeUninit] { + #[inline] + fn remaining_mut(&self) -> usize { + self.len() + } + + #[inline] + fn chunk_mut(&mut self) -> &mut UninitSlice { + UninitSlice::from_uninit_slice(self) + } + + #[inline] + unsafe fn advance_mut(&mut self, cnt: usize) { + // Lifetime dance taken from `impl Write for &mut [u8]`. 
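+        // (Replacing `*self` with an empty slice ends the current borrow,
+        // so the split-off tail can be stored back with the original
+        // lifetime.)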
+ let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt); + *self = b; + } + + #[inline] + fn put_slice(&mut self, src: &[u8]) { + self.chunk_mut()[..src.len()].copy_from_slice(src); + unsafe { + self.advance_mut(src.len()); + } + } + + fn put_bytes(&mut self, val: u8, cnt: usize) { + assert!(self.remaining_mut() >= cnt); + unsafe { + ptr::write_bytes(self.as_mut_ptr() as *mut u8, val, cnt); + self.advance_mut(cnt); + } + } +} + unsafe impl BufMut for Vec { #[inline] fn remaining_mut(&self) -> usize { diff --git a/tests/test_buf_mut.rs b/tests/test_buf_mut.rs index 53f4e86..865cccc 100644 --- a/tests/test_buf_mut.rs +++ b/tests/test_buf_mut.rs @@ -3,6 +3,7 @@ use bytes::buf::UninitSlice; use bytes::{BufMut, BytesMut}; use core::fmt::Write; +use core::mem::MaybeUninit; use core::usize; #[test] @@ -101,24 +102,120 @@ fn test_clone() { assert!(buf != buf2); } +fn do_test_slice_small(make: impl Fn(&mut [u8]) -> &mut T) +where + for<'r> &'r mut T: BufMut, +{ + let mut buf = [b'X'; 8]; + + let mut slice = make(&mut buf[..]); + slice.put_bytes(b'A', 2); + slice.put_u8(b'B'); + slice.put_slice(b"BCC"); + assert_eq!(2, slice.remaining_mut()); + assert_eq!(b"AABBCCXX", &buf[..]); + + let mut slice = make(&mut buf[..]); + slice.put_u32(0x61626364); + assert_eq!(4, slice.remaining_mut()); + assert_eq!(b"abcdCCXX", &buf[..]); + + let mut slice = make(&mut buf[..]); + slice.put_u32_le(0x30313233); + assert_eq!(4, slice.remaining_mut()); + assert_eq!(b"3210CCXX", &buf[..]); +} + +fn do_test_slice_large(make: impl Fn(&mut [u8]) -> &mut T) +where + for<'r> &'r mut T: BufMut, +{ + const LEN: usize = 100; + const FILL: [u8; LEN] = [b'Y'; LEN]; + + let test = |fill: &dyn Fn(&mut &mut T, usize)| { + for buf_len in 0..LEN { + let mut buf = [b'X'; LEN]; + for fill_len in 0..=buf_len { + let mut slice = make(&mut buf[..buf_len]); + fill(&mut slice, fill_len); + assert_eq!(buf_len - fill_len, slice.remaining_mut()); + let (head, tail) = buf.split_at(fill_len); + assert_eq!(&FILL[..fill_len], head); + assert!(tail.iter().all(|b| *b == b'X')); + } + } + }; + + test(&|slice, fill_len| slice.put_slice(&FILL[..fill_len])); + test(&|slice, fill_len| slice.put_bytes(FILL[0], fill_len)); +} + +fn do_test_slice_put_slice_panics(make: impl Fn(&mut [u8]) -> &mut T) +where + for<'r> &'r mut T: BufMut, +{ + let mut buf = [b'X'; 4]; + let mut slice = make(&mut buf[..]); + slice.put_slice(b"12345"); +} + +fn do_test_slice_put_bytes_panics(make: impl Fn(&mut [u8]) -> &mut T) +where + for<'r> &'r mut T: BufMut, +{ + let mut buf = [b'X'; 4]; + let mut slice = make(&mut buf[..]); + slice.put_bytes(b'1', 5); +} + +#[test] +fn test_slice_buf_mut_small() { + do_test_slice_small(|x| x); +} + #[test] -fn test_mut_slice() { - let mut v = vec![0, 0, 0, 0]; - let mut s = &mut v[..]; - s.put_u32(42); +fn test_slice_buf_mut_large() { + do_test_slice_large(|x| x); +} - assert_eq!(s.len(), 0); - assert_eq!(&v, &[0, 0, 0, 42]); +#[test] +#[should_panic] +fn test_slice_buf_mut_put_slice_overflow() { + do_test_slice_put_slice_panics(|x| x); } #[test] -fn test_slice_put_bytes() { - let mut v = [0, 0, 0, 0]; - let mut s = &mut v[..]; - s.put_u8(17); - s.put_bytes(19, 2); - assert_eq!(1, s.remaining_mut()); - assert_eq!(&[17, 19, 19, 0], &v[..]); +#[should_panic] +fn test_slice_buf_mut_put_bytes_overflow() { + do_test_slice_put_bytes_panics(|x| x); +} + +fn make_maybe_uninit_slice(slice: &mut [u8]) -> &mut [MaybeUninit] { + // SAFETY: [u8] has the same layout as [MaybeUninit]. 
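+    // (`MaybeUninit<u8>` is `repr(transparent)` over `u8`, so the
+    // transmute preserves pointer, length and lifetime.)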
+ unsafe { core::mem::transmute(slice) } +} + +#[test] +fn test_maybe_uninit_buf_mut_small() { + do_test_slice_small(make_maybe_uninit_slice); +} + +#[test] +fn test_maybe_uninit_buf_mut_large() { + do_test_slice_large(make_maybe_uninit_slice); +} + +#[test] +#[should_panic] +fn test_maybe_uninit_buf_mut_put_slice_overflow() { + do_test_slice_put_slice_panics(make_maybe_uninit_slice); +} + +#[test] +#[should_panic] +fn test_maybe_uninit_buf_mut_put_bytes_overflow() { + do_test_slice_put_bytes_panics(make_maybe_uninit_slice); } #[test] From c7756c3e5976246926a5c8227c181a97d80cf22b Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Mon, 5 Jun 2023 01:04:59 +0900 Subject: [PATCH 06/53] Fix CI failure (#616) --- tests/test_buf.rs | 1 + tests/test_buf_mut.rs | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/test_buf.rs b/tests/test_buf.rs index fbad003..3940f92 100644 --- a/tests/test_buf.rs +++ b/tests/test_buf.rs @@ -72,6 +72,7 @@ fn test_vec_deque() { assert_eq!(b"world piece", &out[..]); } +#[allow(unused_allocation)] // This is intentional. #[test] fn test_deref_buf_forwards() { struct Special; diff --git a/tests/test_buf_mut.rs b/tests/test_buf_mut.rs index 865cccc..33aa680 100644 --- a/tests/test_buf_mut.rs +++ b/tests/test_buf_mut.rs @@ -218,6 +218,7 @@ fn test_maybe_uninit_buf_mut_put_bytes_overflow() { do_test_slice_put_bytes_panics(make_maybe_uninit_slice); } +#[allow(unused_allocation)] // This is intentional. #[test] fn test_deref_bufmut_forwards() { struct Special; From 64c4fa286771ad9e522ffbefc576bcf7b76933d0 Mon Sep 17 00:00:00 2001 From: Michal Nazarewicz Date: Tue, 20 Jun 2023 10:19:56 +0200 Subject: [PATCH 07/53] Rename UninitSlice constructors for consistency with ReadBuf (#599) tokio::io::ReadBuf uses names `new` and `uninit` for its constructors. For consistency with that, rename recently introduced UninitSlice constructors to match those names. --- src/buf/buf_mut.rs | 2 +- src/buf/uninit_slice.rs | 46 ++++++++++++++++++++--------------------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/src/buf/buf_mut.rs b/src/buf/buf_mut.rs index 2a3c243..c2ac39d 100644 --- a/src/buf/buf_mut.rs +++ b/src/buf/buf_mut.rs @@ -1427,7 +1427,7 @@ unsafe impl BufMut for &mut [core::mem::MaybeUninit] { #[inline] fn chunk_mut(&mut self) -> &mut UninitSlice { - UninitSlice::from_uninit_slice(self) + UninitSlice::uninit(self) } #[inline] diff --git a/src/buf/uninit_slice.rs b/src/buf/uninit_slice.rs index 84b1d88..c5d86e8 100644 --- a/src/buf/uninit_slice.rs +++ b/src/buf/uninit_slice.rs @@ -22,42 +22,42 @@ use core::ops::{ pub struct UninitSlice([MaybeUninit]); impl UninitSlice { - /// Creates a `&mut UninitSlice` wrapping slice of uninitialised memory. + /// Creates a `&mut UninitSlice` wrapping a slice of initialised memory. 
/// /// # Examples /// /// ``` /// use bytes::buf::UninitSlice; - /// use core::mem::MaybeUninit; - /// - /// let mut buffer = [MaybeUninit::uninit(); 64]; - /// let slice = UninitSlice::from_uninit_slice(&mut buffer[..]); /// - /// let mut vec = Vec::with_capacity(1024); - /// let spare: &mut UninitSlice = vec.spare_capacity_mut().into(); + /// let mut buffer = [0u8; 64]; + /// let slice = UninitSlice::new(&mut buffer[..]); /// ``` #[inline] - pub fn from_uninit_slice(slice: &mut [MaybeUninit]) -> &mut UninitSlice { - unsafe { &mut *(slice as *mut [MaybeUninit] as *mut UninitSlice) } - } - - fn from_uninit_slice_ref(slice: &[MaybeUninit]) -> &UninitSlice { - unsafe { &*(slice as *const [MaybeUninit] as *const UninitSlice) } + pub fn new(slice: &mut [u8]) -> &mut UninitSlice { + unsafe { &mut *(slice as *mut [u8] as *mut [MaybeUninit] as *mut UninitSlice) } } - /// Creates a `&mut UninitSlice` wrapping slice of initialised memory. + /// Creates a `&mut UninitSlice` wrapping a slice of uninitialised memory. /// /// # Examples /// /// ``` /// use bytes::buf::UninitSlice; + /// use core::mem::MaybeUninit; /// - /// let mut buffer = [0u8; 64]; - /// let slice = UninitSlice::from_slice(&mut buffer[..]); + /// let mut buffer = [MaybeUninit::uninit(); 64]; + /// let slice = UninitSlice::uninit(&mut buffer[..]); + /// + /// let mut vec = Vec::with_capacity(1024); + /// let spare: &mut UninitSlice = vec.spare_capacity_mut().into(); /// ``` #[inline] - pub fn from_slice(slice: &mut [u8]) -> &mut UninitSlice { - unsafe { &mut *(slice as *mut [u8] as *mut [MaybeUninit] as *mut UninitSlice) } + pub fn uninit(slice: &mut [MaybeUninit]) -> &mut UninitSlice { + unsafe { &mut *(slice as *mut [MaybeUninit] as *mut UninitSlice) } + } + + fn uninit_ref(slice: &[MaybeUninit]) -> &UninitSlice { + unsafe { &*(slice as *const [MaybeUninit] as *const UninitSlice) } } /// Create a `&mut UninitSlice` from a pointer and a length. @@ -82,7 +82,7 @@ impl UninitSlice { pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut u8, len: usize) -> &'a mut UninitSlice { let maybe_init: &mut [MaybeUninit] = core::slice::from_raw_parts_mut(ptr as *mut _, len); - Self::from_uninit_slice(maybe_init) + Self::uninit(maybe_init) } /// Write a single byte at the specified offset. @@ -215,13 +215,13 @@ impl fmt::Debug for UninitSlice { impl<'a> From<&'a mut [u8]> for &'a mut UninitSlice { fn from(slice: &'a mut [u8]) -> Self { - UninitSlice::from_slice(slice) + UninitSlice::new(slice) } } impl<'a> From<&'a mut [MaybeUninit]> for &'a mut UninitSlice { fn from(slice: &'a mut [MaybeUninit]) -> Self { - UninitSlice::from_uninit_slice(slice) + UninitSlice::uninit(slice) } } @@ -233,14 +233,14 @@ macro_rules! 
impl_index { #[inline] fn index(&self, index: $t) -> &UninitSlice { - UninitSlice::from_uninit_slice_ref(&self.0[index]) + UninitSlice::uninit_ref(&self.0[index]) } } impl IndexMut<$t> for UninitSlice { #[inline] fn index_mut(&mut self, index: $t) -> &mut UninitSlice { - UninitSlice::from_uninit_slice(&mut self.0[index]) + UninitSlice::uninit(&mut self.0[index]) } } )* From 74e6e200fd671340d4d4a874f83776def04f6c7b Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 7 Sep 2023 10:52:15 +0200 Subject: [PATCH 08/53] chore: prepare bytes v1.5.0 (#627) --- CHANGELOG.md | 11 +++++++++++ Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a1bad4a..47e4880 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,14 @@ +# 1.5.0 (September 7, 2023) + +### Added + +- Add `UninitSlice::{new,init}` (#598, #599) +- Implement `BufMut` for `&mut [MaybeUninit]` (#597) + +### Changed + +- Mark `BytesMut::extend_from_slice` as inline (#595) + # 1.4.0 (January 31, 2023) ### Added diff --git a/Cargo.toml b/Cargo.toml index 4a96ec1..06b19e6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ name = "bytes" # When releasing to crates.io: # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. -version = "1.4.0" +version = "1.5.0" license = "MIT" authors = [ "Carl Lerche ", From bd9c164cb65cf9b80436c3229a6753dc9c4e73eb Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Thu, 7 Sep 2023 14:10:55 +0200 Subject: [PATCH 09/53] doc: fix changelog typo (#628) --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 47e4880..67b9f67 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,7 +2,7 @@ ### Added -- Add `UninitSlice::{new,init}` (#598, #599) +- Add `UninitSlice::{new,uninit}` (#598, #599) - Implement `BufMut` for `&mut [MaybeUninit]` (#597) ### Changed From a14ef4617c1d041cd4b479cd7f5453054b8e639a Mon Sep 17 00:00:00 2001 From: Lucas Kent Date: Mon, 11 Sep 2023 19:12:11 +1000 Subject: [PATCH 10/53] Move comment to correct constant (#629) --- src/bytes_mut.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index c5c2e52..79a8877 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -96,11 +96,11 @@ const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10; const ORIGINAL_CAPACITY_MASK: usize = 0b11100; const ORIGINAL_CAPACITY_OFFSET: usize = 2; +const VEC_POS_OFFSET: usize = 5; // When the storage is in the `Vec` representation, the pointer can be advanced // at most this value. This is due to the amount of storage available to track // the offset is usize - number of KIND bits and number of ORIGINAL_CAPACITY // bits. -const VEC_POS_OFFSET: usize = 5; const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET; const NOT_VEC_POS_MASK: usize = 0b11111; From a4e16a552bf5bee0b75d549a98551d5ab1b074e4 Mon Sep 17 00:00:00 2001 From: mxsm Date: Mon, 25 Sep 2023 16:47:02 +0800 Subject: [PATCH 11/53] docs: fix some spelling mistakes (#633) --- src/buf/uninit_slice.rs | 2 +- src/bytes_mut.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/buf/uninit_slice.rs b/src/buf/uninit_slice.rs index c5d86e8..b0eeed4 100644 --- a/src/buf/uninit_slice.rs +++ b/src/buf/uninit_slice.rs @@ -168,7 +168,7 @@ impl UninitSlice { /// /// The caller **must not** read from the referenced memory and **must not** write /// **uninitialized** bytes to the slice either. 
This is because `BufMut` implementation - /// that created the `UninitSlice` knows which parts are initialized. Writing uninitalized + /// that created the `UninitSlice` knows which parts are initialized. Writing uninitialized /// bytes to the slice may cause the `BufMut` to read those bytes and trigger undefined /// behavior. /// diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 79a8877..b4be074 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -1684,7 +1684,7 @@ fn invalid_ptr(addr: usize) -> *mut T { /// self.ptr.as_ptr().offset_from(ptr) as usize; /// ``` /// -/// But due to min rust is 1.39 and it is only stablised +/// But due to min rust is 1.39 and it is only stabilized /// in 1.47, we cannot use it. #[inline] fn offset_from(dst: *mut u8, original: *mut u8) -> usize { From fd9243f9e2fe2027243b1a23d518c723c24a17b7 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Mon, 2 Oct 2023 15:40:02 +0200 Subject: [PATCH 12/53] Various cleanup (#635) --- src/buf/buf_impl.rs | 168 ++++++++++++++++++-------- src/buf/buf_mut.rs | 261 ++++++++++++++++++++++++++++------------ src/buf/chain.rs | 2 +- src/buf/uninit_slice.rs | 2 +- src/bytes.rs | 2 +- src/bytes_mut.rs | 14 +-- src/lib.rs | 37 ++++++ tests/test_buf_mut.rs | 2 +- 8 files changed, 351 insertions(+), 137 deletions(-) diff --git a/src/buf/buf_impl.rs b/src/buf/buf_impl.rs index 366cfc9..b4ebf40 100644 --- a/src/buf/buf_impl.rs +++ b/src/buf/buf_impl.rs @@ -1,8 +1,9 @@ #[cfg(feature = "std")] use crate::buf::{reader, Reader}; use crate::buf::{take, Chain, Take}; - -use core::{cmp, mem, ptr}; +#[cfg(feature = "std")] +use crate::{min_u64_usize, saturating_sub_usize_u64}; +use crate::{panic_advance, panic_does_not_fit}; #[cfg(feature = "std")] use std::io::IoSlice; @@ -11,7 +12,12 @@ use alloc::boxed::Box; macro_rules! buf_get_impl { ($this:ident, $typ:tt::$conv:tt) => {{ - const SIZE: usize = mem::size_of::<$typ>(); + const SIZE: usize = core::mem::size_of::<$typ>(); + + if $this.remaining() < SIZE { + panic_advance(SIZE, $this.remaining()); + } + // try to convert directly from the bytes // this Option trick is to avoid keeping a borrow on self // when advance() is called (mut borrow) and to call bytes() only once @@ -32,19 +38,30 @@ macro_rules! buf_get_impl { } }}; (le => $this:ident, $typ:tt, $len_to_read:expr) => {{ - debug_assert!(mem::size_of::<$typ>() >= $len_to_read); + const SIZE: usize = core::mem::size_of::<$typ>(); // The same trick as above does not improve the best case speed. 
// It seems to be linked to the way the method is optimised by the compiler - let mut buf = [0; (mem::size_of::<$typ>())]; - $this.copy_to_slice(&mut buf[..($len_to_read)]); + let mut buf = [0; SIZE]; + + let subslice = match buf.get_mut(..$len_to_read) { + Some(subslice) => subslice, + None => panic_does_not_fit(SIZE, $len_to_read), + }; + + $this.copy_to_slice(subslice); return $typ::from_le_bytes(buf); }}; (be => $this:ident, $typ:tt, $len_to_read:expr) => {{ - debug_assert!(mem::size_of::<$typ>() >= $len_to_read); + const SIZE: usize = core::mem::size_of::<$typ>(); + + let slice_at = match SIZE.checked_sub($len_to_read) { + Some(slice_at) => slice_at, + None => panic_does_not_fit(SIZE, $len_to_read), + }; - let mut buf = [0; (mem::size_of::<$typ>())]; - $this.copy_to_slice(&mut buf[mem::size_of::<$typ>() - ($len_to_read)..]); + let mut buf = [0; SIZE]; + $this.copy_to_slice(&mut buf[slice_at..]); return $typ::from_be_bytes(buf); }}; } @@ -247,23 +264,18 @@ pub trait Buf { /// /// # Panics /// - /// This function panics if `self.remaining() < dst.len()` - fn copy_to_slice(&mut self, dst: &mut [u8]) { - let mut off = 0; - - assert!(self.remaining() >= dst.len()); - - while off < dst.len() { - let cnt; - - unsafe { - let src = self.chunk(); - cnt = cmp::min(src.len(), dst.len() - off); + /// This function panics if `self.remaining() < dst.len()`. + fn copy_to_slice(&mut self, mut dst: &mut [u8]) { + if self.remaining() < dst.len() { + panic_advance(dst.len(), self.remaining()); + } - ptr::copy_nonoverlapping(src.as_ptr(), dst[off..].as_mut_ptr(), cnt); + while !dst.is_empty() { + let src = self.chunk(); + let cnt = usize::min(src.len(), dst.len()); - off += cnt; - } + dst[..cnt].copy_from_slice(&src[..cnt]); + dst = &mut dst[cnt..]; self.advance(cnt); } @@ -286,7 +298,9 @@ pub trait Buf { /// /// This function panics if there is no more remaining data in `self`. fn get_u8(&mut self) -> u8 { - assert!(self.remaining() >= 1); + if self.remaining() < 1 { + panic_advance(1, 0); + } let ret = self.chunk()[0]; self.advance(1); ret @@ -309,7 +323,9 @@ pub trait Buf { /// /// This function panics if there is no more remaining data in `self`. fn get_i8(&mut self) -> i8 { - assert!(self.remaining() >= 1); + if self.remaining() < 1 { + panic_advance(1, 0); + } let ret = self.chunk()[0] as i8; self.advance(1); ret @@ -877,7 +893,8 @@ pub trait Buf { /// /// # Panics /// - /// This function panics if there is not enough remaining data in `self`. + /// This function panics if there is not enough remaining data in `self`, or + /// if `nbytes` is greater than 8. fn get_uint_ne(&mut self, nbytes: usize) -> u64 { if cfg!(target_endian = "big") { self.get_uint(nbytes) @@ -901,7 +918,8 @@ pub trait Buf { /// /// # Panics /// - /// This function panics if there is not enough remaining data in `self`. + /// This function panics if there is not enough remaining data in `self`, or + /// if `nbytes` is greater than 8. fn get_int(&mut self, nbytes: usize) -> i64 { buf_get_impl!(be => self, i64, nbytes); } @@ -921,7 +939,8 @@ pub trait Buf { /// /// # Panics /// - /// This function panics if there is not enough remaining data in `self`. + /// This function panics if there is not enough remaining data in `self`, or + /// if `nbytes` is greater than 8. fn get_int_le(&mut self, nbytes: usize) -> i64 { buf_get_impl!(le => self, i64, nbytes); } @@ -944,7 +963,8 @@ pub trait Buf { /// /// # Panics /// - /// This function panics if there is not enough remaining data in `self`. 
+ /// This function panics if there is not enough remaining data in `self`, or + /// if `nbytes` is greater than 8. fn get_int_ne(&mut self, nbytes: usize) -> i64 { if cfg!(target_endian = "big") { self.get_int(nbytes) @@ -1103,7 +1123,9 @@ pub trait Buf { fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { use super::BufMut; - assert!(len <= self.remaining(), "`len` greater than remaining"); + if self.remaining() < len { + panic_advance(len, self.remaining()); + } let mut ret = crate::BytesMut::with_capacity(len); ret.put(self.take(len)); @@ -1195,135 +1217,168 @@ pub trait Buf { macro_rules! deref_forward_buf { () => { + #[inline] fn remaining(&self) -> usize { (**self).remaining() } + #[inline] fn chunk(&self) -> &[u8] { (**self).chunk() } #[cfg(feature = "std")] + #[inline] fn chunks_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize { (**self).chunks_vectored(dst) } + #[inline] fn advance(&mut self, cnt: usize) { (**self).advance(cnt) } + #[inline] fn has_remaining(&self) -> bool { (**self).has_remaining() } + #[inline] fn copy_to_slice(&mut self, dst: &mut [u8]) { (**self).copy_to_slice(dst) } + #[inline] fn get_u8(&mut self) -> u8 { (**self).get_u8() } + #[inline] fn get_i8(&mut self) -> i8 { (**self).get_i8() } + #[inline] fn get_u16(&mut self) -> u16 { (**self).get_u16() } + #[inline] fn get_u16_le(&mut self) -> u16 { (**self).get_u16_le() } + #[inline] fn get_u16_ne(&mut self) -> u16 { (**self).get_u16_ne() } + #[inline] fn get_i16(&mut self) -> i16 { (**self).get_i16() } + #[inline] fn get_i16_le(&mut self) -> i16 { (**self).get_i16_le() } + #[inline] fn get_i16_ne(&mut self) -> i16 { (**self).get_i16_ne() } + #[inline] fn get_u32(&mut self) -> u32 { (**self).get_u32() } + #[inline] fn get_u32_le(&mut self) -> u32 { (**self).get_u32_le() } + #[inline] fn get_u32_ne(&mut self) -> u32 { (**self).get_u32_ne() } + #[inline] fn get_i32(&mut self) -> i32 { (**self).get_i32() } + #[inline] fn get_i32_le(&mut self) -> i32 { (**self).get_i32_le() } + #[inline] fn get_i32_ne(&mut self) -> i32 { (**self).get_i32_ne() } + #[inline] fn get_u64(&mut self) -> u64 { (**self).get_u64() } + #[inline] fn get_u64_le(&mut self) -> u64 { (**self).get_u64_le() } + #[inline] fn get_u64_ne(&mut self) -> u64 { (**self).get_u64_ne() } + #[inline] fn get_i64(&mut self) -> i64 { (**self).get_i64() } + #[inline] fn get_i64_le(&mut self) -> i64 { (**self).get_i64_le() } + #[inline] fn get_i64_ne(&mut self) -> i64 { (**self).get_i64_ne() } + #[inline] fn get_uint(&mut self, nbytes: usize) -> u64 { (**self).get_uint(nbytes) } + #[inline] fn get_uint_le(&mut self, nbytes: usize) -> u64 { (**self).get_uint_le(nbytes) } + #[inline] fn get_uint_ne(&mut self, nbytes: usize) -> u64 { (**self).get_uint_ne(nbytes) } + #[inline] fn get_int(&mut self, nbytes: usize) -> i64 { (**self).get_int(nbytes) } + #[inline] fn get_int_le(&mut self, nbytes: usize) -> i64 { (**self).get_int_le(nbytes) } + #[inline] fn get_int_ne(&mut self, nbytes: usize) -> i64 { (**self).get_int_ne(nbytes) } + #[inline] fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { (**self).copy_to_bytes(len) } @@ -1351,41 +1406,52 @@ impl Buf for &[u8] { #[inline] fn advance(&mut self, cnt: usize) { + if self.len() < cnt { + panic_advance(cnt, self.len()); + } + *self = &self[cnt..]; } + + #[inline] + fn copy_to_slice(&mut self, dst: &mut [u8]) { + if self.len() < dst.len() { + panic_advance(dst.len(), self.len()); + } + + dst.copy_from_slice(&self[..dst.len()]); + self.advance(dst.len()); + } } #[cfg(feature = "std")] impl> Buf for 
std::io::Cursor { + #[inline] fn remaining(&self) -> usize { - let len = self.get_ref().as_ref().len(); - let pos = self.position(); - - if pos >= len as u64 { - return 0; - } - - len - pos as usize + saturating_sub_usize_u64(self.get_ref().as_ref().len(), self.position()) } + #[inline] fn chunk(&self) -> &[u8] { + let slice = self.get_ref().as_ref(); + let pos = min_u64_usize(self.position(), slice.len()); + &slice[pos..] + } + + #[inline] + fn advance(&mut self, cnt: usize) { let len = self.get_ref().as_ref().len(); let pos = self.position(); - if pos >= len as u64 { - return &[]; + // We intentionally allow `cnt == 0` here even if `pos > len`. + let max_cnt = saturating_sub_usize_u64(len, pos); + if cnt > max_cnt { + panic_advance(cnt, max_cnt); } - &self.get_ref().as_ref()[pos as usize..] - } - - fn advance(&mut self, cnt: usize) { - let pos = (self.position() as usize) - .checked_add(cnt) - .expect("overflow"); - - assert!(pos <= self.get_ref().as_ref().len()); - self.set_position(pos as u64); + // This will not overflow because either `cnt == 0` or the sum is not + // greater than `len`. + self.set_position(pos + cnt as u64); } } diff --git a/src/buf/buf_mut.rs b/src/buf/buf_mut.rs index c2ac39d..304e11b 100644 --- a/src/buf/buf_mut.rs +++ b/src/buf/buf_mut.rs @@ -1,8 +1,9 @@ use crate::buf::{limit, Chain, Limit, UninitSlice}; #[cfg(feature = "std")] use crate::buf::{writer, Writer}; +use crate::{panic_advance, panic_does_not_fit}; -use core::{cmp, mem, ptr, usize}; +use core::{mem, ptr, usize}; use alloc::{boxed::Box, vec::Vec}; @@ -67,8 +68,10 @@ pub unsafe trait BufMut { /// The next call to `chunk_mut` will return a slice starting `cnt` bytes /// further into the underlying buffer. /// - /// This function is unsafe because there is no guarantee that the bytes - /// being advanced past have been initialized. + /// # Safety + /// + /// The caller must ensure that the next `cnt` bytes of `chunk` are + /// initialized. /// /// # Examples /// @@ -121,6 +124,7 @@ pub unsafe trait BufMut { /// /// assert!(!buf.has_remaining_mut()); /// ``` + #[inline] fn has_remaining_mut(&self) -> bool { self.remaining_mut() > 0 } @@ -194,27 +198,25 @@ pub unsafe trait BufMut { /// # Panics /// /// Panics if `self` does not have enough capacity to contain `src`. + #[inline] fn put(&mut self, mut src: T) where Self: Sized, { - assert!(self.remaining_mut() >= src.remaining()); + if self.remaining_mut() < src.remaining() { + panic_advance(src.remaining(), self.remaining_mut()); + } while src.has_remaining() { - let l; - - unsafe { - let s = src.chunk(); - let d = self.chunk_mut(); - l = cmp::min(s.len(), d.len()); + let s = src.chunk(); + let d = self.chunk_mut(); + let cnt = usize::min(s.len(), d.len()); - ptr::copy_nonoverlapping(s.as_ptr(), d.as_mut_ptr() as *mut u8, l); - } + d[..cnt].copy_from_slice(&s[..cnt]); - src.advance(l); - unsafe { - self.advance_mut(l); - } + // SAFETY: We just initialized `cnt` bytes in `self`. 
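+            // (`cnt` never exceeds `d.len()`, so the copy above stayed
+            // within the chunk returned by `chunk_mut`.)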
+ unsafe { self.advance_mut(cnt) }; + src.advance(cnt); } } @@ -237,31 +239,21 @@ pub unsafe trait BufMut { /// /// assert_eq!(b"hello\0", &dst); /// ``` - fn put_slice(&mut self, src: &[u8]) { - let mut off = 0; - - assert!( - self.remaining_mut() >= src.len(), - "buffer overflow; remaining = {}; src = {}", - self.remaining_mut(), - src.len() - ); - - while off < src.len() { - let cnt; - - unsafe { - let dst = self.chunk_mut(); - cnt = cmp::min(dst.len(), src.len() - off); + #[inline] + fn put_slice(&mut self, mut src: &[u8]) { + if self.remaining_mut() < src.len() { + panic_advance(src.len(), self.remaining_mut()); + } - ptr::copy_nonoverlapping(src[off..].as_ptr(), dst.as_mut_ptr() as *mut u8, cnt); + while !src.is_empty() { + let dst = self.chunk_mut(); + let cnt = usize::min(src.len(), dst.len()); - off += cnt; - } + dst[..cnt].copy_from_slice(&src[..cnt]); + src = &src[cnt..]; - unsafe { - self.advance_mut(cnt); - } + // SAFETY: We just initialized `cnt` bytes in `self`. + unsafe { self.advance_mut(cnt) }; } } @@ -290,9 +282,20 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. - fn put_bytes(&mut self, val: u8, cnt: usize) { - for _ in 0..cnt { - self.put_u8(val); + #[inline] + fn put_bytes(&mut self, val: u8, mut cnt: usize) { + if self.remaining_mut() < cnt { + panic_advance(cnt, self.remaining_mut()); + } + + while cnt > 0 { + let dst = self.chunk_mut(); + let dst_len = usize::min(dst.len(), cnt); + // SAFETY: The pointer is valid for `dst_len <= dst.len()` bytes. + unsafe { core::ptr::write_bytes(dst.as_mut_ptr(), val, dst_len) }; + // SAFETY: We just initialized `dst_len` bytes in `self`. + unsafe { self.advance_mut(dst_len) }; + cnt -= dst_len; } } @@ -314,6 +317,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_u8(&mut self, n: u8) { let src = [n]; self.put_slice(&src); @@ -337,6 +341,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_i8(&mut self, n: i8) { let src = [n as u8]; self.put_slice(&src) @@ -360,6 +365,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_u16(&mut self, n: u16) { self.put_slice(&n.to_be_bytes()) } @@ -382,6 +388,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_u16_le(&mut self, n: u16) { self.put_slice(&n.to_le_bytes()) } @@ -408,6 +415,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_u16_ne(&mut self, n: u16) { self.put_slice(&n.to_ne_bytes()) } @@ -430,6 +438,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_i16(&mut self, n: i16) { self.put_slice(&n.to_be_bytes()) } @@ -452,6 +461,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_i16_le(&mut self, n: i16) { self.put_slice(&n.to_le_bytes()) } @@ -478,6 +488,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. 
+ #[inline] fn put_i16_ne(&mut self, n: i16) { self.put_slice(&n.to_ne_bytes()) } @@ -500,6 +511,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_u32(&mut self, n: u32) { self.put_slice(&n.to_be_bytes()) } @@ -522,6 +534,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_u32_le(&mut self, n: u32) { self.put_slice(&n.to_le_bytes()) } @@ -548,6 +561,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_u32_ne(&mut self, n: u32) { self.put_slice(&n.to_ne_bytes()) } @@ -570,6 +584,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_i32(&mut self, n: i32) { self.put_slice(&n.to_be_bytes()) } @@ -592,6 +607,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_i32_le(&mut self, n: i32) { self.put_slice(&n.to_le_bytes()) } @@ -618,6 +634,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_i32_ne(&mut self, n: i32) { self.put_slice(&n.to_ne_bytes()) } @@ -640,6 +657,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_u64(&mut self, n: u64) { self.put_slice(&n.to_be_bytes()) } @@ -662,6 +680,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_u64_le(&mut self, n: u64) { self.put_slice(&n.to_le_bytes()) } @@ -688,6 +707,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_u64_ne(&mut self, n: u64) { self.put_slice(&n.to_ne_bytes()) } @@ -710,6 +730,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_i64(&mut self, n: i64) { self.put_slice(&n.to_be_bytes()) } @@ -732,6 +753,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_i64_le(&mut self, n: i64) { self.put_slice(&n.to_le_bytes()) } @@ -758,6 +780,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_i64_ne(&mut self, n: i64) { self.put_slice(&n.to_ne_bytes()) } @@ -780,6 +803,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_u128(&mut self, n: u128) { self.put_slice(&n.to_be_bytes()) } @@ -802,6 +826,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_u128_le(&mut self, n: u128) { self.put_slice(&n.to_le_bytes()) } @@ -828,6 +853,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_u128_ne(&mut self, n: u128) { self.put_slice(&n.to_ne_bytes()) } @@ -850,6 +876,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. 
+ #[inline] fn put_i128(&mut self, n: i128) { self.put_slice(&n.to_be_bytes()) } @@ -872,6 +899,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_i128_le(&mut self, n: i128) { self.put_slice(&n.to_le_bytes()) } @@ -898,6 +926,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_i128_ne(&mut self, n: i128) { self.put_slice(&n.to_ne_bytes()) } @@ -919,9 +948,15 @@ pub unsafe trait BufMut { /// # Panics /// /// This function panics if there is not enough remaining capacity in - /// `self`. + /// `self` or if `nbytes` is greater than 8. + #[inline] fn put_uint(&mut self, n: u64, nbytes: usize) { - self.put_slice(&n.to_be_bytes()[mem::size_of_val(&n) - nbytes..]); + let start = match mem::size_of_val(&n).checked_sub(nbytes) { + Some(start) => start, + None => panic_does_not_fit(nbytes, mem::size_of_val(&n)), + }; + + self.put_slice(&n.to_be_bytes()[start..]); } /// Writes an unsigned n-byte integer to `self` in the little-endian byte order. @@ -941,9 +976,16 @@ pub unsafe trait BufMut { /// # Panics /// /// This function panics if there is not enough remaining capacity in - /// `self`. + /// `self` or if `nbytes` is greater than 8. + #[inline] fn put_uint_le(&mut self, n: u64, nbytes: usize) { - self.put_slice(&n.to_le_bytes()[0..nbytes]); + let slice = n.to_le_bytes(); + let slice = match slice.get(..nbytes) { + Some(slice) => slice, + None => panic_does_not_fit(nbytes, slice.len()), + }; + + self.put_slice(slice); } /// Writes an unsigned n-byte integer to `self` in the native-endian byte order. @@ -967,7 +1009,8 @@ pub unsafe trait BufMut { /// # Panics /// /// This function panics if there is not enough remaining capacity in - /// `self`. + /// `self` or if `nbytes` is greater than 8. + #[inline] fn put_uint_ne(&mut self, n: u64, nbytes: usize) { if cfg!(target_endian = "big") { self.put_uint(n, nbytes) @@ -994,8 +1037,14 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self` or if `nbytes` is greater than 8. + #[inline] fn put_int(&mut self, n: i64, nbytes: usize) { - self.put_slice(&n.to_be_bytes()[mem::size_of_val(&n) - nbytes..]); + let start = match mem::size_of_val(&n).checked_sub(nbytes) { + Some(start) => start, + None => panic_does_not_fit(nbytes, mem::size_of_val(&n)), + }; + + self.put_slice(&n.to_be_bytes()[start..]); } /// Writes low `nbytes` of a signed integer to `self` in little-endian byte order. @@ -1016,8 +1065,15 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self` or if `nbytes` is greater than 8. + #[inline] fn put_int_le(&mut self, n: i64, nbytes: usize) { - self.put_slice(&n.to_le_bytes()[0..nbytes]); + let slice = n.to_le_bytes(); + let slice = match slice.get(..nbytes) { + Some(slice) => slice, + None => panic_does_not_fit(nbytes, slice.len()), + }; + + self.put_slice(slice); } /// Writes low `nbytes` of a signed integer to `self` in native-endian byte order. @@ -1042,6 +1098,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self` or if `nbytes` is greater than 8. + #[inline] fn put_int_ne(&mut self, n: i64, nbytes: usize) { if cfg!(target_endian = "big") { self.put_int(n, nbytes) @@ -1069,6 +1126,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. 
+ #[inline] fn put_f32(&mut self, n: f32) { self.put_u32(n.to_bits()); } @@ -1092,6 +1150,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_f32_le(&mut self, n: f32) { self.put_u32_le(n.to_bits()); } @@ -1119,6 +1178,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_f32_ne(&mut self, n: f32) { self.put_u32_ne(n.to_bits()); } @@ -1142,6 +1202,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_f64(&mut self, n: f64) { self.put_u64(n.to_bits()); } @@ -1165,6 +1226,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_f64_le(&mut self, n: f64) { self.put_u64_le(n.to_bits()); } @@ -1192,6 +1254,7 @@ pub unsafe trait BufMut { /// /// This function panics if there is not enough remaining capacity in /// `self`. + #[inline] fn put_f64_ne(&mut self, n: f64) { self.put_u64_ne(n.to_bits()); } @@ -1209,6 +1272,7 @@ pub unsafe trait BufMut { /// let dst = arr.limit(10); /// assert_eq!(dst.remaining_mut(), 10); /// ``` + #[inline] fn limit(self, limit: usize) -> Limit where Self: Sized, @@ -1240,6 +1304,7 @@ pub unsafe trait BufMut { /// ``` #[cfg(feature = "std")] #[cfg_attr(docsrs, doc(cfg(feature = "std")))] + #[inline] fn writer(self) -> Writer where Self: Sized, @@ -1267,6 +1332,7 @@ pub unsafe trait BufMut { /// assert_eq!(&a[..], b"hello"); /// assert_eq!(&b[..], b" world"); /// ``` + #[inline] fn chain_mut(self, next: U) -> Chain where Self: Sized, @@ -1277,98 +1343,122 @@ pub unsafe trait BufMut { macro_rules! deref_forward_bufmut { () => { + #[inline] fn remaining_mut(&self) -> usize { (**self).remaining_mut() } + #[inline] fn chunk_mut(&mut self) -> &mut UninitSlice { (**self).chunk_mut() } + #[inline] unsafe fn advance_mut(&mut self, cnt: usize) { (**self).advance_mut(cnt) } + #[inline] fn put_slice(&mut self, src: &[u8]) { (**self).put_slice(src) } + #[inline] fn put_u8(&mut self, n: u8) { (**self).put_u8(n) } + #[inline] fn put_i8(&mut self, n: i8) { (**self).put_i8(n) } + #[inline] fn put_u16(&mut self, n: u16) { (**self).put_u16(n) } + #[inline] fn put_u16_le(&mut self, n: u16) { (**self).put_u16_le(n) } + #[inline] fn put_u16_ne(&mut self, n: u16) { (**self).put_u16_ne(n) } + #[inline] fn put_i16(&mut self, n: i16) { (**self).put_i16(n) } + #[inline] fn put_i16_le(&mut self, n: i16) { (**self).put_i16_le(n) } + #[inline] fn put_i16_ne(&mut self, n: i16) { (**self).put_i16_ne(n) } + #[inline] fn put_u32(&mut self, n: u32) { (**self).put_u32(n) } + #[inline] fn put_u32_le(&mut self, n: u32) { (**self).put_u32_le(n) } + #[inline] fn put_u32_ne(&mut self, n: u32) { (**self).put_u32_ne(n) } + #[inline] fn put_i32(&mut self, n: i32) { (**self).put_i32(n) } + #[inline] fn put_i32_le(&mut self, n: i32) { (**self).put_i32_le(n) } + #[inline] fn put_i32_ne(&mut self, n: i32) { (**self).put_i32_ne(n) } + #[inline] fn put_u64(&mut self, n: u64) { (**self).put_u64(n) } + #[inline] fn put_u64_le(&mut self, n: u64) { (**self).put_u64_le(n) } + #[inline] fn put_u64_ne(&mut self, n: u64) { (**self).put_u64_ne(n) } + #[inline] fn put_i64(&mut self, n: i64) { (**self).put_i64(n) } + #[inline] fn put_i64_le(&mut self, n: i64) { (**self).put_i64_le(n) } + #[inline] fn put_i64_ne(&mut self, n: i64) { (**self).put_i64_ne(n) } @@ -1391,12 +1481,15 @@ unsafe impl BufMut for &mut 
[u8] { #[inline] fn chunk_mut(&mut self) -> &mut UninitSlice { - // UninitSlice is repr(transparent), so safe to transmute - unsafe { &mut *(*self as *mut [u8] as *mut _) } + UninitSlice::new(self) } #[inline] unsafe fn advance_mut(&mut self, cnt: usize) { + if self.len() < cnt { + panic_advance(cnt, self.len()); + } + // Lifetime dance taken from `impl Write for &mut [u8]`. let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt); *self = b; @@ -1404,14 +1497,22 @@ unsafe impl BufMut for &mut [u8] { #[inline] fn put_slice(&mut self, src: &[u8]) { - self[..src.len()].copy_from_slice(src); - unsafe { - self.advance_mut(src.len()); + if self.len() < src.len() { + panic_advance(src.len(), self.len()); } + + self[..src.len()].copy_from_slice(src); + // SAFETY: We just initialized `src.len()` bytes. + unsafe { self.advance_mut(src.len()) }; } + #[inline] fn put_bytes(&mut self, val: u8, cnt: usize) { - assert!(self.remaining_mut() >= cnt); + if self.len() < cnt { + panic_advance(cnt, self.len()); + } + + // SAFETY: We just checked that the pointer is valid for `cnt` bytes. unsafe { ptr::write_bytes(self.as_mut_ptr(), val, cnt); self.advance_mut(cnt); @@ -1432,6 +1533,10 @@ unsafe impl BufMut for &mut [core::mem::MaybeUninit] { #[inline] unsafe fn advance_mut(&mut self, cnt: usize) { + if self.len() < cnt { + panic_advance(cnt, self.len()); + } + // Lifetime dance taken from `impl Write for &mut [u8]`. let (_, b) = core::mem::replace(self, &mut []).split_at_mut(cnt); *self = b; @@ -1439,14 +1544,24 @@ unsafe impl BufMut for &mut [core::mem::MaybeUninit] { #[inline] fn put_slice(&mut self, src: &[u8]) { - self.chunk_mut()[..src.len()].copy_from_slice(src); + if self.len() < src.len() { + panic_advance(src.len(), self.len()); + } + + // SAFETY: We just checked that the pointer is valid for `src.len()` bytes. unsafe { + ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr().cast(), src.len()); self.advance_mut(src.len()); } } + #[inline] fn put_bytes(&mut self, val: u8, cnt: usize) { - assert!(self.remaining_mut() >= cnt); + if self.len() < cnt { + panic_advance(cnt, self.len()); + } + + // SAFETY: We just checked that the pointer is valid for `cnt` bytes. unsafe { ptr::write_bytes(self.as_mut_ptr() as *mut u8, val, cnt); self.advance_mut(cnt); @@ -1466,13 +1581,11 @@ unsafe impl BufMut for Vec { let len = self.len(); let remaining = self.capacity() - len; - assert!( - cnt <= remaining, - "cannot advance past `remaining_mut`: {:?} <= {:?}", - cnt, - remaining - ); + if remaining < cnt { + panic_advance(cnt, remaining); + } + // Addition will not overflow since the sum is at most the capacity. self.set_len(len + cnt); } @@ -1486,28 +1599,26 @@ unsafe impl BufMut for Vec { let len = self.len(); let ptr = self.as_mut_ptr(); - unsafe { &mut UninitSlice::from_raw_parts_mut(ptr, cap)[len..] } + // SAFETY: Since `ptr` is valid for `cap` bytes, `ptr.add(len)` must be + // valid for `cap - len` bytes. The subtraction will not underflow since + // `len <= cap`. + unsafe { UninitSlice::from_raw_parts_mut(ptr.add(len), cap - len) } } // Specialize these methods so they can skip checking `remaining_mut` // and `advance_mut`. + #[inline] fn put(&mut self, mut src: T) where Self: Sized, { - // In case the src isn't contiguous, reserve upfront + // In case the src isn't contiguous, reserve upfront. 
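+        // With capacity reserved for every remaining byte, the
+        // `extend_from_slice` calls below never need to reallocate.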
self.reserve(src.remaining()); while src.has_remaining() { - let l; - - // a block to contain the src.bytes() borrow - { - let s = src.chunk(); - l = s.len(); - self.extend_from_slice(s); - } - + let s = src.chunk(); + let l = s.len(); + self.extend_from_slice(s); src.advance(l); } } @@ -1517,8 +1628,10 @@ unsafe impl BufMut for Vec { self.extend_from_slice(src); } + #[inline] fn put_bytes(&mut self, val: u8, cnt: usize) { - let new_len = self.len().checked_add(cnt).unwrap(); + // If the addition overflows, then the `resize` will fail. + let new_len = self.len().saturating_add(cnt); self.resize(new_len, val); } } diff --git a/src/buf/chain.rs b/src/buf/chain.rs index 78979a1..a2dac93 100644 --- a/src/buf/chain.rs +++ b/src/buf/chain.rs @@ -135,7 +135,7 @@ where U: Buf, { fn remaining(&self) -> usize { - self.a.remaining().checked_add(self.b.remaining()).unwrap() + self.a.remaining().saturating_add(self.b.remaining()) } fn chunk(&self) -> &[u8] { diff --git a/src/buf/uninit_slice.rs b/src/buf/uninit_slice.rs index b0eeed4..0715ad2 100644 --- a/src/buf/uninit_slice.rs +++ b/src/buf/uninit_slice.rs @@ -184,7 +184,7 @@ impl UninitSlice { /// }; /// ``` #[inline] - pub unsafe fn as_uninit_slice_mut<'a>(&'a mut self) -> &'a mut [MaybeUninit] { + pub unsafe fn as_uninit_slice_mut(&mut self) -> &mut [MaybeUninit] { &mut *(self as *mut _ as *mut [MaybeUninit]) } diff --git a/src/bytes.rs b/src/bytes.rs index 0404a72..d8911bb 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -242,7 +242,7 @@ impl Bytes { let begin = match range.start_bound() { Bound::Included(&n) => n, - Bound::Excluded(&n) => n + 1, + Bound::Excluded(&n) => n.checked_add(1).expect("out of range"), Bound::Unbounded => 0, }; diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index b4be074..c860720 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -1087,14 +1087,12 @@ unsafe impl BufMut for BytesMut { #[inline] unsafe fn advance_mut(&mut self, cnt: usize) { - let new_len = self.len() + cnt; - assert!( - new_len <= self.cap, - "new_len = {}; capacity = {}", - new_len, - self.cap - ); - self.len = new_len; + let remaining = self.cap - self.len(); + if cnt > remaining { + super::panic_advance(cnt, remaining); + } + // Addition won't overflow since it is at most `self.cap`. + self.len = self.len() + cnt; } #[inline] diff --git a/src/lib.rs b/src/lib.rs index af436b3..d2d970b 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -115,3 +115,40 @@ fn abort() -> ! { panic!("abort"); } } + +#[inline(always)] +#[cfg(feature = "std")] +fn saturating_sub_usize_u64(a: usize, b: u64) -> usize { + use core::convert::TryFrom; + match usize::try_from(b) { + Ok(b) => a.saturating_sub(b), + Err(_) => 0, + } +} + +#[inline(always)] +#[cfg(feature = "std")] +fn min_u64_usize(a: u64, b: usize) -> usize { + use core::convert::TryFrom; + match usize::try_from(a) { + Ok(a) => usize::min(a, b), + Err(_) => b, + } +} + +/// Panic with a nice error message. +#[cold] +fn panic_advance(idx: usize, len: usize) -> ! { + panic!( + "advance out of bounds: the len is {} but advancing by {}", + len, idx + ); +} + +#[cold] +fn panic_does_not_fit(size: usize, nbytes: usize) -> ! 
{ + panic!( + "size too large: the integer type can fit {} bytes, but nbytes is {}", + size, nbytes + ); +} diff --git a/tests/test_buf_mut.rs b/tests/test_buf_mut.rs index 33aa680..0abeb9f 100644 --- a/tests/test_buf_mut.rs +++ b/tests/test_buf_mut.rs @@ -83,7 +83,7 @@ fn test_put_int_le_nbytes_overflow() { } #[test] -#[should_panic(expected = "cannot advance")] +#[should_panic(expected = "advance out of bounds: the len is 8 but advancing by 12")] fn test_vec_advance_mut() { // Verify fix for #354 let mut buf = Vec::with_capacity(8); From bde8c50703869f54b905560eb62c2478a0111885 Mon Sep 17 00:00:00 2001 From: DanielB Date: Thu, 19 Oct 2023 12:50:24 -0700 Subject: [PATCH 13/53] docs: typo fix (#637) Co-authored-by: Daniel Bauman --- src/bytes.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index d8911bb..58cd1fc 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -63,8 +63,8 @@ use crate::Buf; /// `Bytes` contains a vtable, which allows implementations of `Bytes` to define /// how sharing/cloning is implemented in detail. /// When `Bytes::clone()` is called, `Bytes` will call the vtable function for -/// cloning the backing storage in order to share it behind between multiple -/// `Bytes` instances. +/// cloning the backing storage in order to share it behind multiple `Bytes` +/// instances. /// /// For `Bytes` implementations which refer to constant memory (e.g. created /// via `Bytes::from_static()`) the cloning implementation will be a no-op. From 72cbb92e0e53680c67c27b56fabbe1f3ed5dbae9 Mon Sep 17 00:00:00 2001 From: Gabriel Goller Date: Thu, 16 Nov 2023 12:24:21 +0100 Subject: [PATCH 14/53] docs: fix broken links (#639) Fixed a few broken links and converted a lot of them from the html-link to intra-doc links. --- src/buf/chain.rs | 4 +--- src/buf/iter.rs | 3 --- src/buf/mod.rs | 2 -- src/buf/reader.rs | 2 +- src/buf/take.rs | 2 +- src/buf/writer.rs | 2 +- src/bytes.rs | 4 +--- src/bytes_mut.rs | 4 +--- src/lib.rs | 11 +++-------- 9 files changed, 9 insertions(+), 25 deletions(-) diff --git a/src/buf/chain.rs b/src/buf/chain.rs index a2dac93..97ac2ec 100644 --- a/src/buf/chain.rs +++ b/src/buf/chain.rs @@ -25,9 +25,7 @@ use std::io::IoSlice; /// assert_eq!(full[..], b"hello world"[..]); /// ``` /// -/// [`Buf::chain`]: trait.Buf.html#method.chain -/// [`Buf`]: trait.Buf.html -/// [`BufMut`]: trait.BufMut.html +/// [`Buf::chain`]: Buf::chain #[derive(Debug)] pub struct Chain { a: T, diff --git a/src/buf/iter.rs b/src/buf/iter.rs index c694e3d..74f9b99 100644 --- a/src/buf/iter.rs +++ b/src/buf/iter.rs @@ -17,9 +17,6 @@ use crate::Buf; /// assert_eq!(iter.next(), Some(b'c')); /// assert_eq!(iter.next(), None); /// ``` -/// -/// [`iter`]: trait.Buf.html#method.iter -/// [`Buf`]: trait.Buf.html #[derive(Debug)] pub struct IntoIter { inner: T, diff --git a/src/buf/mod.rs b/src/buf/mod.rs index c4c0a57..1bf0a47 100644 --- a/src/buf/mod.rs +++ b/src/buf/mod.rs @@ -13,8 +13,6 @@ //! See [`Buf`] and [`BufMut`] for more details. //! //! [rope]: https://en.wikipedia.org/wiki/Rope_(data_structure) -//! [`Buf`]: trait.Buf.html -//! [`BufMut`]: trait.BufMut.html mod buf_impl; mod buf_mut; diff --git a/src/buf/reader.rs b/src/buf/reader.rs index f2b4d98..5214949 100644 --- a/src/buf/reader.rs +++ b/src/buf/reader.rs @@ -5,7 +5,7 @@ use std::{cmp, io}; /// A `Buf` adapter which implements `io::Read` for the inner value. /// /// This struct is generally created by calling `reader()` on `Buf`. 
See -/// documentation of [`reader()`](trait.Buf.html#method.reader) for more +/// documentation of [`reader()`](Buf::reader) for more /// details. #[derive(Debug)] pub struct Reader { diff --git a/src/buf/take.rs b/src/buf/take.rs index d3cb10a..a16a434 100644 --- a/src/buf/take.rs +++ b/src/buf/take.rs @@ -5,7 +5,7 @@ use core::cmp; /// A `Buf` adapter which limits the bytes read from an underlying buffer. /// /// This struct is generally created by calling `take()` on `Buf`. See -/// documentation of [`take()`](trait.Buf.html#method.take) for more details. +/// documentation of [`take()`](Buf::take) for more details. #[derive(Debug)] pub struct Take { inner: T, diff --git a/src/buf/writer.rs b/src/buf/writer.rs index 261d7cd..08f15d2 100644 --- a/src/buf/writer.rs +++ b/src/buf/writer.rs @@ -5,7 +5,7 @@ use std::{cmp, io}; /// A `BufMut` adapter which implements `io::Write` for the inner value. /// /// This struct is generally created by calling `writer()` on `BufMut`. See -/// documentation of [`writer()`](trait.BufMut.html#method.writer) for more +/// documentation of [`writer()`](BufMut::writer) for more /// details. #[derive(Debug)] pub struct Writer { diff --git a/src/bytes.rs b/src/bytes.rs index 58cd1fc..9fed3d2 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -438,7 +438,7 @@ impl Bytes { /// If `len` is greater than the buffer's current length, this has no /// effect. /// - /// The [`split_off`] method can emulate `truncate`, but this causes the + /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the /// excess bytes to be returned instead of dropped. /// /// # Examples @@ -450,8 +450,6 @@ impl Bytes { /// buf.truncate(5); /// assert_eq!(buf, b"hello"[..]); /// ``` - /// - /// [`split_off`]: #method.split_off #[inline] pub fn truncate(&mut self, len: usize) { if len < self.len { diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index c860720..57fd33e 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -399,7 +399,7 @@ impl BytesMut { /// /// Existing underlying capacity is preserved. /// - /// The [`split_off`] method can emulate `truncate`, but this causes the + /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the /// excess bytes to be returned instead of dropped. /// /// # Examples @@ -411,8 +411,6 @@ impl BytesMut { /// buf.truncate(5); /// assert_eq!(buf, b"hello"[..]); /// ``` - /// - /// [`split_off`]: #method.split_off pub fn truncate(&mut self, len: usize) { if len <= self.len() { unsafe { diff --git a/src/lib.rs b/src/lib.rs index d2d970b..1b3e6fc 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,12 +9,9 @@ //! Provides abstractions for working with bytes. //! //! The `bytes` crate provides an efficient byte buffer structure -//! ([`Bytes`](struct.Bytes.html)) and traits for working with buffer +//! ([`Bytes`]) and traits for working with buffer //! implementations ([`Buf`], [`BufMut`]). //! -//! [`Buf`]: trait.Buf.html -//! [`BufMut`]: trait.BufMut.html -//! //! # `Bytes` //! //! `Bytes` is an efficient container for storing and operating on contiguous @@ -52,9 +49,7 @@ //! `a` and `b` will share the underlying buffer and maintain indices tracking //! the view into the buffer represented by the handle. //! -//! See the [struct docs] for more details. -//! -//! [struct docs]: struct.Bytes.html +//! See the [struct docs](`Bytes`) for more details. //! //! # `Buf`, `BufMut` //! @@ -70,7 +65,7 @@ //! ## Relation with `Read` and `Write` //! //! 
At first glance, it may seem that `Buf` and `BufMut` overlap in -//! functionality with `std::io::Read` and `std::io::Write`. However, they +//! functionality with [`std::io::Read`] and [`std::io::Write`]. However, they //! serve different purposes. A buffer is the value that is provided as an //! argument to `Read::read` and `Write::write`. `Read` and `Write` may then //! perform a syscall, which has the potential of failing. Operations on `Buf` From f73c6c8e8543ee15741c788d105e2b4235f1bc7b Mon Sep 17 00:00:00 2001 From: Luca Bruno Date: Thu, 28 Dec 2023 06:17:53 +0100 Subject: [PATCH 15/53] Simplify UninitSlice::as_uninit_slice_mut() logic (#644) This reworks `UninitSlice::as_uninit_slice_mut()` using equivalent simpler logic. --- src/buf/uninit_slice.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/buf/uninit_slice.rs b/src/buf/uninit_slice.rs index 0715ad2..82ebdbb 100644 --- a/src/buf/uninit_slice.rs +++ b/src/buf/uninit_slice.rs @@ -185,7 +185,7 @@ impl UninitSlice { /// ``` #[inline] pub unsafe fn as_uninit_slice_mut(&mut self) -> &mut [MaybeUninit] { - &mut *(self as *mut _ as *mut [MaybeUninit]) + &mut self.0 } /// Returns the number of bytes in the slice. From dbbdb63d7691066922ac4c7753e6dd95c07f8fbf Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Thu, 28 Dec 2023 00:20:13 -0500 Subject: [PATCH 16/53] Use `self.` instead of `Self::` (#642) I was a little confused about these calls using `Self::` instead of `self.` here. Is there a reason to use the former instead of the latter? --- src/buf/buf_impl.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/buf/buf_impl.rs b/src/buf/buf_impl.rs index b4ebf40..9367eb2 100644 --- a/src/buf/buf_impl.rs +++ b/src/buf/buf_impl.rs @@ -991,7 +991,7 @@ pub trait Buf { /// /// This function panics if there is not enough remaining data in `self`. fn get_f32(&mut self) -> f32 { - f32::from_bits(Self::get_u32(self)) + f32::from_bits(self.get_u32()) } /// Gets an IEEE754 single-precision (4 bytes) floating point number from @@ -1012,7 +1012,7 @@ pub trait Buf { /// /// This function panics if there is not enough remaining data in `self`. fn get_f32_le(&mut self) -> f32 { - f32::from_bits(Self::get_u32_le(self)) + f32::from_bits(self.get_u32_le()) } /// Gets an IEEE754 single-precision (4 bytes) floating point number from @@ -1036,7 +1036,7 @@ pub trait Buf { /// /// This function panics if there is not enough remaining data in `self`. fn get_f32_ne(&mut self) -> f32 { - f32::from_bits(Self::get_u32_ne(self)) + f32::from_bits(self.get_u32_ne()) } /// Gets an IEEE754 double-precision (8 bytes) floating point number from @@ -1057,7 +1057,7 @@ pub trait Buf { /// /// This function panics if there is not enough remaining data in `self`. fn get_f64(&mut self) -> f64 { - f64::from_bits(Self::get_u64(self)) + f64::from_bits(self.get_u64()) } /// Gets an IEEE754 double-precision (8 bytes) floating point number from @@ -1078,7 +1078,7 @@ pub trait Buf { /// /// This function panics if there is not enough remaining data in `self`. fn get_f64_le(&mut self) -> f64 { - f64::from_bits(Self::get_u64_le(self)) + f64::from_bits(self.get_u64_le()) } /// Gets an IEEE754 double-precision (8 bytes) floating point number from @@ -1102,7 +1102,7 @@ pub trait Buf { /// /// This function panics if there is not enough remaining data in `self`. 
fn get_f64_ne(&mut self) -> f64 { - f64::from_bits(Self::get_u64_ne(self)) + f64::from_bits(self.get_u64_ne()) } /// Consumes `len` bytes inside self and returns new instance of `Bytes` From 3bf6583b5cece02526b9b225e6ace0552a36ded3 Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Wed, 3 Jan 2024 17:22:39 +0100 Subject: [PATCH 17/53] readme: add security policy (#649) --- SECURITY.md | 9 +++++++++ 1 file changed, 9 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000..b74a831 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,9 @@ +# Security Policy + +Bytes is part of the Tokio project and uses the same security policy as [Tokio][tokio-security]. + +## Report a security issue + +The process for reporting an issue is the same as for [Tokio][tokio-security]. This includes private reporting via security@tokio.rs. + +[tokio-security]: https://github.com/tokio-rs/tokio/security/policy From fbc64bcc6713b51fa1253cf18fc80c904796ddb5 Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Mon, 8 Jan 2024 01:10:48 +0900 Subject: [PATCH 18/53] Update loom to 0.7 (#651) --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 06b19e6..cf72180 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ serde = { version = "1.0.60", optional = true, default-features = false, feature serde_test = "1.0" [target.'cfg(loom)'.dev-dependencies] -loom = "0.5" +loom = "0.7" [package.metadata.docs.rs] rustdoc-args = ["--cfg", "docsrs"] From 09214ba51bdace6f6cb91740cee9514fc08d55ce Mon Sep 17 00:00:00 2001 From: Taiki Endo Date: Mon, 8 Jan 2024 01:11:02 +0900 Subject: [PATCH 19/53] Update CI config (#650) --- .github/workflows/ci.yml | 54 +++++++++++++++++++++++----------------- Cargo.toml | 3 ++- ci/test-stable.sh | 6 ----- ci/tsan.sh | 0 4 files changed, 33 insertions(+), 30 deletions(-) mode change 100644 => 100755 ci/test-stable.sh mode change 100644 => 100755 ci/tsan.sh diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a4f7b1d..c0658a1 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,6 +7,8 @@ on: push: branches: - master + schedule: + - cron: '0 2 * * 0' env: RUSTFLAGS: -Dwarnings @@ -23,11 +25,11 @@ jobs: name: rustfmt runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust - run: rustup update stable && rustup default stable + run: rustup update stable - name: Check formatting - run: cargo fmt --all -- --check + run: cargo fmt --all --check # TODO # # Apply clippy lints @@ -35,7 +37,7 @@ jobs: # name: clippy # runs-on: ubuntu-latest # steps: - # - uses: actions/checkout@v3 + # - uses: actions/checkout@v4 # - name: Apply clippy lints # run: cargo clippy --all-features @@ -48,11 +50,11 @@ jobs: name: minrust runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - name: Install Rust - run: rustup update 1.39.0 && rustup default 1.39.0 + - uses: actions/checkout@v4 + - name: Install cargo-hack + uses: taiki-e/install-action@cargo-hack - name: Check - run: . ci/test-stable.sh check + run: cargo hack check --feature-powerset --optional-deps --rust-version # Stable stable: @@ -65,23 +67,27 @@ jobs: - windows-latest runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust # --no-self-update is necessary because the windows environment cannot self-update rustup.exe. 
- run: rustup update stable --no-self-update && rustup default stable + run: rustup update stable --no-self-update + - name: Install cargo-hack + uses: taiki-e/install-action@cargo-hack - name: Test - run: . ci/test-stable.sh test + run: ci/test-stable.sh test # Nightly nightly: name: nightly runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust run: rustup update $nightly && rustup default $nightly + - name: Install cargo-hack + uses: taiki-e/install-action@cargo-hack - name: Test - run: . ci/test-stable.sh test + run: ci/test-stable.sh test # Run tests on some extra platforms cross: @@ -96,13 +102,14 @@ jobs: - wasm32-unknown-unknown runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust - run: rustup update stable && rustup default stable + run: rustup update stable + - name: Install cross + uses: taiki-e/install-action@cross + if: matrix.target != 'wasm32-unknown-unknown' - name: cross build --target ${{ matrix.target }} - run: | - cargo install cross - cross build --target ${{ matrix.target }} + run: cross build --target ${{ matrix.target }} if: matrix.target != 'wasm32-unknown-unknown' # WASM support - name: cargo build --target ${{ matrix.target }} @@ -116,18 +123,19 @@ jobs: name: tsan runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust run: rustup update $nightly && rustup default $nightly - name: Install rust-src run: rustup component add rust-src - name: ASAN / TSAN - run: . ci/tsan.sh + run: ci/tsan.sh + miri: name: miri runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Miri run: ci/miri.sh @@ -136,7 +144,7 @@ jobs: name: loom runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust run: rustup update $nightly && rustup default $nightly - name: Loom tests @@ -155,7 +163,7 @@ jobs: - loom runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Rust run: rustup update $nightly && rustup default $nightly - name: Build documentation diff --git a/Cargo.toml b/Cargo.toml index cf72180..127d81d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -5,6 +5,8 @@ name = "bytes" # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. 
version = "1.5.0" +edition = "2018" +rust-version = "1.39" license = "MIT" authors = [ "Carl Lerche ", @@ -15,7 +17,6 @@ repository = "https://github.com/tokio-rs/bytes" readme = "README.md" keywords = ["buffers", "zero-copy", "io"] categories = ["network-programming", "data-structures"] -edition = "2018" [features] default = ["std"] diff --git a/ci/test-stable.sh b/ci/test-stable.sh old mode 100644 new mode 100755 index 4421f3a..a8eaa3c --- a/ci/test-stable.sh +++ b/ci/test-stable.sh @@ -4,10 +4,6 @@ set -ex cmd="${1:-test}" -# Install cargo-hack for feature flag test -host=$(rustc -Vv | grep host | sed 's/host: //') -curl -LsSf https://github.com/taiki-e/cargo-hack/releases/latest/download/cargo-hack-$host.tar.gz | tar xzf - -C ~/.cargo/bin - # Run with each feature # * --each-feature includes both default/no-default features # * --optional-deps is needed for serde feature @@ -15,8 +11,6 @@ cargo hack "${cmd}" --each-feature --optional-deps # Run with all features cargo "${cmd}" --all-features -cargo doc --no-deps --all-features - if [[ "${RUST_VERSION}" == "nightly"* ]]; then # Check benchmarks cargo check --benches diff --git a/ci/tsan.sh b/ci/tsan.sh old mode 100644 new mode 100755 From abb4a2e66cab68a6d1deb3d37377625443794cfd Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Fri, 19 Jan 2024 05:08:27 -0500 Subject: [PATCH 20/53] BytesMut: Assert alignment of Shared (#652) Back in #362, an assertion was added to ensure that the alignment of bytes::Shared is even so we can use the least significant bit as a flag. bytes_mut::Shared uses the same technique but has no such assertion so I've added one here. --- src/bytes_mut.rs | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 57fd33e..dd4ff50 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -80,6 +80,12 @@ struct Shared { ref_count: AtomicUsize, } +// Assert that the alignment of `Shared` is divisible by 2. +// This is a necessary invariant since we depend on allocating `Shared` a +// shared object to implicitly carry the `KIND_ARC` flag in its pointer. +// This flag is set when the LSB is 0. +const _: [(); 0 - mem::align_of::() % 2] = []; // Assert that the alignment of `Shared` is divisible by 2. + // Buffer storage strategy flags. const KIND_ARC: usize = 0b0; const KIND_VEC: usize = 0b1; From 0864aea9704ac12fa53ee96a7f968e51c9dabba1 Mon Sep 17 00:00:00 2001 From: Cyborus04 Date: Fri, 19 Jan 2024 17:59:30 -0500 Subject: [PATCH 21/53] add `Bytes::is_unique` (#643) --- src/bytes.rs | 50 +++++++++++++++++++++++++++++++++++++++++++++ src/bytes_mut.rs | 1 + tests/test_bytes.rs | 33 ++++++++++++++++++++++++++++++ 3 files changed, 84 insertions(+) diff --git a/src/bytes.rs b/src/bytes.rs index 9fed3d2..9eda9f4 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -112,6 +112,8 @@ pub(crate) struct Vtable { /// /// takes `Bytes` to value pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec, + /// fn(data) + pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool, /// fn(data, ptr, len) pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize), } @@ -208,6 +210,28 @@ impl Bytes { self.len == 0 } + /// Returns true if this is the only reference to the data. + /// + /// Always returns false if the data is backed by a static slice. + /// + /// The result of this method may be invalidated immediately if another + /// thread clones this value while this is being called. Ensure you have + /// unique access to this value (`&mut Bytes`) first if you need to be + /// certain the result is valid (i.e. 
for safety reasons) + /// # Examples + /// + /// ``` + /// use bytes::Bytes; + /// + /// let a = Bytes::from(vec![1, 2, 3]); + /// assert!(a.is_unique()); + /// let b = a.clone(); + /// assert!(!a.is_unique()); + /// ``` + pub fn is_unique(&self) -> bool { + unsafe { (self.vtable.is_unique)(&self.data) } + } + /// Creates `Bytes` instance from slice, by copying it. pub fn copy_from_slice(data: &[u8]) -> Self { data.to_vec().into() @@ -898,6 +922,7 @@ impl fmt::Debug for Vtable { const STATIC_VTABLE: Vtable = Vtable { clone: static_clone, to_vec: static_to_vec, + is_unique: static_is_unique, drop: static_drop, }; @@ -911,6 +936,10 @@ unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec) -> bool { + false +} + unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { // nothing to drop for &'static [u8] } @@ -920,12 +949,14 @@ unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable { clone: promotable_even_clone, to_vec: promotable_even_to_vec, + is_unique: promotable_is_unique, drop: promotable_even_drop, }; static PROMOTABLE_ODD_VTABLE: Vtable = Vtable { clone: promotable_odd_clone, to_vec: promotable_odd_to_vec, + is_unique: promotable_is_unique, drop: promotable_odd_drop, }; @@ -1020,6 +1051,18 @@ unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usi }); } +unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool { + let shared = data.load(Ordering::Acquire); + let kind = shared as usize & KIND_MASK; + + if kind == KIND_ARC { + let ref_cnt = (*shared.cast::()).ref_cnt.load(Ordering::Relaxed); + ref_cnt == 1 + } else { + true + } +} + unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) { let cap = (offset as usize - buf as usize) + len; dealloc(buf, Layout::from_size_align(cap, 1).unwrap()) @@ -1049,6 +1092,7 @@ const _: [(); 0 - mem::align_of::() % 2] = []; // Assert that the alignm static SHARED_VTABLE: Vtable = Vtable { clone: shared_clone, to_vec: shared_to_vec, + is_unique: shared_is_unique, drop: shared_drop, }; @@ -1094,6 +1138,12 @@ unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len) } +pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool { + let shared = data.load(Ordering::Acquire); + let ref_cnt = (*shared.cast::()).ref_cnt.load(Ordering::Relaxed); + ref_cnt == 1 +} + unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { data.with_mut(|shared| { release_shared(shared.cast()); diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index dd4ff50..88d596c 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -1708,6 +1708,7 @@ unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) static SHARED_VTABLE: Vtable = Vtable { clone: shared_v_clone, to_vec: shared_v_to_vec, + is_unique: crate::bytes::shared_is_unique, drop: shared_v_drop, }; diff --git a/tests/test_bytes.rs b/tests/test_bytes.rs index 5ec60a5..76adfdb 100644 --- a/tests/test_bytes.rs +++ b/tests/test_bytes.rs @@ -1208,3 +1208,36 @@ fn test_bytes_capacity_len() { } } } + +#[test] +fn static_is_unique() { + let b = Bytes::from_static(LONG); + assert!(!b.is_unique()); +} + +#[test] +fn vec_is_unique() { + let v: Vec = LONG.to_vec(); + let b = Bytes::from(v); + assert!(b.is_unique()); +} + +#[test] +fn arc_is_unique() { + let v: Vec = LONG.to_vec(); + let b = Bytes::from(v); + let c = b.clone(); + assert!(!b.is_unique()); 
+ drop(c); + assert!(b.is_unique()); +} + +#[test] +fn shared_is_unique() { + let v: Vec = LONG.to_vec(); + let b = Bytes::from(v); + let c = b.clone(); + assert!(!c.is_unique()); + drop(b); + assert!(c.is_unique()); +} From 0ba3b4c4cd74a0ad8566277e1a1533fa9e895756 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Sun, 28 Jan 2024 05:37:11 -0500 Subject: [PATCH 22/53] Remove unnecessary namespace qualifier (#660) --- src/bytes.rs | 2 +- src/bytes_mut.rs | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index 9eda9f4..3da747d 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -580,7 +580,7 @@ impl Buf for Bytes { } } - fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { + fn copy_to_bytes(&mut self, len: usize) -> Self { if len == self.remaining() { core::mem::replace(self, Bytes::new()) } else { diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 88d596c..1628a85 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -1078,7 +1078,7 @@ impl Buf for BytesMut { } } - fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { + fn copy_to_bytes(&mut self, len: usize) -> Bytes { self.split_to(len).freeze() } } @@ -1110,7 +1110,7 @@ unsafe impl BufMut for BytesMut { // Specialize these methods so they can skip checking `remaining_mut` // and `advance_mut`. - fn put(&mut self, mut src: T) + fn put(&mut self, mut src: T) where Self: Sized, { From 9257a6ea0852c03f4672e5f8346d3d614543e270 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Sun, 28 Jan 2024 05:50:56 -0500 Subject: [PATCH 23/53] Remove an unnecessary else branch (#662) --- src/bytes_mut.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 1628a85..d143f60 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -726,11 +726,11 @@ impl BytesMut { } return; - } else { - new_cap = cmp::max(new_cap, original_capacity); } } + new_cap = cmp::max(new_cap, original_capacity); + // Create a new vector to store the data let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap)); From e24587dd6197dbc58d6c2b6eb7186df99b04d881 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Sun, 28 Jan 2024 07:00:30 -0500 Subject: [PATCH 24/53] Remove unreachable else branch (#661) --- src/bytes_mut.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index d143f60..8783ae7 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -1628,7 +1628,7 @@ impl From for Vec { let (off, _) = bytes.get_vec_pos(); rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off) } - } else if kind == KIND_ARC { + } else { let shared = bytes.data as *mut Shared; if unsafe { (*shared).is_unique() } { @@ -1640,8 +1640,6 @@ impl From for Vec { } else { return bytes.deref().to_vec(); } - } else { - return bytes.deref().to_vec(); }; let len = bytes.len; From d2e7abdb290e663f025a22a7d9e14e019b6abdb2 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Wed, 31 Jan 2024 09:41:23 -0500 Subject: [PATCH 25/53] refactor: make parameter mut in From (#667) Instead of re-declaring `vec`, we can just use a mut parameter. 
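A minimal standalone sketch of the two equivalent spellings (function names here are made up, not from the crate):

fn take_rebind(vec: Vec<u8>) -> usize {
    // Before: rebind the parameter mutably inside the body.
    let mut vec = vec;
    vec.push(0);
    vec.len()
}

fn take_mut_param(mut vec: Vec<u8>) -> usize {
    // After: mark the parameter itself `mut`; same semantics, less noise.
    vec.push(0);
    vec.len()
}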
--- src/bytes.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index 3da747d..0b443c8 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -828,8 +828,7 @@ impl From<&'static str> for Bytes { } impl From> for Bytes { - fn from(vec: Vec) -> Bytes { - let mut vec = vec; + fn from(mut vec: Vec) -> Bytes { let ptr = vec.as_mut_ptr(); let len = vec.len(); let cap = vec.capacity(); From 8bcac21cb44c112f20e8dd31475033ff448e35ce Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Tue, 6 Feb 2024 13:17:01 -0500 Subject: [PATCH 26/53] Restore commented tests (#665) These seem to have been commented by accident in #298, and are still passing. --- src/bytes_mut.rs | 79 +++++++++++++++++++++++++----------------------- 1 file changed, 41 insertions(+), 38 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 8783ae7..5566f2d 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -1409,56 +1409,59 @@ fn original_capacity_from_repr(repr: usize) -> usize { 1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1)) } -/* -#[test] -fn test_original_capacity_to_repr() { - assert_eq!(original_capacity_to_repr(0), 0); +#[cfg(test)] +mod tests { + use super::*; - let max_width = 32; + #[test] + fn test_original_capacity_to_repr() { + assert_eq!(original_capacity_to_repr(0), 0); - for width in 1..(max_width + 1) { - let cap = 1 << width - 1; + let max_width = 32; - let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH { - 0 - } else if width < MAX_ORIGINAL_CAPACITY_WIDTH { - width - MIN_ORIGINAL_CAPACITY_WIDTH - } else { - MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH - }; + for width in 1..(max_width + 1) { + let cap = 1 << width - 1; - assert_eq!(original_capacity_to_repr(cap), expected); + let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH { + 0 + } else if width < MAX_ORIGINAL_CAPACITY_WIDTH { + width - MIN_ORIGINAL_CAPACITY_WIDTH + } else { + MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH + }; - if width > 1 { - assert_eq!(original_capacity_to_repr(cap + 1), expected); - } + assert_eq!(original_capacity_to_repr(cap), expected); - // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below - if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 { - assert_eq!(original_capacity_to_repr(cap - 24), expected - 1); - assert_eq!(original_capacity_to_repr(cap + 76), expected); - } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 { - assert_eq!(original_capacity_to_repr(cap - 1), expected - 1); - assert_eq!(original_capacity_to_repr(cap - 48), expected - 1); + if width > 1 { + assert_eq!(original_capacity_to_repr(cap + 1), expected); + } + + // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below + if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 { + assert_eq!(original_capacity_to_repr(cap - 24), expected - 1); + assert_eq!(original_capacity_to_repr(cap + 76), expected); + } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 { + assert_eq!(original_capacity_to_repr(cap - 1), expected - 1); + assert_eq!(original_capacity_to_repr(cap - 48), expected - 1); + } } } -} -#[test] -fn test_original_capacity_from_repr() { - assert_eq!(0, original_capacity_from_repr(0)); + #[test] + fn test_original_capacity_from_repr() { + assert_eq!(0, original_capacity_from_repr(0)); - let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH; + let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH; - assert_eq!(min_cap, original_capacity_from_repr(1)); - assert_eq!(min_cap * 2, original_capacity_from_repr(2)); - assert_eq!(min_cap * 4, original_capacity_from_repr(3)); - 
assert_eq!(min_cap * 8, original_capacity_from_repr(4)); - assert_eq!(min_cap * 16, original_capacity_from_repr(5)); - assert_eq!(min_cap * 32, original_capacity_from_repr(6)); - assert_eq!(min_cap * 64, original_capacity_from_repr(7)); + assert_eq!(min_cap, original_capacity_from_repr(1)); + assert_eq!(min_cap * 2, original_capacity_from_repr(2)); + assert_eq!(min_cap * 4, original_capacity_from_repr(3)); + assert_eq!(min_cap * 8, original_capacity_from_repr(4)); + assert_eq!(min_cap * 16, original_capacity_from_repr(5)); + assert_eq!(min_cap * 32, original_capacity_from_repr(6)); + assert_eq!(min_cap * 64, original_capacity_from_repr(7)); + } } -*/ unsafe impl Send for BytesMut {} unsafe impl Sync for BytesMut {} From 47e83056f28e15e4ca68056a0136f3920b753783 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Tue, 6 Feb 2024 13:41:44 -0500 Subject: [PATCH 27/53] Use sub instead of offset (#668) We're always subtracting here, and we already have a usize, so `sub` seems like a more appropriate usage to me. --- src/bytes_mut.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 5566f2d..d1c1411 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -617,7 +617,7 @@ impl BytesMut { // // Just move the pointer back to the start after copying // data back. - let base_ptr = self.ptr.as_ptr().offset(-(off as isize)); + let base_ptr = self.ptr.as_ptr().sub(off); // Since `off >= self.len()`, the two regions don't overlap. ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len); self.ptr = vptr(base_ptr); @@ -1697,7 +1697,7 @@ fn offset_from(dst: *mut u8, original: *mut u8) -> usize { } unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec { - let ptr = ptr.offset(-(off as isize)); + let ptr = ptr.sub(off); len += off; cap += off; From c6972d61328be113ec8e80c207815a4b84fe616c Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Tue, 6 Feb 2024 13:46:49 -0500 Subject: [PATCH 28/53] Calculate original capacity only if necessary (#666) We don't need the original capacity if the shared data is unique, so let's not calculate it until after that check. --- src/bytes_mut.rs | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index d1c1411..619defc 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -652,13 +652,7 @@ impl BytesMut { // Compute the new capacity let mut new_cap = len.checked_add(additional).expect("overflow"); - let original_capacity; - let original_capacity_repr; - unsafe { - original_capacity_repr = (*shared).original_capacity_repr; - original_capacity = original_capacity_from_repr(original_capacity_repr); - // First, try to reclaim the buffer. This is possible if the current // handle is the only outstanding handle pointing to the buffer. if (*shared).is_unique() { @@ -729,6 +723,9 @@ impl BytesMut { } } + let original_capacity_repr = unsafe { (*shared).original_capacity_repr }; + let original_capacity = original_capacity_from_repr(original_capacity_repr); + new_cap = cmp::max(new_cap, original_capacity); // Create a new vector to store the data From f586ffc52589f01be1b4a44d6544b3d0226773d6 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Tue, 6 Feb 2024 14:03:37 -0500 Subject: [PATCH 29/53] set_vec_pos does not need a second parameter (#672) The second argument to `set_vec_pos` always contains the value of `self.data`. Let's just use `self.data` and remove the second parameter altogether. 
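For context, the vector offset lives in the high bits of the `data` word, so the setter can read the low bits back from `self.data` itself. A minimal sketch of that packing, with an assumed shift width (the crate's actual `VEC_POS_OFFSET` and `NOT_VEC_POS_MASK` values are not shown in this patch):

const VEC_POS_OFFSET: usize = 5; // assumed width of the low tag bits
const NOT_VEC_POS_MASK: usize = (1 << VEC_POS_OFFSET) - 1;

fn get_vec_pos(data: usize) -> usize {
    data >> VEC_POS_OFFSET
}

fn set_vec_pos(data: usize, pos: usize) -> usize {
    // The low tag bits come from the existing word; no second argument needed.
    (pos << VEC_POS_OFFSET) | (data & NOT_VEC_POS_MASK)
}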
--- src/bytes_mut.rs | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 619defc..bb72a21 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -247,7 +247,7 @@ impl BytesMut { if self.kind() == KIND_VEC { // Just re-use `Bytes` internal Vec vtable unsafe { - let (off, _) = self.get_vec_pos(); + let off = self.get_vec_pos(); let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off); mem::forget(self); let mut b: Bytes = vec.into(); @@ -596,7 +596,7 @@ impl BytesMut { // We need to make sure that this optimization does not kill the // amortized runtimes of BytesMut's operations. unsafe { - let (off, prev) = self.get_vec_pos(); + let off = self.get_vec_pos(); // Only reuse space if we can satisfy the requested additional space. // @@ -621,7 +621,7 @@ impl BytesMut { // Since `off >= self.len()`, the two regions don't overlap. ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len); self.ptr = vptr(base_ptr); - self.set_vec_pos(0, prev); + self.set_vec_pos(0); // Length stays constant, but since we moved backwards we // can gain capacity back. @@ -867,11 +867,10 @@ impl BytesMut { // complicated. First, we have to track how far ahead the // "start" of the byte buffer from the beginning of the vec. We // also have to ensure that we don't exceed the maximum shift. - let (mut pos, prev) = self.get_vec_pos(); - pos += start; + let pos = self.get_vec_pos() + start; if pos <= MAX_VEC_POS { - self.set_vec_pos(pos, prev); + self.set_vec_pos(pos); } else { // The repr must be upgraded to ARC. This will never happen // on 64 bit systems and will only happen on 32 bit systems @@ -979,19 +978,18 @@ impl BytesMut { } #[inline] - unsafe fn get_vec_pos(&mut self) -> (usize, usize) { + unsafe fn get_vec_pos(&mut self) -> usize { debug_assert_eq!(self.kind(), KIND_VEC); - let prev = self.data as usize; - (prev >> VEC_POS_OFFSET, prev) + self.data as usize >> VEC_POS_OFFSET } #[inline] - unsafe fn set_vec_pos(&mut self, pos: usize, prev: usize) { + unsafe fn set_vec_pos(&mut self, pos: usize) { debug_assert_eq!(self.kind(), KIND_VEC); debug_assert!(pos <= MAX_VEC_POS); - self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)); + self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (self.data as usize & NOT_VEC_POS_MASK)); } /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit`. @@ -1040,7 +1038,7 @@ impl Drop for BytesMut { if kind == KIND_VEC { unsafe { - let (off, _) = self.get_vec_pos(); + let off = self.get_vec_pos(); // Vector storage, free the vector let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off); @@ -1625,7 +1623,7 @@ impl From for Vec { let mut vec = if kind == KIND_VEC { unsafe { - let (off, _) = bytes.get_vec_pos(); + let off = bytes.get_vec_pos(); rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off) } } else { From 1bcd2129d195a0722d8b5b1a16c7d33698701f2e Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Tue, 6 Feb 2024 17:34:30 -0500 Subject: [PATCH 30/53] get_vec_pos: use &self instead of &mut self (#670) I can't see any reason that get_vec_pos needs a &mut self. 
--- src/bytes_mut.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index bb72a21..9018896 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -243,7 +243,7 @@ impl BytesMut { /// th.join().unwrap(); /// ``` #[inline] - pub fn freeze(mut self) -> Bytes { + pub fn freeze(self) -> Bytes { if self.kind() == KIND_VEC { // Just re-use `Bytes` internal Vec vtable unsafe { @@ -978,7 +978,7 @@ impl BytesMut { } #[inline] - unsafe fn get_vec_pos(&mut self) -> usize { + unsafe fn get_vec_pos(&self) -> usize { debug_assert_eq!(self.kind(), KIND_VEC); self.data as usize >> VEC_POS_OFFSET @@ -1618,7 +1618,7 @@ impl PartialEq for BytesMut { } impl From for Vec { - fn from(mut bytes: BytesMut) -> Self { + fn from(bytes: BytesMut) -> Self { let kind = bytes.kind(); let mut vec = if kind == KIND_VEC { From 46289278f52a26c12298779f4aaebad1dcb26d35 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Fri, 23 Feb 2024 17:22:58 -0500 Subject: [PATCH 31/53] Refactor split_at/split_to (#663) * set len a little more concisely * inline set_end * remove kind assertions * remove a duplicate assertion * remove redundant assertion and min * rename set_start to advance_unchecked --- src/bytes_mut.rs | 50 +++++++++++++++++++++++------------------------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 9018896..220bdb0 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -317,8 +317,10 @@ impl BytesMut { ); unsafe { let mut other = self.shallow_clone(); - other.set_start(at); - self.set_end(at); + // SAFETY: We've checked that `at` <= `self.capacity()` above. + other.advance_unchecked(at); + self.cap = at; + self.len = cmp::min(self.len, at); other } } @@ -391,8 +393,11 @@ impl BytesMut { unsafe { let mut other = self.shallow_clone(); - other.set_end(at); - self.set_start(at); + // SAFETY: We've checked that `at` <= `self.len()` and we know that `self.len()` <= + // `self.capacity()`. + self.advance_unchecked(at); + other.cap = at; + other.len = at; other } } @@ -851,14 +856,19 @@ impl BytesMut { unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) } } - unsafe fn set_start(&mut self, start: usize) { + /// Advance the buffer without bounds checking. + /// + /// # SAFETY + /// + /// The caller must ensure that `count` <= `self.cap`. + unsafe fn advance_unchecked(&mut self, count: usize) { // Setting the start to 0 is a no-op, so return early if this is the // case. - if start == 0 { + if count == 0 { return; } - debug_assert!(start <= self.cap, "internal: set_start out of bounds"); + debug_assert!(count <= self.cap, "internal: set_start out of bounds"); let kind = self.kind(); @@ -867,7 +877,7 @@ impl BytesMut { // complicated. First, we have to track how far ahead the // "start" of the byte buffer from the beginning of the vec. We // also have to ensure that we don't exceed the maximum shift. - let pos = self.get_vec_pos() + start; + let pos = self.get_vec_pos() + count; if pos <= MAX_VEC_POS { self.set_vec_pos(pos); @@ -883,23 +893,9 @@ impl BytesMut { // Updating the start of the view is setting `ptr` to point to the // new start and updating the `len` field to reflect the new length // of the view. 
- self.ptr = vptr(self.ptr.as_ptr().add(start)); - - if self.len >= start { - self.len -= start; - } else { - self.len = 0; - } - - self.cap -= start; - } - - unsafe fn set_end(&mut self, end: usize) { - debug_assert_eq!(self.kind(), KIND_ARC); - assert!(end <= self.cap, "set_end out of bounds"); - - self.cap = end; - self.len = cmp::min(self.len, end); + self.ptr = vptr(self.ptr.as_ptr().add(count)); + self.len = self.len.checked_sub(count).unwrap_or(0); + self.cap -= count; } fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> { @@ -1069,7 +1065,9 @@ impl Buf for BytesMut { self.remaining(), ); unsafe { - self.set_start(cnt); + // SAFETY: We've checked that `cnt` <= `self.remaining()` and we know that + // `self.remaining()` <= `self.cap`. + self.advance_unchecked(cnt); } } From 99584cc10d66cb6880a20c5ac9b9a960f9c17823 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Sat, 2 Mar 2024 10:40:17 -0500 Subject: [PATCH 32/53] Use Iterator from the prelude (#673) CI is [failing][failure] due to unused_imports because Iterator is already in the prelude. Removing it fixes things up. [failure]: https://github.com/tokio-rs/bytes/actions/runs/8034858583/job/21946873895 --- src/bytes_mut.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 220bdb0..734f4df 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -1,4 +1,4 @@ -use core::iter::{FromIterator, Iterator}; +use core::iter::FromIterator; use core::mem::{self, ManuallyDrop, MaybeUninit}; use core::ops::{Deref, DerefMut}; use core::ptr::{self, NonNull}; From c5fae00c76dbd1af7ea7b6cde7a9281d82ee7cd2 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Sun, 3 Mar 2024 08:59:30 -0500 Subject: [PATCH 33/53] copy_to_bytes: Add panic section to docs (#676) Fixes #454. --- src/buf/buf_impl.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/buf/buf_impl.rs b/src/buf/buf_impl.rs index 9367eb2..38ecf4b 100644 --- a/src/buf/buf_impl.rs +++ b/src/buf/buf_impl.rs @@ -1120,6 +1120,10 @@ pub trait Buf { /// let bytes = (&b"hello world"[..]).copy_to_bytes(5); /// assert_eq!(&bytes[..], &b"hello"[..]); /// ``` + /// + /// # Panics + /// + /// This function panics if `len > self.remaining()`. fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes { use super::BufMut; From 7968f6f83d17175683e04ce56aa48e44ed7d0d98 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Mon, 4 Mar 2024 03:04:40 -0500 Subject: [PATCH 34/53] Remove redundant reserve call (#674) --- src/bytes_mut.rs | 2 -- tests/test_bytes.rs | 22 ++++++++++++++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 734f4df..1b4a4d9 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -1283,9 +1283,7 @@ impl Extend for BytesMut { // TODO: optimize // 1. If self.kind() == KIND_VEC, use Vec::extend - // 2. 
Make `reserve` inline-able for b in iter { - self.reserve(1); self.put_u8(b); } } diff --git a/tests/test_bytes.rs b/tests/test_bytes.rs index 76adfdb..e3820d7 100644 --- a/tests/test_bytes.rs +++ b/tests/test_bytes.rs @@ -598,6 +598,28 @@ fn extend_mut_from_bytes() { assert_eq!(*bytes, LONG[..]); } +#[test] +fn extend_past_lower_limit_of_size_hint() { + // See https://github.com/tokio-rs/bytes/pull/674#pullrequestreview-1913035700 + struct Iter(I); + + impl> Iterator for Iter { + type Item = u8; + + fn next(&mut self) -> Option { + self.0.next() + } + + fn size_hint(&self) -> (usize, Option) { + (5, None) + } + } + + let mut bytes = BytesMut::with_capacity(5); + bytes.extend(Iter(std::iter::repeat(0).take(10))); + assert_eq!(bytes.len(), 10); +} + #[test] fn extend_mut_without_size_hint() { let mut bytes = BytesMut::with_capacity(0); From ca004117f86afccd36148dee7c8413cfaf9de6a4 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Mon, 4 Mar 2024 03:05:00 -0500 Subject: [PATCH 35/53] Remove commented tests for Bytes::unsplit (#677) Bytes doesn't have an unsplit method anymore. We can always retrieve these from git history if necessary. --- tests/test_bytes.rs | 91 --------------------------------------------- 1 file changed, 91 deletions(-) diff --git a/tests/test_bytes.rs b/tests/test_bytes.rs index e3820d7..84c3d5a 100644 --- a/tests/test_bytes.rs +++ b/tests/test_bytes.rs @@ -732,97 +732,6 @@ fn partial_eq_bytesmut() { assert!(bytesmut != bytes2); } -/* -#[test] -fn bytes_unsplit_basic() { - let buf = Bytes::from(&b"aaabbbcccddd"[..]); - - let splitted = buf.split_off(6); - assert_eq!(b"aaabbb", &buf[..]); - assert_eq!(b"cccddd", &splitted[..]); - - buf.unsplit(splitted); - assert_eq!(b"aaabbbcccddd", &buf[..]); -} - -#[test] -fn bytes_unsplit_empty_other() { - let buf = Bytes::from(&b"aaabbbcccddd"[..]); - - // empty other - let other = Bytes::new(); - - buf.unsplit(other); - assert_eq!(b"aaabbbcccddd", &buf[..]); -} - -#[test] -fn bytes_unsplit_empty_self() { - // empty self - let mut buf = Bytes::new(); - - let mut other = Bytes::with_capacity(64); - other.extend_from_slice(b"aaabbbcccddd"); - - buf.unsplit(other); - assert_eq!(b"aaabbbcccddd", &buf[..]); -} - -#[test] -fn bytes_unsplit_arc_different() { - let mut buf = Bytes::with_capacity(64); - buf.extend_from_slice(b"aaaabbbbeeee"); - - buf.split_off(8); //arc - - let mut buf2 = Bytes::with_capacity(64); - buf2.extend_from_slice(b"ccccddddeeee"); - - buf2.split_off(8); //arc - - buf.unsplit(buf2); - assert_eq!(b"aaaabbbbccccdddd", &buf[..]); -} - -#[test] -fn bytes_unsplit_arc_non_contiguous() { - let mut buf = Bytes::with_capacity(64); - buf.extend_from_slice(b"aaaabbbbeeeeccccdddd"); - - let mut buf2 = buf.split_off(8); //arc - - let buf3 = buf2.split_off(4); //arc - - buf.unsplit(buf3); - assert_eq!(b"aaaabbbbccccdddd", &buf[..]); -} - -#[test] -fn bytes_unsplit_two_split_offs() { - let mut buf = Bytes::with_capacity(64); - buf.extend_from_slice(b"aaaabbbbccccdddd"); - - let mut buf2 = buf.split_off(8); //arc - let buf3 = buf2.split_off(4); //arc - - buf2.unsplit(buf3); - buf.unsplit(buf2); - assert_eq!(b"aaaabbbbccccdddd", &buf[..]); -} - -#[test] -fn bytes_unsplit_overlapping_references() { - let mut buf = Bytes::with_capacity(64); - buf.extend_from_slice(b"abcdefghijklmnopqrstuvwxyz"); - let mut buf0010 = buf.slice(0..10); - let buf1020 = buf.slice(10..20); - let buf0515 = buf.slice(5..15); - buf0010.unsplit(buf1020); - assert_eq!(b"abcdefghijklmnopqrst", &buf0010[..]); - assert_eq!(b"fghijklmno", &buf0515[..]); -} -*/ - 
#[test] fn bytes_mut_unsplit_basic() { let mut buf = BytesMut::with_capacity(64); From 536db06f168bdef967afbeac0561bf774e9a1315 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Thu, 14 Mar 2024 09:40:03 -0400 Subject: [PATCH 36/53] Use ManuallyDrop instead of mem::forget (#675) --- src/bytes_mut.rs | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 1b4a4d9..282aaa7 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -244,23 +244,22 @@ impl BytesMut { /// ``` #[inline] pub fn freeze(self) -> Bytes { - if self.kind() == KIND_VEC { + let bytes = ManuallyDrop::new(self); + if bytes.kind() == KIND_VEC { // Just re-use `Bytes` internal Vec vtable unsafe { - let off = self.get_vec_pos(); - let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off); - mem::forget(self); + let off = bytes.get_vec_pos(); + let vec = rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off); let mut b: Bytes = vec.into(); b.advance(off); b } } else { - debug_assert_eq!(self.kind(), KIND_ARC); + debug_assert_eq!(bytes.kind(), KIND_ARC); - let ptr = self.ptr.as_ptr(); - let len = self.len; - let data = AtomicPtr::new(self.data.cast()); - mem::forget(self); + let ptr = bytes.ptr.as_ptr(); + let len = bytes.len; + let data = AtomicPtr::new(bytes.data.cast()); unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) } } } @@ -829,11 +828,11 @@ impl BytesMut { // internal change could make a simple pattern (`BytesMut::from(vec)`) // suddenly a lot more expensive. #[inline] - pub(crate) fn from_vec(mut vec: Vec) -> BytesMut { + pub(crate) fn from_vec(vec: Vec) -> BytesMut { + let mut vec = ManuallyDrop::new(vec); let ptr = vptr(vec.as_mut_ptr()); let len = vec.len(); let cap = vec.capacity(); - mem::forget(vec); let original_capacity_repr = original_capacity_to_repr(cap); let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC; @@ -1616,6 +1615,7 @@ impl PartialEq for BytesMut { impl From for Vec { fn from(bytes: BytesMut) -> Self { let kind = bytes.kind(); + let bytes = ManuallyDrop::new(bytes); let mut vec = if kind == KIND_VEC { unsafe { @@ -1632,7 +1632,7 @@ impl From for Vec { vec } else { - return bytes.deref().to_vec(); + return ManuallyDrop::into_inner(bytes).deref().to_vec(); } }; @@ -1643,8 +1643,6 @@ impl From for Vec { vec.set_len(len); } - mem::forget(bytes); - vec } } From ce8d8a0a029c0d296ade752ecc8c3e1ce9eee47f Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Fri, 22 Mar 2024 15:55:20 -0400 Subject: [PATCH 37/53] chore: prepare bytes v1.6.0 (#681) --- CHANGELOG.md | 37 +++++++++++++++++++++++++++++++++++++ Cargo.toml | 2 +- 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 67b9f67..2335717 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,40 @@ +# 1.6.0 (March 22, 2024) + +### Added + +- Add `Bytes::is_unique` (#643) + +### Documented + +- Fix changelog typo (#628) +- Fix some spelling mistakes (#633) +- Typo fix (#637) +- Fix broken links (#639) +- Add security policy (#649) + +### Internal changes + +- Move comment to correct constant (#629) +- Various cleanup (#635) +- Simplify `UninitSlice::as_uninit_slice_mut()` logic (#644) +- Use `self.` instead of `Self::` (#642) +- `BytesMut`: Assert alignment of `Shared` (#652) +- Remove unnecessary namespace qualifier (#660) +- Remove an unnecessary else branch (#662) +- Remove unreachable else branch (#661) +- make parameter mut in `From` (#667) +- Restore commented tests (#665) +- Use `sub` 
instead of `offset` (#668) +- Calculate original capacity only if necessary (#666) +- `set_vec_pos` does not need a second parameter (#672) +- `get_vec_pos`: use `&self` instead of `&mut self` (#670) +- Refactor `split_at`/`split_to` (#663) +- Use `Iterator` from the prelude (#673) +- `copy_to_bytes`: Add panic section to docs (#676) +- Remove redundant reserve call (#674) +- Use `ManuallyDrop` instead of `mem::forget` (#675) + + # 1.5.0 (September 7, 2023) ### Added diff --git a/Cargo.toml b/Cargo.toml index 127d81d..793582a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -4,7 +4,7 @@ name = "bytes" # When releasing to crates.io: # - Update CHANGELOG.md. # - Create "v1.x.y" git tag. -version = "1.5.0" +version = "1.6.0" edition = "2018" rust-version = "1.39" license = "MIT" From 0d4cc7ffed2eadfb2028bade65b9ac0b6d231fc4 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Mon, 8 Apr 2024 11:05:04 -0400 Subject: [PATCH 38/53] Bytes: Use ManuallyDrop instead of mem::forget (#678) --- src/bytes.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index 0b443c8..4a0a94f 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -1,6 +1,7 @@ use core::iter::FromIterator; +use core::mem::{self, ManuallyDrop}; use core::ops::{Deref, RangeBounds}; -use core::{cmp, fmt, hash, mem, ptr, slice, usize}; +use core::{cmp, fmt, hash, ptr, slice, usize}; use alloc::{ alloc::{dealloc, Layout}, @@ -828,13 +829,15 @@ impl From<&'static str> for Bytes { } impl From> for Bytes { - fn from(mut vec: Vec) -> Bytes { + fn from(vec: Vec) -> Bytes { + let mut vec = ManuallyDrop::new(vec); let ptr = vec.as_mut_ptr(); let len = vec.len(); let cap = vec.capacity(); // Avoid an extra allocation if possible. if len == cap { + let vec = ManuallyDrop::into_inner(vec); return Bytes::from(vec.into_boxed_slice()); } @@ -843,7 +846,6 @@ impl From> for Bytes { cap, ref_cnt: AtomicUsize::new(1), }); - mem::forget(vec); let shared = Box::into_raw(shared); // The pointer should be aligned, so this assert should @@ -900,7 +902,7 @@ impl From for Bytes { impl From for Vec { fn from(bytes: Bytes) -> Vec { - let bytes = mem::ManuallyDrop::new(bytes); + let bytes = ManuallyDrop::new(bytes); unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) } } } @@ -1116,11 +1118,11 @@ unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed) .is_ok() { - let buf = (*shared).buf; - let cap = (*shared).cap; - - // Deallocate Shared - drop(Box::from_raw(shared as *mut mem::ManuallyDrop)); + // Deallocate the `Shared` instance without running its destructor. + let shared = *Box::from_raw(shared); + let shared = ManuallyDrop::new(shared); + let buf = shared.buf; + let cap = shared.cap; // Copy back buffer ptr::copy(ptr, buf, len); From e4af48633cec419e8274571d353fe166d5e23a3e Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Tue, 9 Apr 2024 08:35:54 -0400 Subject: [PATCH 39/53] Don't set `len` in `BytesMut::reserve` (#682) A fundamental invariant of `reserve` is that it can extend capacity while the stored data remains the same, even if it's moved to a new allocation. As a result, `len` can never change during a call to `reserve`. 
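The same contract is documented for `Vec::reserve`; a quick standalone check against std's `Vec` (not the patch's own code):

fn main() {
    let mut buf = Vec::with_capacity(4);
    buf.extend_from_slice(b"abc");
    let len_before = buf.len();
    buf.reserve(1024); // may reallocate and move the bytes...
    assert_eq!(buf.len(), len_before); // ...but `len` never changes
    assert!(buf.capacity() >= len_before + 1024);
    assert_eq!(&buf[..], b"abc"); // ...and the contents are preserved
}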
--- src/bytes_mut.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 282aaa7..c9f5634 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -639,8 +639,8 @@ impl BytesMut { // Update the info self.ptr = vptr(v.as_mut_ptr().add(off)); - self.len = v.len() - off; self.cap = v.capacity() - off; + debug_assert_eq!(self.len, v.len() - off); } return; @@ -746,8 +746,8 @@ impl BytesMut { let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC; self.data = invalid_ptr(data); self.ptr = vptr(v.as_mut_ptr()); - self.len = v.len(); self.cap = v.capacity(); + debug_assert_eq!(self.len, v.len()); } /// Appends given bytes to this `BytesMut`. From 4eb62b912a199bef711e7e12243d972f4f0cdca8 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Wed, 10 Apr 2024 04:09:09 -0400 Subject: [PATCH 40/53] Bytes::split_to - check fast path first (#689) If `at == self.len()` then we already know `at <= self.len()`. If `at == 0`, it can't be greater than `self.len()`. --- src/bytes.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index 4a0a94f..63c06ce 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -434,13 +434,6 @@ impl Bytes { /// Panics if `at > len`. #[must_use = "consider Bytes::advance if you don't need the other half"] pub fn split_to(&mut self, at: usize) -> Self { - assert!( - at <= self.len(), - "split_to out of bounds: {:?} <= {:?}", - at, - self.len(), - ); - if at == self.len() { return mem::replace(self, Bytes::new()); } @@ -449,6 +442,13 @@ impl Bytes { return Bytes::new(); } + assert!( + at <= self.len(), + "split_to out of bounds: {:?} <= {:?}", + at, + self.len(), + ); + let mut ret = self.clone(); unsafe { self.inc_start(at) }; From b5fbfc3edb35a03ca560d29a0911e0495299575e Mon Sep 17 00:00:00 2001 From: tison Date: Wed, 10 Apr 2024 22:45:31 +0800 Subject: [PATCH 41/53] perf: improve Bytes::copy_to_bytes (#688) Signed-off-by: tison --- src/bytes.rs | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index 63c06ce..c3240ce 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -582,13 +582,7 @@ impl Buf for Bytes { } fn copy_to_bytes(&mut self, len: usize) -> Self { - if len == self.remaining() { - core::mem::replace(self, Bytes::new()) - } else { - let ret = self.slice(..len); - self.advance(len); - ret - } + self.split_to(len) } } From 327615e5d4ba27e9647734d83ef9ad88d7dd8a38 Mon Sep 17 00:00:00 2001 From: Joseph Perez Date: Thu, 11 Apr 2024 11:45:18 +0200 Subject: [PATCH 42/53] test(benches): encloses bytes into `test::black_box` for clone benches (#691) Closes #690 Without it, it seems to me that compiler is able to inline the vtable, resulting in similar results for `clone_shared` and `clone_arg_vec`. 
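The pattern in isolation, using the stable `std::hint::black_box` rather than the unstable `test::black_box` the benches use (they serve the same purpose here):

use std::hint::black_box;

fn main() {
    let bytes = vec![0u8; 1024];
    for _ in 0..1024 {
        // Hiding the input as well as the result keeps the optimizer from
        // specializing the clone for a known value (e.g. inlining through
        // the vtable), which is what made the two benchmarks look alike.
        black_box(black_box(&bytes).clone());
    }
}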
--- benches/bytes.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/benches/bytes.rs b/benches/bytes.rs index 61d1e83..8782d00 100644 --- a/benches/bytes.rs +++ b/benches/bytes.rs @@ -47,7 +47,7 @@ fn clone_static(b: &mut Bencher) { b.iter(|| { for _ in 0..1024 { - test::black_box(&bytes.clone()); + test::black_box(test::black_box(&bytes).clone()); } }) } @@ -58,7 +58,7 @@ fn clone_shared(b: &mut Bencher) { b.iter(|| { for _ in 0..1024 { - test::black_box(&bytes.clone()); + test::black_box(test::black_box(&bytes).clone()); } }) } @@ -70,7 +70,7 @@ fn clone_arc_vec(b: &mut Bencher) { b.iter(|| { for _ in 0..1024 { - test::black_box(&bytes.clone()); + test::black_box(test::black_box(&bytes).clone()); } }) } From 4e2c9c065a06bf9cb5d7dd46e3b29f62a1c20057 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Wed, 17 Apr 2024 05:27:00 -0400 Subject: [PATCH 43/53] Truncate tweaks (#694) --- src/bytes_mut.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index c9f5634..0248df8 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -422,8 +422,9 @@ impl BytesMut { /// assert_eq!(buf, b"hello"[..]); /// ``` pub fn truncate(&mut self, len: usize) { - if len <= self.len() { + if len < self.len() { unsafe { + // SAFETY: Shrinking the buffer cannot expose uninitialized bytes. self.set_len(len); } } From 9d3ec1cffb76141b4706bb289beced8b04ecac4a Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Wed, 24 Apr 2024 05:49:53 -0400 Subject: [PATCH 44/53] Resize refactor (#696) * use checked_sub * return when additional == 0 * move safe operation out of unsafe block * use spare_capacity_mut instead of chunk_mut We don't need to check capacity because it's already been reserved above. * Add safety comments * refactor to use guard clauses This would be better written with let-else, but we won't get that until `MSRV >= 1.65.x`. * use if-let instead of unwrap * reduce scope of unsafe blocks Co-authored-by: Alice Ryhl --------- Co-authored-by: Alice Ryhl --- src/bytes_mut.rs | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 0248df8..0ea0272 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -468,18 +468,26 @@ impl BytesMut { /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]); /// ``` pub fn resize(&mut self, new_len: usize, value: u8) { - let len = self.len(); - if new_len > len { - let additional = new_len - len; - self.reserve(additional); - unsafe { - let dst = self.chunk_mut().as_mut_ptr(); - ptr::write_bytes(dst, value, additional); - self.set_len(new_len); - } + let additional = if let Some(additional) = new_len.checked_sub(self.len()) { + additional } else { self.truncate(new_len); + return; + }; + + if additional == 0 { + return; } + + self.reserve(additional); + let dst = self.spare_capacity_mut().as_mut_ptr(); + // SAFETY: `spare_capacity_mut` returns a valid, properly aligned pointer and we've + // reserved enough space to write `additional` bytes. + unsafe { ptr::write_bytes(dst, value, additional) }; + + // SAFETY: There are at least `new_len` initialized bytes in the buffer so no + // uninitialized bytes are being exposed. + unsafe { self.set_len(new_len) }; } /// Sets the length of the buffer. 
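For reference, the `let`-`else` form mentioned in the commit message would read as follows; it needs Rust 1.65, above the crate's MSRV at the time, so this is only a sketch:

let Some(additional) = new_len.checked_sub(self.len()) else {
    self.truncate(new_len);
    return;
};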
From ce09d7d358ab1d1d31ed9d0b52a747c0a21ea401 Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Wed, 24 Apr 2024 08:23:39 -0400 Subject: [PATCH 45/53] Bytes::split_off - check fast path first (#693) Follow up to https://github.com/tokio-rs/bytes/pull/689 * If `at == self.len()`, we already know `at <= self.len()`. * If `at == 0`, we already know `at <= self.len()`. --- src/bytes.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index c3240ce..908cee9 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -385,13 +385,6 @@ impl Bytes { /// Panics if `at > len`. #[must_use = "consider Bytes::truncate if you don't need the other half"] pub fn split_off(&mut self, at: usize) -> Self { - assert!( - at <= self.len(), - "split_off out of bounds: {:?} <= {:?}", - at, - self.len(), - ); - if at == self.len() { return Bytes::new(); } @@ -400,6 +393,13 @@ impl Bytes { return mem::replace(self, Bytes::new()); } + assert!( + at <= self.len(), + "split_off out of bounds: {:?} <= {:?}", + at, + self.len(), + ); + let mut ret = self.clone(); self.len = at; From baa5053572ed9e88ca1058ec2b5a3f08046c5a40 Mon Sep 17 00:00:00 2001 From: Paolo Barbolini Date: Thu, 25 Apr 2024 09:08:16 +0200 Subject: [PATCH 46/53] Reuse capacity when possible in ::advance impl (#698) --- src/bytes_mut.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 0ea0272..35e1900 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -1066,6 +1066,14 @@ impl Buf for BytesMut { #[inline] fn advance(&mut self, cnt: usize) { + // Advancing by the length is the same as resetting the length to 0, + // except this way we get to reuse the full capacity. + if cnt == self.remaining() { + // SAFETY: Zero is not greater than the capacity. + unsafe { self.set_len(0) }; + return; + } + assert!( cnt <= self.remaining(), "cannot advance past `remaining`: {:?} <= {:?}", From a8806c245700e583134e67b7e0b87f1256b95bfa Mon Sep 17 00:00:00 2001 From: Paolo Barbolini Date: Thu, 25 Apr 2024 10:43:15 +0200 Subject: [PATCH 47/53] Improve BytesMut::split suggestion (#699) --- src/bytes_mut.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 35e1900..7576299 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -349,7 +349,7 @@ impl BytesMut { /// /// assert_eq!(other, b"hello world"[..]); /// ``` - #[must_use = "consider BytesMut::advance(len()) if you don't need the other half"] + #[must_use = "consider BytesMut::clear if you don't need the other half"] pub fn split(&mut self) -> BytesMut { let len = self.len(); self.split_to(len) From cb7f8449b5efc7022dc592b3a1d7dd33079f4c8f Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Fri, 26 Apr 2024 09:24:05 +0200 Subject: [PATCH 48/53] Tweak clear and truncate length modifications (#700) --- src/bytes_mut.rs | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 7576299..b01bb1a 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -422,11 +422,9 @@ impl BytesMut { /// assert_eq!(buf, b"hello"[..]); /// ``` pub fn truncate(&mut self, len: usize) { - if len < self.len() { - unsafe { - // SAFETY: Shrinking the buffer cannot expose uninitialized bytes. - self.set_len(len); - } + if len <= self.len() { + // SAFETY: Shrinking the buffer cannot expose uninitialized bytes. 
+ unsafe { self.set_len(len) }; } } @@ -442,7 +440,8 @@ /// assert!(buf.is_empty()); /// ``` pub fn clear(&mut self) { - self.truncate(0); + // SAFETY: Setting the length to zero cannot expose uninitialized bytes. + unsafe { self.set_len(0) }; } /// Resizes the buffer so that `len` is equal to `new_len`. @@ -1069,8 +1068,7 @@ impl Buf for BytesMut { // Advancing by the length is the same as resetting the length to 0, // except this way we get to reuse the full capacity. if cnt == self.remaining() { - // SAFETY: Zero is not greater than the capacity. - unsafe { self.set_len(0) }; + self.clear(); return; } From 0c17e99283185b94ab68cdf0fb62da53cbd765ee Mon Sep 17 00:00:00 2001 From: Alice Ryhl Date: Sun, 5 May 2024 14:19:18 +0200 Subject: [PATCH 49/53] ci: silence unexpected-cfgs warnings due to `#[cfg(loom)]` (#703) --- src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/lib.rs b/src/lib.rs index 1b3e6fc..4dd1180 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,3 +1,4 @@ +#![allow(unknown_lints, unexpected_cfgs)] #![warn(missing_docs, missing_debug_implementations, rust_2018_idioms)] #![doc(test( no_crate_inject, From 86694b05649c0c1666044b2ba5c386c2328aac18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C3=89mile=20Fugulin?= Date: Sun, 5 May 2024 11:58:00 -0400 Subject: [PATCH 50/53] Add zero-copy make_mut (#695) --- src/bytes.rs | 150 +++++++++++++++++++++++++++++++++- src/bytes_mut.rs | 32 +++++++- tests/test_bytes.rs | 111 +++++++++++++++++++++++++ tests/test_bytes_odd_alloc.rs | 50 ++++++++++++ 4 files changed, 341 insertions(+), 2 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index 908cee9..b4359b0 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -15,7 +15,7 @@ use crate::buf::IntoIter; #[allow(unused)] use crate::loom::sync::atomic::AtomicMut; use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; -use crate::Buf; +use crate::{Buf, BytesMut}; /// A cheaply cloneable and sliceable chunk of contiguous memory. /// @@ -113,6 +113,7 @@ pub(crate) struct Vtable { /// /// takes `Bytes` to value pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>, + pub to_mut: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> BytesMut, /// fn(data) pub is_unique: unsafe fn(&AtomicPtr<()>) -> bool, /// fn(data, ptr, len) @@ -507,6 +508,49 @@ impl Bytes { self.truncate(0); } + /// Try to convert self into `BytesMut`. + /// + /// If `self` is unique for the entire original buffer, this will succeed + /// and return a `BytesMut` with the contents of `self` without copying. + /// If `self` is not unique for the entire original buffer, this will fail + /// and return self. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Bytes, BytesMut}; + /// + /// let bytes = Bytes::from(b"hello".to_vec()); + /// assert_eq!(bytes.try_into_mut(), Ok(BytesMut::from(&b"hello"[..]))); + /// ``` + pub fn try_into_mut(self) -> Result<BytesMut, Bytes> { + if self.is_unique() { + Ok(self.make_mut()) + } else { + Err(self) + } + } + + /// Convert self into `BytesMut`. + /// + /// If `self` is unique for the entire original buffer, this will return a + /// `BytesMut` with the contents of `self` without copying. + /// If `self` is not unique for the entire original buffer, this will make + /// a copy of `self` subset of the original buffer in a new `BytesMut`.
+ /// + /// # Examples + /// + /// ``` + /// use bytes::{Bytes, BytesMut}; + /// + /// let bytes = Bytes::from(b"hello".to_vec()); + /// assert_eq!(bytes.make_mut(), BytesMut::from(&b"hello"[..])); + /// ``` + pub fn make_mut(self) -> BytesMut { + let bytes = ManuallyDrop::new(self); + unsafe { (bytes.vtable.to_mut)(&bytes.data, bytes.ptr, bytes.len) } + } + #[inline] pub(crate) unsafe fn with_vtable( ptr: *const u8, @@ -917,6 +961,7 @@ impl fmt::Debug for Vtable { const STATIC_VTABLE: Vtable = Vtable { clone: static_clone, to_vec: static_to_vec, + to_mut: static_to_mut, is_unique: static_is_unique, drop: static_drop, }; @@ -931,6 +976,11 @@ unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u slice.to_vec() } +unsafe fn static_to_mut(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut { + let slice = slice::from_raw_parts(ptr, len); + BytesMut::from(slice) +} + fn static_is_unique(_: &AtomicPtr<()>) -> bool { false } @@ -944,6 +994,7 @@ unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) { static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable { clone: promotable_even_clone, to_vec: promotable_even_to_vec, + to_mut: promotable_even_to_mut, is_unique: promotable_is_unique, drop: promotable_even_drop, }; @@ -951,6 +1002,7 @@ static PROMOTABLE_ODD_VTABLE: Vtable = Vtable { clone: promotable_odd_clone, to_vec: promotable_odd_to_vec, + to_mut: promotable_odd_to_mut, is_unique: promotable_is_unique, drop: promotable_odd_drop, }; @@ -994,12 +1046,47 @@ unsafe fn promotable_to_vec( } } +unsafe fn promotable_to_mut( + data: &AtomicPtr<()>, + ptr: *const u8, + len: usize, + f: fn(*mut ()) -> *mut u8, +) -> BytesMut { + let shared = data.load(Ordering::Acquire); + let kind = shared as usize & KIND_MASK; + + if kind == KIND_ARC { + shared_to_mut_impl(shared.cast(), ptr, len) + } else { + // KIND_VEC is a view of an underlying buffer at a certain offset. + // The ptr + len always represents the end of that buffer. + // Before truncating it, it is first promoted to KIND_ARC. + // Thus, we can safely reconstruct a Vec from it without leaking memory.
+ debug_assert_eq!(kind, KIND_VEC); + + let buf = f(shared); + let off = offset_from(ptr, buf); + let cap = off + len; + let v = Vec::from_raw_parts(buf, cap, cap); + + let mut b = BytesMut::from_vec(v); + b.advance_unchecked(off); + b + } +} + unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> { promotable_to_vec(data, ptr, len, |shared| { ptr_map(shared.cast(), |addr| addr & !KIND_MASK) }) } +unsafe fn promotable_even_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut { + promotable_to_mut(data, ptr, len, |shared| { + ptr_map(shared.cast(), |addr| addr & !KIND_MASK) + }) +} + unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { data.with_mut(|shared| { let shared = *shared; @@ -1031,6 +1118,10 @@ unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize promotable_to_vec(data, ptr, len, |shared| shared.cast()) } +unsafe fn promotable_odd_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut { + promotable_to_mut(data, ptr, len, |shared| shared.cast()) +} + unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) { data.with_mut(|shared| { let shared = *shared; @@ -1087,6 +1178,7 @@ const _: [(); 0 - mem::align_of::<Shared>() % 2] = []; // Assert that the alignm static SHARED_VTABLE: Vtable = Vtable { clone: shared_clone, to_vec: shared_to_vec, + to_mut: shared_to_mut, is_unique: shared_is_unique, drop: shared_drop, }; @@ -1133,6 +1225,45 @@ unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len) } +unsafe fn shared_to_mut_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> BytesMut { + // The goal is to check if the current handle is the only handle + // that currently has access to the buffer. This is done by + // checking if the `ref_cnt` is currently 1. + // + // The `Acquire` ordering synchronizes with the `Release` as + // part of the `fetch_sub` in `release_shared`. The `fetch_sub` + // operation guarantees that any mutations done in other threads + // are ordered before the `ref_cnt` is decremented. As such, + // this `Acquire` will guarantee that those mutations are + // visible to the current thread. + // + // Otherwise, we take the other branch, copy the data and call `release_shared`. + if (*shared).ref_cnt.load(Ordering::Acquire) == 1 { + // Deallocate the `Shared` instance without running its destructor.
+ let shared = *Box::from_raw(shared); + let shared = ManuallyDrop::new(shared); + let buf = shared.buf; + let cap = shared.cap; + + // Rebuild Vec + let off = offset_from(ptr, buf); + let v = Vec::from_raw_parts(buf, len + off, cap); + + let mut b = BytesMut::from_vec(v); + b.advance_unchecked(off); + b + } else { + // Copy the data from Shared in a new Vec, then release it + let v = slice::from_raw_parts(ptr, len).to_vec(); + release_shared(shared); + BytesMut::from_vec(v) + } +} + +unsafe fn shared_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut { + shared_to_mut_impl(data.load(Ordering::Relaxed).cast(), ptr, len) +} + pub(crate) unsafe fn shared_is_unique(data: &AtomicPtr<()>) -> bool { let shared = data.load(Ordering::Acquire); let ref_cnt = (*shared.cast::<Shared>()).ref_cnt.load(Ordering::Relaxed); @@ -1291,6 +1422,23 @@ where new_addr as *mut u8 } +/// Precondition: dst >= original +/// +/// The following line is equivalent to: +/// +/// ```rust,ignore +/// self.ptr.as_ptr().offset_from(ptr) as usize; +/// ``` +/// +/// But due to min rust is 1.39 and it is only stabilized +/// in 1.47, we cannot use it. +#[inline] +fn offset_from(dst: *const u8, original: *const u8) -> usize { + debug_assert!(dst >= original); + + dst as usize - original as usize +} + // compile-fails /// ```compile_fail diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index b01bb1a..569f8be 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -868,7 +868,7 @@ impl BytesMut { /// # SAFETY /// /// The caller must ensure that `count` <= `self.cap`. - unsafe fn advance_unchecked(&mut self, count: usize) { + pub(crate) unsafe fn advance_unchecked(&mut self, count: usize) { // Setting the start to 0 is a no-op, so return early if this is the // case. if count == 0 { @@ -1713,6 +1713,7 @@ unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) static SHARED_VTABLE: Vtable = Vtable { clone: shared_v_clone, to_vec: shared_v_to_vec, + to_mut: shared_v_to_mut, is_unique: crate::bytes::shared_is_unique, drop: shared_v_drop, }; @@ -1747,6 +1748,35 @@ unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> V } +unsafe fn shared_v_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut { + let shared: *mut Shared = data.load(Ordering::Relaxed).cast(); + + if (*shared).is_unique() { + let shared = &mut *shared; + + // The capacity is always the original capacity of the buffer + // minus the offset from the start of the buffer + let v = &mut shared.vec; + let v_capacity = v.capacity(); + let v_ptr = v.as_mut_ptr(); + let offset = offset_from(ptr as *mut u8, v_ptr); + let cap = v_capacity - offset; + + let ptr = vptr(ptr as *mut u8); + + BytesMut { + ptr, + len, + cap, + data: shared, + } + } else { + let v = slice::from_raw_parts(ptr, len).to_vec(); + release_shared(shared); + BytesMut::from_vec(v) + } +} + unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) { data.with_mut(|shared| { release_shared(*shared as *mut Shared); diff --git a/tests/test_bytes.rs b/tests/test_bytes.rs index 84c3d5a..2f283af 100644 --- a/tests/test_bytes.rs +++ b/tests/test_bytes.rs @@ -1172,3 +1172,114 @@ fn shared_is_unique() { drop(b); assert!(c.is_unique()); } + +#[test] +fn test_bytes_make_mut_static() { + let bs = b"1b23exfcz3r"; + + // Test STATIC_VTABLE.to_mut + let bytes_mut = Bytes::from_static(bs).make_mut(); + assert_eq!(bytes_mut, bs[..]); +} + +#[test] +fn test_bytes_make_mut_bytes_mut_vec() { + let bs = b"1b23exfcz3r"; + let bs_long =
b"1b23exfcz3r1b23exfcz3r"; + + // Test case where kind == KIND_VEC + let mut bytes_mut: BytesMut = bs[..].into(); + bytes_mut = bytes_mut.freeze().make_mut(); + assert_eq!(bytes_mut, bs[..]); + bytes_mut.extend_from_slice(&bs[..]); + assert_eq!(bytes_mut, bs_long[..]); +} + +#[test] +fn test_bytes_make_mut_bytes_mut_shared() { + let bs = b"1b23exfcz3r"; + + // Set kind to KIND_ARC so that after freeze, Bytes will use bytes_mut.SHARED_VTABLE + let mut bytes_mut: BytesMut = bs[..].into(); + drop(bytes_mut.split_off(bs.len())); + + let b1 = bytes_mut.freeze(); + let b2 = b1.clone(); + + // shared.is_unique() = False + let mut b1m = b1.make_mut(); + assert_eq!(b1m, bs[..]); + b1m[0] = b'9'; + + // shared.is_unique() = True + let b2m = b2.make_mut(); + assert_eq!(b2m, bs[..]); +} + +#[test] +fn test_bytes_make_mut_bytes_mut_offset() { + let bs = b"1b23exfcz3r"; + + // Test bytes_mut.SHARED_VTABLE.to_mut impl where offset != 0 + let mut bytes_mut1: BytesMut = bs[..].into(); + let bytes_mut2 = bytes_mut1.split_off(9); + + let b1 = bytes_mut1.freeze(); + let b2 = bytes_mut2.freeze(); + + let b1m = b1.make_mut(); + let b2m = b2.make_mut(); + + assert_eq!(b2m, bs[9..]); + assert_eq!(b1m, bs[..9]); +} + +#[test] +fn test_bytes_make_mut_promotable_even_vec() { + let vec = vec![33u8; 1024]; + + // Test case where kind == KIND_VEC + let b1 = Bytes::from(vec.clone()); + let b1m = b1.make_mut(); + assert_eq!(b1m, vec); +} + +#[test] +fn test_bytes_make_mut_promotable_even_arc_1() { + let vec = vec![33u8; 1024]; + + // Test case where kind == KIND_ARC, ref_cnt == 1 + let b1 = Bytes::from(vec.clone()); + drop(b1.clone()); + let b1m = b1.make_mut(); + assert_eq!(b1m, vec); +} + +#[test] +fn test_bytes_make_mut_promotable_even_arc_2() { + let vec = vec![33u8; 1024]; + + // Test case where kind == KIND_ARC, ref_cnt == 2 + let b1 = Bytes::from(vec.clone()); + let b2 = b1.clone(); + let b1m = b1.make_mut(); + assert_eq!(b1m, vec); + + // Test case where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1 + let b2m = b2.make_mut(); + assert_eq!(b2m, vec); +} + +#[test] +fn test_bytes_make_mut_promotable_even_arc_offset() { + let vec = vec![33u8; 1024]; + + // Test case where offset != 0 + let mut b1 = Bytes::from(vec.clone()); + let b2 = b1.split_off(20); + let b1m = b1.make_mut(); + let b2m = b2.make_mut(); + + assert_eq!(b2m, vec[20..]); + assert_eq!(b1m, vec[..20]); +} diff --git a/tests/test_bytes_odd_alloc.rs b/tests/test_bytes_odd_alloc.rs index 27ed877..8008a0e 100644 --- a/tests/test_bytes_odd_alloc.rs +++ b/tests/test_bytes_odd_alloc.rs @@ -95,3 +95,53 @@ fn test_bytes_into_vec() { assert_eq!(Vec::from(b2), vec[20..]); assert_eq!(Vec::from(b1), vec[..20]); } + +#[test] +fn test_bytes_make_mut_vec() { + let vec = vec![33u8; 1024]; + + // Test case where kind == KIND_VEC + let b1 = Bytes::from(vec.clone()); + let b1m = b1.make_mut(); + assert_eq!(b1m, vec); +} + +#[test] +fn test_bytes_make_mut_arc_1() { + let vec = vec![33u8; 1024]; + + // Test case where kind == KIND_ARC, ref_cnt == 1 + let b1 = Bytes::from(vec.clone()); + drop(b1.clone()); + let b1m = b1.make_mut(); + assert_eq!(b1m, vec); +} + +#[test] +fn test_bytes_make_mut_arc_2() { + let vec = vec![33u8; 1024]; + + // Test case where kind == KIND_ARC, ref_cnt == 2 + let b1 = Bytes::from(vec.clone()); + let b2 = b1.clone(); + let b1m = b1.make_mut(); + assert_eq!(b1m, vec); + + // Test case where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1 + let b2m = b2.make_mut(); + assert_eq!(b2m, vec); +} + +#[test] +fn 
test_bytes_make_mut_arc_offset() { + let vec = vec![33u8; 1024]; + + // Test case where offset != 0 + let mut b1 = Bytes::from(vec.clone()); + let b2 = b1.split_off(20); + let b1m = b1.make_mut(); + let b2m = b2.make_mut(); + + assert_eq!(b2m, vec[20..]); + assert_eq!(b1m, vec[..20]); +} From 4950c503768fcebce6f9ab9dbaac2a7da30b35ba Mon Sep 17 00:00:00 2001 From: Brad Dunbar Date: Sat, 11 May 2024 13:41:50 -0400 Subject: [PATCH 51/53] Offset from (#705) --- src/bytes.rs | 25 ++++--------------------- src/bytes_mut.rs | 19 +------------------ src/lib.rs | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 39 deletions(-) diff --git a/src/bytes.rs b/src/bytes.rs index b4359b0..e23d9a8 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -15,7 +15,7 @@ use crate::buf::IntoIter; #[allow(unused)] use crate::loom::sync::atomic::AtomicMut; use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; -use crate::{Buf, BytesMut}; +use crate::{offset_from, Buf, BytesMut}; /// A cheaply cloneable and sliceable chunk of contiguous memory. /// @@ -1037,7 +1037,7 @@ unsafe fn promotable_to_vec( let buf = f(shared); - let cap = (ptr as usize - buf as usize) + len; + let cap = offset_from(ptr, buf) + len; // Copy back buffer ptr::copy(ptr, buf, len); @@ -1150,7 +1150,7 @@ unsafe fn promotable_is_unique(data: &AtomicPtr<()>) -> bool { } unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) { - let cap = (offset as usize - buf as usize) + len; + let cap = offset_from(offset, buf) + len; dealloc(buf, Layout::from_size_align(cap, 1).unwrap()) } @@ -1312,7 +1312,7 @@ unsafe fn shallow_clone_vec( // vector. let shared = Box::new(Shared { buf, - cap: (offset as usize - buf as usize) + len, + cap: offset_from(offset, buf) + len, // Initialize refcount to 2. One for this reference, and one // for the new clone that will be returned from // `shallow_clone`. @@ -1422,23 +1422,6 @@ where new_addr as *mut u8 } -/// Precondition: dst >= original -/// -/// The following line is equivalent to: -/// -/// ```rust,ignore -/// self.ptr.as_ptr().offset_from(ptr) as usize; -/// ``` -/// -/// But due to min rust is 1.39 and it is only stabilized -/// in 1.47, we cannot use it. -#[inline] -fn offset_from(dst: *const u8, original: *const u8) -> usize { - debug_assert!(dst >= original); - - dst as usize - original as usize -} - // compile-fails /// ```compile_fail diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs index 569f8be..537f01a 100644 --- a/src/bytes_mut.rs +++ b/src/bytes_mut.rs @@ -17,7 +17,7 @@ use crate::bytes::Vtable; #[allow(unused)] use crate::loom::sync::atomic::AtomicMut; use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering}; -use crate::{Buf, BufMut, Bytes}; +use crate::{offset_from, Buf, BufMut, Bytes}; /// A unique reference to a contiguous slice of memory. /// @@ -1683,23 +1683,6 @@ fn invalid_ptr<T>(addr: usize) -> *mut T { ptr.cast::<T>() } -/// Precondition: dst >= original -/// -/// The following line is equivalent to: -/// -/// ```rust,ignore -/// self.ptr.as_ptr().offset_from(ptr) as usize; -/// ``` -/// -/// But due to min rust is 1.39 and it is only stabilized -/// in 1.47, we cannot use it.
-#[inline] -fn offset_from(dst: *mut u8, original: *mut u8) -> usize { - debug_assert!(dst >= original); - - dst as usize - original as usize -} - unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> { let ptr = ptr.sub(off); len += off; diff --git a/src/lib.rs b/src/lib.rs index 4dd1180..7ddd220 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -148,3 +148,18 @@ fn panic_does_not_fit(size: usize, nbytes: usize) -> ! { size, nbytes ); } + +/// Precondition: dst >= original +/// +/// The following line is equivalent to: +/// +/// ```rust,ignore +/// self.ptr.as_ptr().offset_from(ptr) as usize; +/// ``` +/// +/// But due to min rust is 1.39 and it is only stabilized +/// in 1.47, we cannot use it. +#[inline] +fn offset_from(dst: *const u8, original: *const u8) -> usize { + dst as usize - original as usize +} From caf520ac7f2c466d26bd88eca33ddc53c408e17e Mon Sep 17 00:00:00 2001 From: Paolo Barbolini Date: Sun, 19 May 2024 21:28:03 +0200 Subject: [PATCH 52/53] Fix iter tests to use the actual bytes IntoIter instead of std (#707) --- tests/test_iter.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_iter.rs b/tests/test_iter.rs index a5bfddd..bad9018 100644 --- a/tests/test_iter.rs +++ b/tests/test_iter.rs @@ -1,11 +1,11 @@ #![warn(rust_2018_idioms)] -use bytes::Bytes; +use bytes::{buf::IntoIter, Bytes}; #[test] fn iter_len() { let buf = Bytes::from_static(b"hello world"); - let iter = buf.iter(); + let iter = IntoIter::new(buf); assert_eq!(iter.size_hint(), (11, Some(11))); assert_eq!(iter.len(), 11); } @@ -13,8 +13,8 @@ #[test] fn empty_iter_len() { - let buf = Bytes::from_static(b""); - let iter = buf.iter(); + let buf = Bytes::new(); + let iter = IntoIter::new(buf); assert_eq!(iter.size_hint(), (0, Some(0))); assert_eq!(iter.len(), 0); From fa1daac3ae1dcb07dffe3a41a041dffd6edf177b Mon Sep 17 00:00:00 2001 From: Anthony Ramine <123095+nox@users.noreply.github.com> Date: Tue, 28 May 2024 10:14:02 +0200 Subject: [PATCH 53/53] Change Bytes::make_mut to impl From<Bytes> for BytesMut (closes #709) (#710) Arc<T>::make_mut returns a &mut T, such an API is doable for Bytes too and thus we should reserve Bytes::make_mut for that. Furthermore, it would be helpful to use From<Bytes> as a trait bound in some cases with other traits such as Hyper's body trait, where Hyper gives you Bytes values. Finally, making it impl From<Bytes> for BytesMut means the API is more easily discoverable as it appears on both Bytes and BytesMut. --- src/bytes.rs | 44 ++++++++++++++++++----------------- src/bytes/promotable.rs | 0 tests/test_bytes.rs | 40 +++++++++++++++---------------- tests/test_bytes_odd_alloc.rs | 22 +++++++++--------- 4 files changed, 54 insertions(+), 52 deletions(-) create mode 100644 src/bytes/promotable.rs diff --git a/src/bytes.rs b/src/bytes.rs index e23d9a8..e0c33b3 100644 --- a/src/bytes.rs +++ b/src/bytes.rs @@ -525,32 +525,12 @@ impl Bytes { /// ``` pub fn try_into_mut(self) -> Result<BytesMut, Bytes> { if self.is_unique() { - Ok(self.make_mut()) + Ok(self.into()) } else { Err(self) } } - /// Convert self into `BytesMut`. - /// - /// If `self` is unique for the entire original buffer, this will return a - /// `BytesMut` with the contents of `self` without copying. - /// If `self` is not unique for the entire original buffer, this will make - /// a copy of `self` subset of the original buffer in a new `BytesMut`.
- /// - /// # Examples - /// - /// ``` - /// use bytes::{Bytes, BytesMut}; - /// - /// let bytes = Bytes::from(b"hello".to_vec()); - /// assert_eq!(bytes.make_mut(), BytesMut::from(&b"hello"[..])); - /// ``` - pub fn make_mut(self) -> BytesMut { - let bytes = ManuallyDrop::new(self); - unsafe { (bytes.vtable.to_mut)(&bytes.data, bytes.ptr, bytes.len) } - } - #[inline] pub(crate) unsafe fn with_vtable( ptr: *const u8, @@ -932,6 +912,28 @@ impl From<Vec<u8>> for Bytes { } } +impl From<Bytes> for BytesMut { + /// Convert self into `BytesMut`. + /// + /// If `bytes` is unique for the entire original buffer, this will return a + /// `BytesMut` with the contents of `bytes` without copying. + /// If `bytes` is not unique for the entire original buffer, this will make + /// a copy of `bytes` subset of the original buffer in a new `BytesMut`. + /// + /// # Examples + /// + /// ``` + /// use bytes::{Bytes, BytesMut}; + /// + /// let bytes = Bytes::from(b"hello".to_vec()); + /// assert_eq!(BytesMut::from(bytes), BytesMut::from(&b"hello"[..])); + /// ``` + fn from(bytes: Bytes) -> Self { + let bytes = ManuallyDrop::new(bytes); + unsafe { (bytes.vtable.to_mut)(&bytes.data, bytes.ptr, bytes.len) } + } +} + impl From<String> for Bytes { fn from(s: String) -> Bytes { Bytes::from(s.into_bytes()) } } diff --git a/src/bytes/promotable.rs b/src/bytes/promotable.rs new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_bytes.rs b/tests/test_bytes.rs index 2f283af..3ac4298 100644 --- a/tests/test_bytes.rs +++ b/tests/test_bytes.rs @@ -1174,29 +1174,29 @@ fn shared_is_unique() { } #[test] -fn test_bytes_make_mut_static() { +fn test_bytesmut_from_bytes_static() { let bs = b"1b23exfcz3r"; // Test STATIC_VTABLE.to_mut - let bytes_mut = Bytes::from_static(bs).make_mut(); + let bytes_mut = BytesMut::from(Bytes::from_static(bs)); assert_eq!(bytes_mut, bs[..]); } #[test] -fn test_bytes_make_mut_bytes_mut_vec() { +fn test_bytesmut_from_bytes_bytes_mut_vec() { let bs = b"1b23exfcz3r"; let bs_long = b"1b23exfcz3r1b23exfcz3r"; // Test case where kind == KIND_VEC let mut bytes_mut: BytesMut = bs[..].into(); - bytes_mut = bytes_mut.freeze().make_mut(); + bytes_mut = BytesMut::from(bytes_mut.freeze()); assert_eq!(bytes_mut, bs[..]); bytes_mut.extend_from_slice(&bs[..]); assert_eq!(bytes_mut, bs_long[..]); } #[test] -fn test_bytes_make_mut_bytes_mut_shared() { +fn test_bytesmut_from_bytes_bytes_mut_shared() { let bs = b"1b23exfcz3r"; // Set kind to KIND_ARC so that after freeze, Bytes will use bytes_mut.SHARED_VTABLE @@ -1207,17 +1207,17 @@ fn test_bytes_make_mut_bytes_mut_shared() { let b2 = b1.clone(); // shared.is_unique() = False - let mut b1m = b1.make_mut(); + let mut b1m = BytesMut::from(b1); assert_eq!(b1m, bs[..]); b1m[0] = b'9'; // shared.is_unique() = True - let b2m = b2.make_mut(); + let b2m = BytesMut::from(b2); assert_eq!(b2m, bs[..]); } #[test] -fn test_bytes_make_mut_bytes_mut_offset() { +fn test_bytesmut_from_bytes_bytes_mut_offset() { let bs = b"1b23exfcz3r"; // Test bytes_mut.SHARED_VTABLE.to_mut impl where offset != 0 let mut bytes_mut1: BytesMut = bs[..].into(); @@ -1227,58 +1227,58 @@ fn test_bytes_make_mut_bytes_mut_offset() { let b1 = bytes_mut1.freeze(); let b2 = bytes_mut2.freeze(); - let b1m = b1.make_mut(); - let b2m = b2.make_mut(); + let b1m = BytesMut::from(b1); + let b2m = BytesMut::from(b2); assert_eq!(b2m, bs[9..]); assert_eq!(b1m, bs[..9]); } #[test] -fn test_bytes_make_mut_promotable_even_vec() { +fn test_bytesmut_from_bytes_promotable_even_vec() { let vec = vec![33u8; 1024]; // Test case where kind == KIND_VEC let b1 = Bytes::from(vec.clone()); -
let b1m = b1.make_mut(); + let b1m = BytesMut::from(b1); assert_eq!(b1m, vec); } #[test] -fn test_bytes_make_mut_promotable_even_arc_1() { +fn test_bytesmut_from_bytes_promotable_even_arc_1() { let vec = vec![33u8; 1024]; // Test case where kind == KIND_ARC, ref_cnt == 1 let b1 = Bytes::from(vec.clone()); drop(b1.clone()); - let b1m = b1.make_mut(); + let b1m = BytesMut::from(b1); assert_eq!(b1m, vec); } #[test] -fn test_bytes_make_mut_promotable_even_arc_2() { +fn test_bytesmut_from_bytes_promotable_even_arc_2() { let vec = vec![33u8; 1024]; // Test case where kind == KIND_ARC, ref_cnt == 2 let b1 = Bytes::from(vec.clone()); let b2 = b1.clone(); - let b1m = b1.make_mut(); + let b1m = BytesMut::from(b1); assert_eq!(b1m, vec); // Test case where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1 - let b2m = b2.make_mut(); + let b2m = BytesMut::from(b2); assert_eq!(b2m, vec); } #[test] -fn test_bytes_make_mut_promotable_even_arc_offset() { +fn test_bytesmut_from_bytes_promotable_even_arc_offset() { let vec = vec![33u8; 1024]; // Test case where offset != 0 let mut b1 = Bytes::from(vec.clone()); let b2 = b1.split_off(20); - let b1m = b1.make_mut(); - let b2m = b2.make_mut(); + let b1m = BytesMut::from(b1); + let b2m = BytesMut::from(b2); assert_eq!(b2m, vec[20..]); assert_eq!(b1m, vec[..20]); diff --git a/tests/test_bytes_odd_alloc.rs b/tests/test_bytes_odd_alloc.rs index 8008a0e..4758dc2 100644 --- a/tests/test_bytes_odd_alloc.rs +++ b/tests/test_bytes_odd_alloc.rs @@ -6,7 +6,7 @@ use std::alloc::{GlobalAlloc, Layout, System}; use std::ptr; -use bytes::Bytes; +use bytes::{Bytes, BytesMut}; #[global_allocator] static ODD: Odd = Odd; @@ -97,50 +97,50 @@ fn test_bytes_into_vec() { } #[test] -fn test_bytes_make_mut_vec() { +fn test_bytesmut_from_bytes_vec() { let vec = vec![33u8; 1024]; // Test case where kind == KIND_VEC let b1 = Bytes::from(vec.clone()); - let b1m = b1.make_mut(); + let b1m = BytesMut::from(b1); assert_eq!(b1m, vec); } #[test] -fn test_bytes_make_mut_arc_1() { +fn test_bytesmut_from_bytes_arc_1() { let vec = vec![33u8; 1024]; // Test case where kind == KIND_ARC, ref_cnt == 1 let b1 = Bytes::from(vec.clone()); drop(b1.clone()); - let b1m = b1.make_mut(); + let b1m = BytesMut::from(b1); assert_eq!(b1m, vec); } #[test] -fn test_bytes_make_mut_arc_2() { +fn test_bytesmut_from_bytes_arc_2() { let vec = vec![33u8; 1024]; // Test case where kind == KIND_ARC, ref_cnt == 2 let b1 = Bytes::from(vec.clone()); let b2 = b1.clone(); - let b1m = b1.make_mut(); + let b1m = BytesMut::from(b1); assert_eq!(b1m, vec); // Test case where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1 - let b2m = b2.make_mut(); + let b2m = BytesMut::from(b2); assert_eq!(b2m, vec); } #[test] -fn test_bytes_make_mut_arc_offset() { +fn test_bytesmut_from_bytes_arc_offset() { let vec = vec![33u8; 1024]; // Test case where offset != 0 let mut b1 = Bytes::from(vec.clone()); let b2 = b1.split_off(20); - let b1m = b1.make_mut(); - let b2m = b2.make_mut(); + let b1m = BytesMut::from(b1); + let b2m = BytesMut::from(b2); assert_eq!(b2m, vec[20..]); assert_eq!(b1m, vec[..20]);
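Taken together, patches 50 and 53 leave two public entry points for the zero-copy conversion: the infallible `BytesMut::from(bytes)`, which copies only when the buffer is shared or partially referenced, and `Bytes::try_into_mut`, which returns `Err(self)` instead of copying. A short usage sketch, illustrative only (not part of the patch series) and using only the API the patches above introduce:

```rust
use bytes::{Bytes, BytesMut};

fn main() {
    // Unique handle over the whole original buffer: zero-copy conversion.
    let b = Bytes::from(vec![1u8, 2, 3]);
    let m = BytesMut::from(b);
    assert_eq!(&m[..], &[1, 2, 3]);

    // While a second handle is alive, `try_into_mut` refuses and hands the
    // `Bytes` back unchanged rather than copying.
    let b = Bytes::from(vec![4u8, 5, 6]);
    let b2 = b.clone();
    let b = b.try_into_mut().unwrap_err();

    // `From<Bytes> for BytesMut` never fails: with `b2` still alive it
    // falls back to copying the contents into a fresh buffer.
    let m = BytesMut::from(b);
    assert_eq!(&m[..], &[4, 5, 6]);
    drop(b2);
}
```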