diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index f8fad6de1a3cc..b2ef867bc2ed9 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -10,9 +10,10 @@
 use core::cmp::{self, Ordering};
 use core::fmt;
 use core::hash::{Hash, Hasher};
+use core::intrinsics::size_of;
 use core::iter::{repeat_with, FromIterator};
 use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop};
+use core::mem::ManuallyDrop;
 use core::ops::{Index, IndexMut, Range, RangeBounds};
 use core::ptr::{self, NonNull};
 use core::slice;
@@ -58,7 +59,7 @@ mod tests;
 const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
 const MINIMUM_CAPACITY: usize = 1; // 2 - 1
 
-const MAXIMUM_ZST_CAPACITY: usize = 1 << (core::mem::size_of::<usize>() * 8 - 1); // Largest possible power of two
+const MAXIMUM_ZST_CAPACITY: usize = 1 << (size_of::<usize>() * 8 - 1); // Largest possible power of two
 
 /// A double-ended queue implemented with a growable ring buffer.
 ///
@@ -157,7 +158,7 @@ impl<T> VecDeque<T> {
     /// Marginally more convenient
     #[inline]
     fn cap(&self) -> usize {
-        if mem::size_of::<T>() == 0 {
+        if size_of::<T>() == 0 {
             // For zero sized types, we are always at maximum capacity
             MAXIMUM_ZST_CAPACITY
         } else {
@@ -2795,7 +2796,7 @@ impl<T> From<Vec<T>> for VecDeque<T> {
         // because `usize::MAX` (the capacity returned by `capacity()` for ZST)
         // is not a power of two and thus it'll always try
         // to reserve more memory which will panic for ZST (rust-lang/rust#78532)
-        if (!buf.capacity().is_power_of_two() && mem::size_of::<T>() != 0)
+        if (!buf.capacity().is_power_of_two() && size_of::<T>() != 0)
             || (buf.capacity() < (MINIMUM_CAPACITY + 1))
             || (buf.capacity() == len)
         {
diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
index 36b7efc33a874..4576b6f174419 100644
--- a/library/alloc/src/raw_vec.rs
+++ b/library/alloc/src/raw_vec.rs
@@ -3,8 +3,8 @@
 use core::alloc::LayoutError;
 use core::cmp;
-use core::intrinsics;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::intrinsics::{self, min_align_of as align_of, size_of};
+use core::mem::{ManuallyDrop, MaybeUninit};
 use core::ops::Drop;
 use core::ptr::{self, NonNull, Unique};
 use core::slice;
@@ -171,7 +171,7 @@ impl<T, A: Allocator> RawVec<T, A> {
     }
 
     fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
-        if mem::size_of::<T>() == 0 {
+        if size_of::<T>() == 0 {
             Self::new_in(alloc)
         } else {
             // We avoid `unwrap_or_else` here because it bloats the amount of
@@ -228,7 +228,7 @@ impl<T, A: Allocator> RawVec<T, A> {
     /// This will always be `usize::MAX` if `T` is zero-sized.
     #[inline(always)]
     pub fn capacity(&self) -> usize {
-        if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
+        if size_of::<T>() == 0 { usize::MAX } else { self.cap }
     }
 
     /// Returns a shared reference to the allocator backing this `RawVec`.
@@ -237,14 +237,14 @@ impl<T, A: Allocator> RawVec<T, A> {
     }
 
     fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
-        if mem::size_of::<T>() == 0 || self.cap == 0 {
+        if size_of::<T>() == 0 || self.cap == 0 {
             None
         } else {
             // We have an allocated chunk of memory, so we can bypass runtime
             // checks to get our current layout.
             unsafe {
-                let align = mem::align_of::<T>();
-                let size = mem::size_of::<T>() * self.cap;
+                let align = align_of::<T>();
+                let size = size_of::<T>() * self.cap;
                 let layout = Layout::from_size_align_unchecked(size, align);
                 Some((self.ptr.cast().into(), layout))
             }
@@ -367,8 +367,8 @@ impl<T, A: Allocator> RawVec<T, A> {
     }
 
     fn capacity_from_bytes(excess: usize) -> usize {
-        debug_assert_ne!(mem::size_of::<T>(), 0);
-        excess / mem::size_of::<T>()
+        debug_assert_ne!(size_of::<T>(), 0);
+        excess / size_of::<T>()
     }
 
     fn set_ptr(&mut self, ptr: NonNull<[u8]>) {
@@ -387,7 +387,7 @@ impl<T, A: Allocator> RawVec<T, A> {
         // This is ensured by the calling contexts.
         debug_assert!(additional > 0);
 
-        if mem::size_of::<T>() == 0 {
+        if size_of::<T>() == 0 {
             // Since we return a capacity of `usize::MAX` when `elem_size` is
             // 0, getting to here necessarily means the `RawVec` is overfull.
             return Err(CapacityOverflow);
@@ -406,7 +406,7 @@ impl<T, A: Allocator> RawVec<T, A> {
         // - 4 if elements are moderate-sized (<= 1 KiB).
         // - 1 otherwise, to avoid wasting too much space for very short Vecs.
         // Note that `min_non_zero_cap` is computed statically.
-        let elem_size = mem::size_of::<T>();
+        let elem_size = size_of::<T>();
         let min_non_zero_cap = if elem_size == 1 {
             8
         } else if elem_size <= 1024 {
@@ -428,7 +428,7 @@ impl<T, A: Allocator> RawVec<T, A> {
     // `grow_amortized`, but this method is usually instantiated less often so
     // it's less critical.
     fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
-        if mem::size_of::<T>() == 0 {
+        if size_of::<T>() == 0 {
             // Since we return a capacity of `usize::MAX` when the type size is
             // 0, getting to here necessarily means the `RawVec` is overfull.
             return Err(CapacityOverflow);
@@ -447,7 +447,7 @@ impl<T, A: Allocator> RawVec<T, A> {
         assert!(amount <= self.capacity(), "Tried to shrink to a larger capacity");
 
         let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
-        let new_size = amount * mem::size_of::<T>();
+        let new_size = amount * size_of::<T>();
 
         let ptr = unsafe {
             let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index cb015b949305c..4481b0d47f370 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -84,7 +84,8 @@
 use core::borrow::{Borrow, BorrowMut};
 use core::cmp::Ordering::{self, Less};
-use core::mem::{self, size_of};
+use core::intrinsics::size_of;
+use core::mem;
 use core::ptr;
 
 use crate::alloc::{Allocator, Global};
@@ -411,10 +412,10 @@ impl<T> [T] {
             }};
         }
 
-        let sz_u8 = mem::size_of::<(K, u8)>();
-        let sz_u16 = mem::size_of::<(K, u16)>();
-        let sz_u32 = mem::size_of::<(K, u32)>();
-        let sz_usize = mem::size_of::<(K, usize)>();
+        let sz_u8 = size_of::<(K, u8)>();
+        let sz_u16 = size_of::<(K, u16)>();
+        let sz_u32 = size_of::<(K, u32)>();
+        let sz_usize = size_of::<(K, usize)>();
 
         let len = self.len();
         if len < 2 {
@@ -1004,7 +1005,7 @@ where
     impl<T> Drop for MergeHole<T> {
         fn drop(&mut self) {
             // `T` is not a zero-sized type, so it's okay to divide by its size.
-            let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
+            let len = (self.end as usize - self.start as usize) / size_of::<T>();
             unsafe {
                 ptr::copy_nonoverlapping(self.start, self.dest, len);
             }
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index f131d06bb18f9..006cce159db66 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -1,7 +1,7 @@
 use crate::alloc::{Allocator, Global};
 use crate::raw_vec::RawVec;
 use core::fmt;
-use core::intrinsics::arith_offset;
+use core::intrinsics::{arith_offset, size_of};
 use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccess};
 use core::marker::PhantomData;
 use core::mem::{self};
@@ -122,7 +122,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
     fn next(&mut self) -> Option<T> {
         if self.ptr as *const _ == self.end {
             None
-        } else if mem::size_of::<T>() == 0 {
+        } else if size_of::<T>() == 0 {
             // purposefully don't use 'ptr.offset' because for
             // vectors with 0-size elements this would return the
             // same pointer.
@@ -140,7 +140,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
 
     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
-        let exact = if mem::size_of::<T>() == 0 {
+        let exact = if size_of::<T>() == 0 {
             (self.end as usize).wrapping_sub(self.ptr as usize)
         } else {
             unsafe { self.end.offset_from(self.ptr) as usize }
@@ -166,7 +166,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
         // that `T: Copy` so reading elements from the buffer doesn't invalidate
         // them for `Drop`.
         unsafe {
-            if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
+            if size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
         }
     }
 }
@@ -177,7 +177,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
     fn next_back(&mut self) -> Option<T> {
         if self.end == self.ptr {
             None
-        } else if mem::size_of::<T>() == 0 {
+        } else if size_of::<T>() == 0 {
             // See above for why 'ptr.offset' isn't used
             self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };
 
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 2a83eb33fe3ec..c34ef8d8f46d6 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -57,7 +57,7 @@ use core::cmp::{self, Ordering};
 use core::convert::TryFrom;
 use core::fmt;
 use core::hash::{Hash, Hasher};
-use core::intrinsics::{arith_offset, assume};
+use core::intrinsics::{arith_offset, assume, size_of};
 use core::iter::FromIterator;
 use core::marker::PhantomData;
 use core::mem::{self, ManuallyDrop, MaybeUninit};
@@ -2105,7 +2105,7 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
             let mut me = ManuallyDrop::new(self);
             let alloc = ptr::read(me.allocator());
             let begin = me.as_mut_ptr();
-            let end = if mem::size_of::<T>() == 0 {
+            let end = if size_of::<T>() == 0 {
                 arith_offset(begin as *const i8, me.len() as isize) as *const T
             } else {
                 begin.add(me.len()) as *const T
diff --git a/library/alloc/src/vec/source_iter_marker.rs b/library/alloc/src/vec/source_iter_marker.rs
index 8c0e95559fa15..4ccf763ef894c 100644
--- a/library/alloc/src/vec/source_iter_marker.rs
+++ b/library/alloc/src/vec/source_iter_marker.rs
@@ -1,6 +1,7 @@
+use core::intrinsics::{min_align_of as align_of, size_of};
 use core::iter::{InPlaceIterable, SourceIter};
-use core::mem::{self, ManuallyDrop};
-use core::ptr::{self};
+use core::mem::ManuallyDrop;
+use core::ptr;
 
 use super::{AsIntoIter, InPlaceDrop, SpecFromIter, SpecFromIterNested, Vec};
 
@@ -31,11 +32,9 @@ where
         // a) no ZSTs as there would be no allocation to reuse and pointer arithmetic would panic
         // b) size match as required by Alloc contract
         // c) alignments match as required by Alloc contract
-        if mem::size_of::<T>() == 0
-            || mem::size_of::<T>()
-                != mem::size_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
-            || mem::align_of::<T>()
-                != mem::align_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
+        if size_of::<T>() == 0
+            || size_of::<T>() != size_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
+            || align_of::<T>() != align_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
         {
             // fallback to more generic implementations
             return SpecFromIterNested::from_iter(iterator);
diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs
index c572c66ce328b..bea4dc4b1e979 100644
--- a/library/core/src/alloc/layout.rs
+++ b/library/core/src/alloc/layout.rs
@@ -1,19 +1,11 @@
 use crate::cmp;
 use crate::fmt;
-use crate::mem;
+use crate::intrinsics::{
+    min_align_of as align_of, min_align_of_val as align_of_val, size_of, size_of_val,
+};
 use crate::num::NonZeroUsize;
 use crate::ptr::NonNull;
 
-// While this function is used in one place and its implementation
-// could be inlined, the previous attempts to do so made rustc
-// slower:
-//
-// * https://github.com/rust-lang/rust/pull/72189
-// * https://github.com/rust-lang/rust/pull/79827
-const fn size_align<T>() -> (usize, usize) {
-    (mem::size_of::<T>(), mem::align_of::<T>())
-}
-
 /// Layout of a block of memory.
 ///
 /// An instance of `Layout` describes a particular layout of memory.
@@ -121,7 +113,8 @@ impl Layout {
     #[rustc_const_stable(feature = "alloc_layout_const_new", since = "1.42.0")]
     #[inline]
     pub const fn new<T>() -> Self {
-        let (size, align) = size_align::<T>();
+        let size = size_of::<T>();
+        let align = align_of::<T>();
         // SAFETY: the align is guaranteed by Rust to be a power of two and
         // the size+align combo is guaranteed to fit in our address space. As a
         // result use the unchecked constructor here to avoid inserting code
@@ -135,7 +128,8 @@ impl Layout {
     #[stable(feature = "alloc_layout", since = "1.28.0")]
     #[inline]
     pub fn for_value<T: ?Sized>(t: &T) -> Self {
-        let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
+        let size = size_of_val(t);
+        let align = align_of_val(t);
         debug_assert!(Layout::from_size_align(size, align).is_ok());
         // SAFETY: see rationale in `new` for why this is using the unsafe variant
         unsafe { Layout::from_size_align_unchecked(size, align) }
@@ -170,7 +164,8 @@ impl Layout {
     #[unstable(feature = "layout_for_ptr", issue = "69835")]
     pub unsafe fn for_value_raw<T: ?Sized>(t: *const T) -> Self {
         // SAFETY: we pass along the prerequisites of these functions to the caller
-        let (size, align) = unsafe { (mem::size_of_val_raw(t), mem::align_of_val_raw(t)) };
+        let size = size_of_val(t);
+        let align = align_of_val(t);
         debug_assert!(Layout::from_size_align(size, align).is_ok());
         // SAFETY: see rationale in `new` for why this is using the unsafe variant
         unsafe { Layout::from_size_align_unchecked(size, align) }
@@ -393,7 +388,7 @@ impl Layout {
     #[inline]
     pub fn array<T>(n: usize) -> Result<Self, LayoutError> {
         let (layout, offset) = Layout::new::<T>().repeat(n)?;
-        debug_assert_eq!(offset, mem::size_of::<T>());
+        debug_assert_eq!(offset, size_of::<T>());
         Ok(layout.pad_to_align())
     }
 }
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index f53ba98143842..fb87c30cfdd11 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -80,6 +80,7 @@
 #![stable(feature = "rust1", since = "1.0.0")]
 
 use crate::fmt;
+use crate::intrinsics::size_of;
 use crate::marker;
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -539,7 +540,6 @@ impl<H> PartialEq for BuildHasherDefault<H> {
 impl<H> Eq for BuildHasherDefault<H> {}
 
 mod impls {
-    use crate::mem;
     use crate::slice;
 
     use super::*;
@@ -553,7 +553,7 @@ mod impls {
                 }
 
                 fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
-                    let newlen = data.len() * mem::size_of::<$ty>();
+                    let newlen = data.len() * size_of::<$ty>();
                     let ptr = data.as_ptr() as *const u8;
                     // SAFETY: `ptr` is valid and aligned, as this macro is only used
                     // for numeric primitives which have no padding. The new slice only
@@ -673,7 +673,7 @@ mod impls {
     #[stable(feature = "rust1", since = "1.0.0")]
     impl<T: ?Sized> Hash for *const T {
         fn hash<H: Hasher>(&self, state: &mut H) {
-            if mem::size_of::<Self>() == mem::size_of::<usize>() {
+            if size_of::<Self>() == size_of::<usize>() {
                 // Thin pointer
                 state.write_usize(*self as *const () as usize);
             } else {
@@ -693,7 +693,7 @@ mod impls {
     #[stable(feature = "rust1", since = "1.0.0")]
     impl<T: ?Sized> Hash for *mut T {
         fn hash<H: Hasher>(&self, state: &mut H) {
-            if mem::size_of::<Self>() == mem::size_of::<usize>() {
+            if size_of::<Self>() == size_of::<usize>() {
                 // Thin pointer
                 state.write_usize(*self as *const () as usize);
             } else {
diff --git a/library/core/src/hash/sip.rs b/library/core/src/hash/sip.rs
index 6178b0af137e8..38592151513a3 100644
--- a/library/core/src/hash/sip.rs
+++ b/library/core/src/hash/sip.rs
@@ -3,8 +3,8 @@
 #![allow(deprecated)] // the types in this module are deprecated
 
 use crate::cmp;
+use crate::intrinsics::size_of;
 use crate::marker::PhantomData;
-use crate::mem;
 use crate::ptr;
 
 /// An implementation of SipHash 1-3.
@@ -108,12 +108,12 @@ macro_rules! compress {
 /// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
 macro_rules! load_int_le {
     ($buf:expr, $i:expr, $int_ty:ident) => {{
-        debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
+        debug_assert!($i + size_of::<$int_ty>() <= $buf.len());
         let mut data = 0 as $int_ty;
         ptr::copy_nonoverlapping(
             $buf.as_ptr().add($i),
             &mut data as *mut _ as *mut u8,
-            mem::size_of::<$int_ty>(),
+            size_of::<$int_ty>(),
         );
         data.to_le()
     }};
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index 162ed7d1b8dfe..ab1e930b077ae 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -1781,7 +1781,7 @@ macro_rules! int_impl {
         #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
         #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
         #[inline]
-        pub const fn to_be_bytes(self) -> [u8; mem::size_of::<Self>()] {
+        pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
             self.to_be().to_ne_bytes()
         }
 
@@ -1799,7 +1799,7 @@ macro_rules! int_impl {
         #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
         #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
         #[inline]
-        pub const fn to_le_bytes(self) -> [u8; mem::size_of::<Self>()] {
+        pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
             self.to_le().to_ne_bytes()
         }
 
@@ -1834,7 +1834,7 @@ macro_rules! int_impl {
         // transmute them to arrays of bytes
         #[rustc_allow_const_fn_unstable(const_fn_transmute)]
         #[inline]
-        pub const fn to_ne_bytes(self) -> [u8; mem::size_of::<Self>()] {
+        pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
             // SAFETY: integers are plain old datatypes so we can always transmute them to
             // arrays of bytes
             unsafe { mem::transmute(self) }
@@ -1864,7 +1864,7 @@ macro_rules! int_impl {
         /// ```
         #[unstable(feature = "num_as_ne_bytes", issue = "76976")]
         #[inline]
-        pub fn as_ne_bytes(&self) -> &[u8; mem::size_of::<Self>()] {
+        pub fn as_ne_bytes(&self) -> &[u8; size_of::<Self>()] {
             // SAFETY: integers are plain old datatypes so we can always transmute them to
             // arrays of bytes
             unsafe { &*(self as *const Self as *const _) }
@@ -1896,7 +1896,7 @@ macro_rules! int_impl {
         #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
         #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
         #[inline]
-        pub const fn from_be_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+        pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
             Self::from_be(Self::from_ne_bytes(bytes))
         }
 
@@ -1926,7 +1926,7 @@ macro_rules! int_impl {
         #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
         #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
         #[inline]
-        pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+        pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
             Self::from_le(Self::from_ne_bytes(bytes))
         }
 
@@ -1970,7 +1970,7 @@ macro_rules! int_impl {
         // transmute to them
         #[rustc_allow_const_fn_unstable(const_fn_transmute)]
         #[inline]
-        pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+        pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
             // SAFETY: integers are plain old datatypes so we can always transmute to them
             unsafe { mem::transmute(bytes) }
         }
diff --git a/library/core/src/num/mod.rs b/library/core/src/num/mod.rs
index 6bdfa18fa434c..af50181646227 100644
--- a/library/core/src/num/mod.rs
+++ b/library/core/src/num/mod.rs
@@ -2,7 +2,7 @@
 
 #![stable(feature = "rust1", since = "1.0.0")]
 
-use crate::intrinsics;
+use crate::intrinsics::{self, size_of};
 use crate::mem;
 use crate::str::FromStr;
 
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index 8f141a3ff9e97..688d99113483d 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -1611,7 +1611,7 @@ macro_rules! uint_impl {
         #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
         #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
         #[inline]
-        pub const fn to_be_bytes(self) -> [u8; mem::size_of::<Self>()] {
+        pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
             self.to_be().to_ne_bytes()
         }
 
@@ -1629,7 +1629,7 @@ macro_rules! uint_impl {
         #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
         #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
         #[inline]
-        pub const fn to_le_bytes(self) -> [u8; mem::size_of::<Self>()] {
+        pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
             self.to_le().to_ne_bytes()
         }
 
@@ -1664,7 +1664,7 @@ macro_rules! uint_impl {
         // transmute them to arrays of bytes
         #[rustc_allow_const_fn_unstable(const_fn_transmute)]
         #[inline]
-        pub const fn to_ne_bytes(self) -> [u8; mem::size_of::<Self>()] {
+        pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
             // SAFETY: integers are plain old datatypes so we can always transmute them to
             // arrays of bytes
             unsafe { mem::transmute(self) }
@@ -1694,7 +1694,7 @@ macro_rules! uint_impl {
         /// ```
         #[unstable(feature = "num_as_ne_bytes", issue = "76976")]
         #[inline]
-        pub fn as_ne_bytes(&self) -> &[u8; mem::size_of::<Self>()] {
+        pub fn as_ne_bytes(&self) -> &[u8; size_of::<Self>()] {
             // SAFETY: integers are plain old datatypes so we can always transmute them to
             // arrays of bytes
             unsafe { &*(self as *const Self as *const _) }
@@ -1726,7 +1726,7 @@ macro_rules! uint_impl {
         #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
         #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
        #[inline]
-        pub const fn from_be_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+        pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
             Self::from_be(Self::from_ne_bytes(bytes))
         }
 
@@ -1756,7 +1756,7 @@ macro_rules! uint_impl {
         #[stable(feature = "int_to_from_bytes", since = "1.32.0")]
         #[rustc_const_stable(feature = "const_int_conversion", since = "1.44.0")]
         #[inline]
-        pub const fn from_le_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+        pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
             Self::from_le(Self::from_ne_bytes(bytes))
         }
 
@@ -1800,7 +1800,7 @@ macro_rules! uint_impl {
         // transmute to them
         #[rustc_allow_const_fn_unstable(const_fn_transmute)]
         #[inline]
-        pub const fn from_ne_bytes(bytes: [u8; mem::size_of::<Self>()]) -> Self {
+        pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
             // SAFETY: integers are plain old datatypes so we can always transmute to them
             unsafe { mem::transmute(bytes) }
         }
diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs
index 663001167865a..bf23aaa6c1d21 100644
--- a/library/core/src/ptr/const_ptr.rs
+++ b/library/core/src/ptr/const_ptr.rs
@@ -1,7 +1,6 @@
 use super::*;
 use crate::cmp::Ordering::{self, Equal, Greater, Less};
-use crate::intrinsics;
-use crate::mem;
+use crate::intrinsics::{self, size_of};
 use crate::slice::{self, SliceIndex};
 
 #[lang = "const_ptr"]
@@ -372,7 +371,7 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        let pointee_size = mem::size_of::<T>();
+        let pointee_size = size_of::<T>();
         assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
         // SAFETY: the caller must uphold the safety contract for `ptr_offset_from`.
         unsafe { intrinsics::ptr_offset_from(self, origin) }
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 807f114ea466c..f984b17e3e076 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -67,7 +67,7 @@
 use crate::cmp::Ordering;
 use crate::fmt;
 use crate::hash;
-use crate::intrinsics::{self, abort, is_aligned_and_not_null, is_nonoverlapping};
+use crate::intrinsics::{self, abort, is_aligned_and_not_null, is_nonoverlapping, size_of};
 use crate::mem::{self, MaybeUninit};
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -441,7 +441,7 @@ pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
     let x = x as *mut u8;
     let y = y as *mut u8;
-    let len = mem::size_of::<T>() * count;
+    let len = size_of::<T>() * count;
     // SAFETY: the caller must guarantee that `x` and `y` are
     // valid for writes and properly aligned.
     unsafe { swap_nonoverlapping_bytes(x, y, len) }
@@ -451,7 +451,7 @@ pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
     // For types smaller than the block optimization below,
     // just swap directly to avoid pessimizing codegen.
-    if mem::size_of::<T>() < 32 {
+    if size_of::<T>() < 32 {
         // SAFETY: the caller must guarantee that `x` and `y` are valid
         // for writes, properly aligned, and non-overlapping.
         unsafe {
@@ -477,7 +477,7 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
     struct Block(u64, u64, u64, u64);
     struct UnalignedBlock(u64, u64, u64, u64);
 
-    let block_size = mem::size_of::<Block>();
+    let block_size = size_of::<Block>();
 
     // Loop through x & y, copying them `Block` at a time
     // The optimizer should unroll the loop fully for most types
@@ -796,7 +796,7 @@ pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
     // Also, since we just wrote a valid value into `tmp`, it is guaranteed
     // to be properly initialized.
     unsafe {
-        copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::<T>());
+        copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, size_of::<T>());
         tmp.assume_init()
     }
 }
@@ -982,7 +982,7 @@ pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
     // to `dst` while `src` is owned by this function.
     unsafe {
         // `copy_nonoverlapping` takes care of debug_assert.
-        copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, mem::size_of::<T>());
+        copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, size_of::<T>());
     }
     mem::forget(src);
 }
@@ -1202,7 +1202,7 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
         }
     }
 
-    let stride = mem::size_of::<T>();
+    let stride = size_of::<T>();
     // SAFETY: `a` is a power-of-two, therefore non-zero.
     let a_minus_one = unsafe { unchecked_sub(a, 1) };
     if stride == 1 {
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index d849008b88030..1b879533b64b8 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -2,8 +2,9 @@ use crate::cmp::Ordering;
 use crate::convert::From;
 use crate::fmt;
 use crate::hash;
+use crate::intrinsics::min_align_of as align_of;
 use crate::marker::Unsize;
-use crate::mem::{self, MaybeUninit};
+use crate::mem::MaybeUninit;
 use crate::ops::{CoerceUnsized, DispatchFromDyn};
 use crate::ptr::Unique;
 use crate::slice::{self, SliceIndex};
@@ -68,11 +69,11 @@ impl<T: Sized> NonNull<T> {
     #[rustc_const_stable(feature = "const_nonnull_dangling", since = "1.32.0")]
     #[inline]
     pub const fn dangling() -> Self {
-        // SAFETY: mem::align_of() returns a non-zero usize which is then casted
+        // SAFETY: align_of() returns a non-zero usize which is then casted
         // to a *mut T. Therefore, `ptr` is not null and the conditions for
         // calling new_unchecked() are respected.
         unsafe {
-            let ptr = mem::align_of::<T>() as *mut T;
+            let ptr = align_of::<T>() as *mut T;
             NonNull::new_unchecked(ptr)
         }
     }
diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs
index cd6afdccc29d7..af4ba94c8a0ee 100644
--- a/library/core/src/ptr/unique.rs
+++ b/library/core/src/ptr/unique.rs
@@ -1,7 +1,7 @@
 use crate::convert::From;
 use crate::fmt;
+use crate::intrinsics::min_align_of as align_of;
 use crate::marker::{PhantomData, Unsize};
-use crate::mem;
 use crate::ops::{CoerceUnsized, DispatchFromDyn};
 
 /// A wrapper around a raw non-null `*mut T` that indicates that the possessor
@@ -70,9 +70,9 @@ impl<T: Sized> Unique<T> {
     /// some other means.
     #[inline]
     pub const fn dangling() -> Self {
-        // SAFETY: mem::align_of() returns a valid, non-null pointer. The
+        // SAFETY: align_of() returns a valid, non-null pointer. The
         // conditions to call new_unchecked() are thus respected.
-        unsafe { Unique::new_unchecked(mem::align_of::<T>() as *mut T) }
+        unsafe { Unique::new_unchecked(align_of::<T>() as *mut T) }
     }
 }
diff --git a/library/core/src/slice/iter.rs b/library/core/src/slice/iter.rs
index a367b4737dbac..22d978865701a 100644
--- a/library/core/src/slice/iter.rs
+++ b/library/core/src/slice/iter.rs
@@ -7,7 +7,7 @@ mod macros;
 use crate::cmp;
 use crate::cmp::Ordering;
 use crate::fmt;
-use crate::intrinsics::{assume, exact_div, unchecked_sub};
+use crate::intrinsics::{assume, exact_div, size_of, unchecked_sub};
 use crate::iter::{FusedIterator, TrustedLen, TrustedRandomAccess};
 use crate::marker::{PhantomData, Send, Sized, Sync};
 use crate::mem;
@@ -39,7 +39,7 @@ impl<'a, T> IntoIterator for &'a mut [T] {
 // Macro helper functions
 #[inline(always)]
 fn size_from_ptr<T>(_: *const T) -> usize {
-    mem::size_of::<T>()
+    size_of::<T>()
 }
 
 /// Immutable slice iterator
@@ -91,7 +91,7 @@ impl<'a, T> Iter<'a, T> {
         unsafe {
             assume(!ptr.is_null());
 
-            let end = if mem::size_of::<T>() == 0 {
+            let end = if size_of::<T>() == 0 {
                 (ptr as *const u8).wrapping_add(slice.len()) as *const T
             } else {
                 ptr.add(slice.len())
@@ -225,7 +225,7 @@ impl<'a, T> IterMut<'a, T> {
         unsafe {
             assume(!ptr.is_null());
 
-            let end = if mem::size_of::<T>() == 0 {
+            let end = if size_of::<T>() == 0 {
                 (ptr as *mut u8).wrapping_add(slice.len()) as *mut T
             } else {
                 ptr.add(slice.len())
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index 457b2a3605e8b..48f16003f6c39 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -83,7 +83,7 @@ macro_rules! iterator {
             // Unsafe because the offset must not exceed `self.len()`.
             #[inline(always)]
             unsafe fn post_inc_start(&mut self, offset: isize) -> * $raw_mut T {
-                if mem::size_of::<T>() == 0 {
+                if size_of::<T>() == 0 {
                     zst_shrink!(self, offset);
                     self.ptr.as_ptr()
                 } else {
@@ -100,7 +100,7 @@ macro_rules! iterator {
             // Unsafe because the offset must not exceed `self.len()`.
             #[inline(always)]
             unsafe fn pre_dec_end(&mut self, offset: isize) -> * $raw_mut T {
-                if mem::size_of::<T>() == 0 {
+                if size_of::<T>() == 0 {
                     zst_shrink!(self, offset);
                     self.ptr.as_ptr()
                 } else {
@@ -140,7 +140,7 @@ macro_rules! iterator {
             // since we check if the iterator is empty first.
             unsafe {
                 assume(!self.ptr.as_ptr().is_null());
-                if mem::size_of::<T>() != 0 {
+                if size_of::<T>() != 0 {
                     assume(!self.end.is_null());
                 }
                 if is_empty!(self) {
@@ -166,7 +166,7 @@ macro_rules! iterator {
             fn nth(&mut self, n: usize) -> Option<$elem> {
                 if n >= len!(self) {
                     // This iterator is now empty.
-                    if mem::size_of::<T>() == 0 {
+                    if size_of::<T>() == 0 {
                         // We have to do it this way as `ptr` may never be 0, but `end`
                         // could be (due to wrapping).
                         self.end = self.ptr.as_ptr();
@@ -347,7 +347,7 @@ macro_rules! iterator {
             // empty first.
             unsafe {
                 assume(!self.ptr.as_ptr().is_null());
-                if mem::size_of::<T>() != 0 {
+                if size_of::<T>() != 0 {
                     assume(!self.end.is_null());
                 }
                 if is_empty!(self) {
diff --git a/library/core/src/slice/mod.rs b/library/core/src/slice/mod.rs
index 58bf74c8cf470..e1832fa61b9df 100644
--- a/library/core/src/slice/mod.rs
+++ b/library/core/src/slice/mod.rs
@@ -9,6 +9,7 @@
 #![stable(feature = "rust1", since = "1.0.0")]
 
 use crate::cmp::Ordering::{self, Equal, Greater, Less};
+use crate::intrinsics::{min_align_of as align_of, size_of};
 use crate::marker::Copy;
 use crate::mem;
 use crate::num::NonZeroUsize;
@@ -585,9 +586,9 @@ impl<T> [T] {
 
         let fast_unaligned = cfg!(any(target_arch = "x86", target_arch = "x86_64"));
 
-        if fast_unaligned && mem::size_of::<T>() == 1 {
+        if fast_unaligned && size_of::<T>() == 1 {
             // Use the llvm.bswap intrinsic to reverse u8s in a usize
-            let chunk = mem::size_of::<usize>();
+            let chunk = size_of::<usize>();
             while i + chunk - 1 < ln / 2 {
                 // SAFETY: There are several things to check here:
                 //
@@ -620,9 +621,9 @@ impl<T> [T] {
             }
         }
 
-        if fast_unaligned && mem::size_of::<T>() == 2 {
+        if fast_unaligned && size_of::<T>() == 2 {
             // Use rotate-by-16 to reverse u16s in a u32
-            let chunk = mem::size_of::<u32>() / 2;
+            let chunk = size_of::<u32>() / 2;
             while i + chunk - 1 < ln / 2 {
                 // SAFETY: An unaligned u32 can be read from `i` if `i + 1 < ln`
                 // (and obviously `i < ln`), because each element is 2 bytes and
@@ -3032,9 +3033,9 @@ impl<T> [T] {
             }
             a << k
         }
-        let gcd: usize = gcd(mem::size_of::<T>(), mem::size_of::<U>());
-        let ts: usize = mem::size_of::<U>() / gcd;
-        let us: usize = mem::size_of::<T>() / gcd;
+        let gcd: usize = gcd(size_of::<T>(), size_of::<U>());
+        let ts: usize = size_of::<U>() / gcd;
+        let us: usize = size_of::<T>() / gcd;
 
         // Armed with this knowledge, we can find how many `U`s we can fit!
         let us_len = self.len() / ts * us;
@@ -3076,7 +3077,7 @@ impl<T> [T] {
     #[stable(feature = "slice_align_to", since = "1.30.0")]
     pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
         // Note that most of this function will be constant-evaluated,
-        if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
+        if size_of::<U>() == 0 || size_of::<T>() == 0 {
             // handle ZSTs specially, which is – don't handle them at all.
             return (self, &[], &[]);
         }
@@ -3085,7 +3086,7 @@ impl<T> [T] {
         // ptr.align_offset.
         let ptr = self.as_ptr();
         // SAFETY: See the `align_to_mut` method for the detailed safety comment.
-        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
+        let offset = unsafe { crate::ptr::align_offset(ptr, align_of::<U>()) };
         if offset > self.len() {
             (self, &[], &[])
         } else {
@@ -3136,7 +3137,7 @@ impl<T> [T] {
     #[stable(feature = "slice_align_to", since = "1.30.0")]
     pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
         // Note that most of this function will be constant-evaluated,
-        if mem::size_of::<U>() == 0 || mem::size_of::<T>() == 0 {
+        if size_of::<U>() == 0 || size_of::<T>() == 0 {
             // handle ZSTs specially, which is – don't handle them at all.
             return (self, &mut [], &mut []);
         }
@@ -3151,7 +3152,7 @@ impl<T> [T] {
         // valid pointer `ptr` (it comes from a reference to `self`) and with
         // a size that is a power of two (since it comes from the alignement for U),
         // satisfying its safety constraints.
-        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
+        let offset = unsafe { crate::ptr::align_offset(ptr, align_of::<U>()) };
         if offset > self.len() {
             (self, &mut [], &mut [])
         } else {
diff --git a/library/core/src/slice/rotate.rs b/library/core/src/slice/rotate.rs
index a89596b15ef94..3c5f1e7018b7c 100644
--- a/library/core/src/slice/rotate.rs
+++ b/library/core/src/slice/rotate.rs
@@ -1,7 +1,8 @@
 // ignore-tidy-undocumented-unsafe
 
 use crate::cmp;
-use crate::mem::{self, MaybeUninit};
+use crate::intrinsics::size_of;
+use crate::mem::MaybeUninit;
 use crate::ptr;
 
 /// Rotates the range `[mid-left, mid+right)` such that the element at `mid` becomes the first
@@ -65,7 +66,7 @@ use crate::ptr;
 /// when `left < right` the swapping happens from the left instead.
 pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize) {
     type BufType = [usize; 32];
-    if mem::size_of::<T>() == 0 {
+    if size_of::<T>() == 0 {
         return;
     }
     loop {
@@ -73,7 +74,7 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
         if (right == 0) || (left == 0) {
             return;
         }
-        if (left + right < 24) || (mem::size_of::<T>() > mem::size_of::<[usize; 4]>()) {
+        if (left + right < 24) || (size_of::<T>() > size_of::<[usize; 4]>()) {
             // Algorithm 1
             // Microbenchmarks indicate that the average performance for random shifts is better all
             // the way until about `left + right == 32`, but the worst case performance breaks even
@@ -130,7 +131,7 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
             }
             return;
         // `T` is not a zero-sized type, so it's okay to divide by its size.
-        } else if cmp::min(left, right) <= mem::size_of::<BufType>() / mem::size_of::<T>() {
+        } else if cmp::min(left, right) <= size_of::<BufType>() / size_of::<T>() {
             // Algorithm 2
             // The `[T; 0]` here is to ensure this is appropriately aligned for T
             let mut rawarray = MaybeUninit::<(BufType, [T; 0])>::uninit();
diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs
index 2a7693d27efa2..3d3f0ec9a9cad 100644
--- a/library/core/src/slice/sort.rs
+++ b/library/core/src/slice/sort.rs
@@ -9,6 +9,7 @@
 // ignore-tidy-undocumented-unsafe
 
 use crate::cmp;
+use crate::intrinsics::size_of;
 use crate::mem::{self, MaybeUninit};
 use crate::ptr;
 
@@ -268,8 +269,8 @@ where
 
     // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
     fn width<T>(l: *mut T, r: *mut T) -> usize {
-        assert!(mem::size_of::<T>() > 0);
-        (r as usize - l as usize) / mem::size_of::<T>()
+        assert!(size_of::<T>() > 0);
+        (r as usize - l as usize) / size_of::<T>()
     }
 
     loop {
@@ -758,7 +759,7 @@ where
     F: FnMut(&T, &T) -> bool,
 {
     // Sorting has no meaningful behavior on zero-sized types.
-    if mem::size_of::<T>() == 0 {
+    if size_of::<T>() == 0 {
         return;
     }
 
@@ -843,7 +844,7 @@ where
         panic!("partition_at_index index {} greater than length of slice {}", index, v.len());
     }
 
-    if mem::size_of::<T>() == 0 {
+    if size_of::<T>() == 0 {
         // Sorting has no meaningful behavior on zero-sized types. Do nothing.
     } else if index == v.len() - 1 {
         // Find max element and place it in the last position of the array. We're free to use
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index d03c19e51f3fa..cd0adae9714cd 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -116,7 +116,7 @@
 use self::Ordering::*;
 
 use crate::cell::UnsafeCell;
 use crate::fmt;
-use crate::intrinsics;
+use crate::intrinsics::{self, min_align_of as align_of};
 
 use crate::hint::spin_loop;
@@ -940,7 +940,6 @@ impl<T> AtomicPtr<T> {
     #[cfg(target_has_atomic_equal_alignment = "ptr")]
     #[unstable(feature = "atomic_from_mut", issue = "76314")]
     pub fn from_mut(v: &mut *mut T) -> &Self {
-        use crate::mem::align_of;
         let [] = [(); align_of::<AtomicPtr<()>>() - align_of::<*mut ()>()];
         // SAFETY:
         //  - the mutable reference guarantees unique ownership.
@@ -1452,7 +1451,6 @@ macro_rules! atomic_int {
             #[$cfg_align]
             #[unstable(feature = "atomic_from_mut", issue = "76314")]
             pub fn from_mut(v: &mut $int_type) -> &Self {
-                use crate::mem::align_of;
                 let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
                 // SAFETY:
                 //  - the mutable reference guarantees unique ownership.
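For reference, the substitution applied throughout the patch, as a minimal standalone sketch (illustrative only; it assumes a nightly toolchain, since `core::intrinsics` is unstable outside the standard library, and uses an arbitrary example type `u64`):

    // Sketch (assumes nightly + #![feature(core_intrinsics)]): `core::mem::size_of`
    // and `core::mem::align_of` are thin wrappers around these intrinsics, so the
    // intrinsic and the `mem` wrapper return identical values for any type.
    #![feature(core_intrinsics)]

    use core::intrinsics::{min_align_of as align_of, size_of};

    fn main() {
        // Same results as the `core::mem` wrappers the patch replaces.
        assert_eq!(size_of::<u64>(), core::mem::size_of::<u64>());
        assert_eq!(align_of::<u64>(), core::mem::align_of::<u64>());
        println!("u64: size = {}, align = {}", size_of::<u64>(), align_of::<u64>());
    }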