
Inline mem::size_of & mem::align_of #80631


Closed
wants to merge 1 commit into from
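
The whole PR applies one mechanical pattern: stop spelling out mem::size_of / mem::align_of at every call site and instead import the items once, so call sites read size_of::<T>() and align_of::<T>(). A minimal sketch of the pattern for illustration only, using the stable core::mem re-exports rather than the core::intrinsics paths the PR uses (intrinsics are only usable inside the standard library):

    // Before: fully qualified at every use.
    fn layout_before<T>() -> (usize, usize) {
        (core::mem::size_of::<T>(), core::mem::align_of::<T>())
    }

    // After: import once, call sites get shorter.
    use core::mem::{align_of, size_of};

    fn layout_after<T>() -> (usize, usize) {
        (size_of::<T>(), align_of::<T>())
    }

    fn main() {
        assert_eq!(layout_before::<u64>(), layout_after::<u64>());
    }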
9 changes: 5 additions & 4 deletions library/alloc/src/collections/vec_deque/mod.rs
@@ -10,9 +10,10 @@
use core::cmp::{self, Ordering};
use core::fmt;
use core::hash::{Hash, Hasher};
+use core::intrinsics::size_of;
use core::iter::{repeat_with, FromIterator};
use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop};
+use core::mem::ManuallyDrop;
use core::ops::{Index, IndexMut, Range, RangeBounds};
use core::ptr::{self, NonNull};
use core::slice;
@@ -58,7 +59,7 @@ mod tests;
const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
const MINIMUM_CAPACITY: usize = 1; // 2 - 1

-const MAXIMUM_ZST_CAPACITY: usize = 1 << (core::mem::size_of::<usize>() * 8 - 1); // Largest possible power of two
+const MAXIMUM_ZST_CAPACITY: usize = 1 << (size_of::<usize>() * 8 - 1); // Largest possible power of two
Review comment (Member): This could be updated to use usize::BITS
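
For reference, a sketch of the suggested alternative: usize::BITS expresses the pointer width directly, so the constant can avoid the size_of-times-eight computation (usize::BITS has since been stabilized, in Rust 1.53).

    // Same value as 1 << (size_of::<usize>() * 8 - 1): the largest power of two
    // that fits in usize.
    const MAXIMUM_ZST_CAPACITY: usize = 1 << (usize::BITS - 1);

    fn main() {
        assert_eq!(MAXIMUM_ZST_CAPACITY, 1 << (std::mem::size_of::<usize>() * 8 - 1));
    }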


/// A double-ended queue implemented with a growable ring buffer.
///
@@ -157,7 +158,7 @@ impl<T> VecDeque<T> {
/// Marginally more convenient
#[inline]
fn cap(&self) -> usize {
-if mem::size_of::<T>() == 0 {
+if size_of::<T>() == 0 {
// For zero sized types, we are always at maximum capacity
MAXIMUM_ZST_CAPACITY
} else {
@@ -2795,7 +2796,7 @@ impl<T> From<Vec<T>> for VecDeque<T> {
// because `usize::MAX` (the capacity returned by `capacity()` for ZST)
// is not a power of two and thus it'll always try
// to reserve more memory which will panic for ZST (rust-lang/rust#78532)
-if (!buf.capacity().is_power_of_two() && mem::size_of::<T>() != 0)
+if (!buf.capacity().is_power_of_two() && size_of::<T>() != 0)
|| (buf.capacity() < (MINIMUM_CAPACITY + 1))
|| (buf.capacity() == len)
{
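To illustrate the case the size_of::<T>() != 0 check above guards against (rust-lang/rust#78532): a Vec of zero-sized elements reports a capacity of usize::MAX, which is not a power of two, so without the ZST exemption the conversion would try to reserve more space and panic. A small standalone sketch of the now-working conversion:

    use std::collections::VecDeque;

    fn main() {
        let v: Vec<()> = vec![(); 3];
        assert_eq!(v.capacity(), usize::MAX); // ZST vectors never allocate
        let d: VecDeque<()> = VecDeque::from(v);
        assert_eq!(d.len(), 3);
    }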
26 changes: 13 additions & 13 deletions library/alloc/src/raw_vec.rs
@@ -3,8 +3,8 @@

use core::alloc::LayoutError;
use core::cmp;
-use core::intrinsics;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::intrinsics::{self, min_align_of as align_of, size_of};
+use core::mem::{ManuallyDrop, MaybeUninit};
use core::ops::Drop;
use core::ptr::{self, NonNull, Unique};
use core::slice;
@@ -171,7 +171,7 @@ impl<T, A: Allocator> RawVec<T, A> {
}

fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
-if mem::size_of::<T>() == 0 {
+if size_of::<T>() == 0 {
Self::new_in(alloc)
} else {
// We avoid `unwrap_or_else` here because it bloats the amount of
@@ -228,7 +228,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn capacity(&self) -> usize {
-if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
+if size_of::<T>() == 0 { usize::MAX } else { self.cap }
}

/// Returns a shared reference to the allocator backing this `RawVec`.
Expand All @@ -237,14 +237,14 @@ impl<T, A: Allocator> RawVec<T, A> {
}

fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
-if mem::size_of::<T>() == 0 || self.cap == 0 {
+if size_of::<T>() == 0 || self.cap == 0 {
None
} else {
// We have an allocated chunk of memory, so we can bypass runtime
// checks to get our current layout.
unsafe {
-let align = mem::align_of::<T>();
-let size = mem::size_of::<T>() * self.cap;
+let align = align_of::<T>();
+let size = size_of::<T>() * self.cap;
let layout = Layout::from_size_align_unchecked(size, align);
Some((self.ptr.cast().into(), layout))
}
@@ -367,8 +367,8 @@ impl<T, A: Allocator> RawVec<T, A> {
}

fn capacity_from_bytes(excess: usize) -> usize {
-debug_assert_ne!(mem::size_of::<T>(), 0);
-excess / mem::size_of::<T>()
+debug_assert_ne!(size_of::<T>(), 0);
+excess / size_of::<T>()
}

fn set_ptr(&mut self, ptr: NonNull<[u8]>) {
Expand All @@ -387,7 +387,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// This is ensured by the calling contexts.
debug_assert!(additional > 0);

-if mem::size_of::<T>() == 0 {
+if size_of::<T>() == 0 {
// Since we return a capacity of `usize::MAX` when `elem_size` is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow);
Expand All @@ -406,7 +406,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// - 4 if elements are moderate-sized (<= 1 KiB).
// - 1 otherwise, to avoid wasting too much space for very short Vecs.
// Note that `min_non_zero_cap` is computed statically.
-let elem_size = mem::size_of::<T>();
+let elem_size = size_of::<T>();
let min_non_zero_cap = if elem_size == 1 {
8
} else if elem_size <= 1024 {
Expand All @@ -428,7 +428,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// `grow_amortized`, but this method is usually instantiated less often so
// it's less critical.
fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
-if mem::size_of::<T>() == 0 {
+if size_of::<T>() == 0 {
// Since we return a capacity of `usize::MAX` when the type size is
// 0, getting to here necessarily means the `RawVec` is overfull.
return Err(CapacityOverflow);
Expand All @@ -447,7 +447,7 @@ impl<T, A: Allocator> RawVec<T, A> {
assert!(amount <= self.capacity(), "Tried to shrink to a larger capacity");

let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
-let new_size = amount * mem::size_of::<T>();
+let new_size = amount * size_of::<T>();

let ptr = unsafe {
let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
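The thresholds in grow_amortized above encode a simple heuristic: double the capacity, but never start below a size-dependent floor. A standalone sketch of that floor, mirroring the constants visible in the diff:

    // Minimum non-zero capacity used when a RawVec first grows:
    // small elements get a larger floor so tiny pushes don't reallocate repeatedly.
    fn min_non_zero_cap(elem_size: usize) -> usize {
        if elem_size == 1 {
            8 // e.g. Vec<u8>
        } else if elem_size <= 1024 {
            4 // moderate-sized elements (<= 1 KiB)
        } else {
            1 // very large elements: avoid wasting space for short Vecs
        }
    }

    fn main() {
        assert_eq!(min_non_zero_cap(std::mem::size_of::<u8>()), 8);
        assert_eq!(min_non_zero_cap(std::mem::size_of::<[u64; 4]>()), 4);
        assert_eq!(min_non_zero_cap(4096), 1);
    }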
13 changes: 7 additions & 6 deletions library/alloc/src/slice.rs
@@ -84,7 +84,8 @@

use core::borrow::{Borrow, BorrowMut};
use core::cmp::Ordering::{self, Less};
-use core::mem::{self, size_of};
+use core::intrinsics::size_of;
+use core::mem;
use core::ptr;

use crate::alloc::{Allocator, Global};
@@ -411,10 +412,10 @@ impl<T> [T] {
}};
}

-let sz_u8 = mem::size_of::<(K, u8)>();
-let sz_u16 = mem::size_of::<(K, u16)>();
-let sz_u32 = mem::size_of::<(K, u32)>();
-let sz_usize = mem::size_of::<(K, usize)>();
+let sz_u8 = size_of::<(K, u8)>();
+let sz_u16 = size_of::<(K, u16)>();
+let sz_u32 = size_of::<(K, u32)>();
+let sz_usize = size_of::<(K, usize)>();

let len = self.len();
if len < 2 {
@@ -1004,7 +1005,7 @@
impl<T> Drop for MergeHole<T> {
fn drop(&mut self) {
// `T` is not a zero-sized type, so it's okay to divide by its size.
-let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
+let len = (self.end as usize - self.start as usize) / size_of::<T>();
unsafe {
ptr::copy_nonoverlapping(self.start, self.dest, len);
}
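The MergeHole::drop computation above relies on a standard trick: the number of elements between two pointers is the byte distance divided by the element size, which is only meaningful when T is not zero-sized. A safe standalone sketch:

    fn elements_between<T>(start: *const T, end: *const T) -> usize {
        let size = std::mem::size_of::<T>();
        assert!(size != 0, "distance in elements is undefined for zero-sized types");
        (end as usize - start as usize) / size
    }

    fn main() {
        let buf = [1u32, 2, 3, 4];
        let start = buf.as_ptr();
        let end = buf.as_ptr().wrapping_add(buf.len());
        assert_eq!(elements_between(start, end), 4);
    }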
10 changes: 5 additions & 5 deletions library/alloc/src/vec/into_iter.rs
@@ -1,7 +1,7 @@
use crate::alloc::{Allocator, Global};
use crate::raw_vec::RawVec;
use core::fmt;
-use core::intrinsics::arith_offset;
+use core::intrinsics::{arith_offset, size_of};
use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccess};
use core::marker::PhantomData;
use core::mem::{self};
@@ -122,7 +122,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
fn next(&mut self) -> Option<T> {
if self.ptr as *const _ == self.end {
None
-} else if mem::size_of::<T>() == 0 {
+} else if size_of::<T>() == 0 {
// purposefully don't use 'ptr.offset' because for
// vectors with 0-size elements this would return the
// same pointer.
Expand All @@ -140,7 +140,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {

#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
-let exact = if mem::size_of::<T>() == 0 {
+let exact = if size_of::<T>() == 0 {
(self.end as usize).wrapping_sub(self.ptr as usize)
} else {
unsafe { self.end.offset_from(self.ptr) as usize }
Expand All @@ -166,7 +166,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
// that `T: Copy` so reading elements from the buffer doesn't invalidate
// them for `Drop`.
unsafe {
-if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
+if size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
}
}
}
Expand All @@ -177,7 +177,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
fn next_back(&mut self) -> Option<T> {
if self.end == self.ptr {
None
-} else if mem::size_of::<T>() == 0 {
+} else if size_of::<T>() == 0 {
// See above for why 'ptr.offset' isn't used
self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };

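The comments in the hunks above describe the zero-sized-type trick used by IntoIter: for ZSTs there is no allocation to walk, so the end pointer is treated as a counter, advanced one byte at a time with arith_offset, and the remaining length is recovered as the difference of the two pointer values. The effect is observable through the public API:

    fn main() {
        let v = vec![(), (), ()];
        let mut it = v.into_iter();
        assert_eq!(it.size_hint(), (3, Some(3)));
        it.next();
        assert_eq!(it.size_hint(), (2, Some(2)));
        assert_eq!(it.count(), 2);
    }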
4 changes: 2 additions & 2 deletions library/alloc/src/vec/mod.rs
@@ -57,7 +57,7 @@ use core::cmp::{self, Ordering};
use core::convert::TryFrom;
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::intrinsics::{arith_offset, assume};
+use core::intrinsics::{arith_offset, assume, size_of};
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit};
@@ -2105,7 +2105,7 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
let mut me = ManuallyDrop::new(self);
let alloc = ptr::read(me.allocator());
let begin = me.as_mut_ptr();
-let end = if mem::size_of::<T>() == 0 {
+let end = if size_of::<T>() == 0 {
arith_offset(begin as *const i8, me.len() as isize) as *const T
} else {
begin.add(me.len()) as *const T
13 changes: 6 additions & 7 deletions library/alloc/src/vec/source_iter_marker.rs
@@ -1,6 +1,7 @@
+use core::intrinsics::{min_align_of as align_of, size_of};
use core::iter::{InPlaceIterable, SourceIter};
-use core::mem::{self, ManuallyDrop};
-use core::ptr::{self};
+use core::mem::ManuallyDrop;
+use core::ptr;

use super::{AsIntoIter, InPlaceDrop, SpecFromIter, SpecFromIterNested, Vec};

@@ -31,11 +32,9 @@ where
// a) no ZSTs as there would be no allocation to reuse and pointer arithmetic would panic
// b) size match as required by Alloc contract
// c) alignments match as required by Alloc contract
-if mem::size_of::<T>() == 0
-    || mem::size_of::<T>()
-        != mem::size_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
-    || mem::align_of::<T>()
-        != mem::align_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
+if size_of::<T>() == 0
+    || size_of::<T>() != size_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
+    || align_of::<T>() != align_of::<<<I as SourceIter>::Source as AsIntoIter>::Item>()
{
// fallback to more generic implementations
return SpecFromIterNested::from_iter(iterator);
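The three conditions above gate the in-place collect specialization: the allocation can only be reused when the source and destination element types are layout-compatible and not zero-sized. A standalone sketch of that check (hypothetical helper name, using the stable mem functions):

    fn can_reuse_allocation<Src, Dst>() -> bool {
        use std::mem::{align_of, size_of};
        size_of::<Dst>() != 0
            && size_of::<Dst>() == size_of::<Src>()
            && align_of::<Dst>() == align_of::<Src>()
    }

    fn main() {
        assert!(can_reuse_allocation::<u32, i32>()); // same size and alignment
        assert!(!can_reuse_allocation::<u32, u64>()); // size mismatch
        assert!(!can_reuse_allocation::<(), ()>()); // zero-sized
    }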
25 changes: 10 additions & 15 deletions library/core/src/alloc/layout.rs
@@ -1,19 +1,11 @@
use crate::cmp;
use crate::fmt;
-use crate::mem;
+use crate::intrinsics::{
+    min_align_of as align_of, min_align_of_val as align_of_val, size_of, size_of_val,
+};
use crate::num::NonZeroUsize;
use crate::ptr::NonNull;

-// While this function is used in one place and its implementation
-// could be inlined, the previous attempts to do so made rustc
-// slower:
-//
-// * https://github.com/rust-lang/rust/pull/72189
-// * https://github.com/rust-lang/rust/pull/79827
-const fn size_align<T>() -> (usize, usize) {
-    (mem::size_of::<T>(), mem::align_of::<T>())
-}

/// Layout of a block of memory.
///
/// An instance of `Layout` describes a particular layout of memory.
@@ -121,7 +113,8 @@ impl Layout {
#[rustc_const_stable(feature = "alloc_layout_const_new", since = "1.42.0")]
#[inline]
pub const fn new<T>() -> Self {
-let (size, align) = size_align::<T>();
+let size = size_of::<T>();
+let align = align_of::<T>();
// SAFETY: the align is guaranteed by Rust to be a power of two and
// the size+align combo is guaranteed to fit in our address space. As a
// result use the unchecked constructor here to avoid inserting code
Expand All @@ -135,7 +128,8 @@ impl Layout {
#[stable(feature = "alloc_layout", since = "1.28.0")]
#[inline]
pub fn for_value<T: ?Sized>(t: &T) -> Self {
-let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
+let size = size_of_val(t);
+let align = align_of_val(t);
debug_assert!(Layout::from_size_align(size, align).is_ok());
// SAFETY: see rationale in `new` for why this is using the unsafe variant
unsafe { Layout::from_size_align_unchecked(size, align) }
@@ -170,7 +164,8 @@ impl Layout {
#[unstable(feature = "layout_for_ptr", issue = "69835")]
pub unsafe fn for_value_raw<T: ?Sized>(t: *const T) -> Self {
// SAFETY: we pass along the prerequisites of these functions to the caller
-let (size, align) = unsafe { (mem::size_of_val_raw(t), mem::align_of_val_raw(t)) };
+let size = size_of_val(t);
+let align = align_of_val(t);
Review comment on lines 166 to +168 (Contributor Author): There is an inconsistency between the safety of the size_of_val_raw function and the intrinsic used to implement it.

debug_assert!(Layout::from_size_align(size, align).is_ok());
// SAFETY: see rationale in `new` for why this is using the unsafe variant
unsafe { Layout::from_size_align_unchecked(size, align) }
@@ -393,7 +388,7 @@ impl Layout {
#[inline]
pub fn array<T>(n: usize) -> Result<Self, LayoutError> {
let (layout, offset) = Layout::new::<T>().repeat(n)?;
-debug_assert_eq!(offset, mem::size_of::<T>());
+debug_assert_eq!(offset, size_of::<T>());
Ok(layout.pad_to_align())
}
}
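The debug_assert_eq! in Layout::array above checks that repeating a layout n times advances by exactly size_of::<T>() per element for Sized types. The observable result through the stable API:

    use std::alloc::Layout;
    use std::mem::{align_of, size_of};

    fn main() {
        let l = Layout::array::<u32>(5).unwrap();
        assert_eq!(l.size(), 5 * size_of::<u32>());
        assert_eq!(l.align(), align_of::<u32>());
    }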
8 changes: 4 additions & 4 deletions library/core/src/hash/mod.rs
@@ -80,6 +80,7 @@
#![stable(feature = "rust1", since = "1.0.0")]

use crate::fmt;
+use crate::intrinsics::size_of;
use crate::marker;

#[stable(feature = "rust1", since = "1.0.0")]
@@ -539,7 +540,6 @@ impl<H> PartialEq for BuildHasherDefault<H> {
impl<H> Eq for BuildHasherDefault<H> {}

mod impls {
-use crate::mem;
use crate::slice;

use super::*;
Expand All @@ -553,7 +553,7 @@ mod impls {
}

fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
-let newlen = data.len() * mem::size_of::<$ty>();
+let newlen = data.len() * size_of::<$ty>();
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
@@ -673,7 +673,7 @@ mod impls {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Hash for *const T {
fn hash<H: Hasher>(&self, state: &mut H) {
-if mem::size_of::<Self>() == mem::size_of::<usize>() {
+if size_of::<Self>() == size_of::<usize>() {
// Thin pointer
state.write_usize(*self as *const () as usize);
} else {
Expand All @@ -693,7 +693,7 @@ mod impls {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized> Hash for *mut T {
fn hash<H: Hasher>(&self, state: &mut H) {
-if mem::size_of::<Self>() == mem::size_of::<usize>() {
+if size_of::<Self>() == size_of::<usize>() {
// Thin pointer
state.write_usize(*self as *const () as usize);
} else {
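The pointer Hash impls above branch on whether the pointer is thin (pointer-sized, hashed as a single usize) or fat (carries metadata such as a slice length or vtable pointer, hashed via its bytes). The size comparison they rely on:

    use std::mem::size_of;

    fn main() {
        assert_eq!(size_of::<*const u8>(), size_of::<usize>()); // thin pointer
        assert_eq!(size_of::<*const [u8]>(), 2 * size_of::<usize>()); // fat: data pointer + length
        assert_eq!(size_of::<*const dyn std::fmt::Debug>(), 2 * size_of::<usize>()); // fat: data pointer + vtable
    }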
6 changes: 3 additions & 3 deletions library/core/src/hash/sip.rs
@@ -3,8 +3,8 @@
#![allow(deprecated)] // the types in this module are deprecated

use crate::cmp;
+use crate::intrinsics::size_of;
use crate::marker::PhantomData;
-use crate::mem;
use crate::ptr;

/// An implementation of SipHash 1-3.
@@ -108,12 +108,12 @@ macro_rules! compress {
/// Unsafe because: unchecked indexing at i..i+size_of(int_ty)
macro_rules! load_int_le {
($buf:expr, $i:expr, $int_ty:ident) => {{
-debug_assert!($i + mem::size_of::<$int_ty>() <= $buf.len());
+debug_assert!($i + size_of::<$int_ty>() <= $buf.len());
let mut data = 0 as $int_ty;
ptr::copy_nonoverlapping(
$buf.as_ptr().add($i),
&mut data as *mut _ as *mut u8,
-mem::size_of::<$int_ty>(),
+size_of::<$int_ty>(),
);
data.to_le()
}};
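load_int_le! above reads an unaligned little-endian integer out of the byte buffer with copy_nonoverlapping; the caller must guarantee that i + size_of::<$int_ty>() is in bounds. A safe equivalent for one concrete width, using the stable from_le_bytes:

    fn load_u32_le(buf: &[u8], i: usize) -> u32 {
        let mut bytes = [0u8; 4];
        bytes.copy_from_slice(&buf[i..i + 4]); // panics instead of being UB when out of bounds
        u32::from_le_bytes(bytes)
    }

    fn main() {
        let buf = [0x78, 0x56, 0x34, 0x12, 0xff];
        assert_eq!(load_u32_le(&buf, 0), 0x1234_5678);
    }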