diff --git a/compiler/rustc_passes/src/stability.rs b/compiler/rustc_passes/src/stability.rs
index b7e6e2d451e3f..39830db2b11db 100644
--- a/compiler/rustc_passes/src/stability.rs
+++ b/compiler/rustc_passes/src/stability.rs
@@ -54,7 +54,7 @@ fn inherit_const_stability(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
     match def_kind {
         DefKind::AssocFn | DefKind::AssocTy | DefKind::AssocConst => {
             match tcx.def_kind(tcx.local_parent(def_id)) {
-                DefKind::Impl { of_trait: true } => true,
+                DefKind::Impl { .. } => true,
                 _ => false,
             }
         }
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index 39450f69ce30a..5baf5f009cc89 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -5,8 +5,8 @@
 #[stable(feature = "alloc_module", since = "1.28.0")]
 #[doc(inline)]
 pub use core::alloc::*;
-use core::hint;
 use core::ptr::{self, NonNull};
+use core::{cmp, hint};
 
 unsafe extern "Rust" {
     // These are the magic symbols to call the global allocator. rustc generates
@@ -182,7 +182,7 @@ pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
 impl Global {
     #[inline]
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-    fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
+    fn alloc_impl_runtime(layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
         match layout.size() {
             0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
             // SAFETY: `layout` is non-zero in size,
@@ -194,10 +194,26 @@ impl Global {
         }
     }
 
+    #[inline]
+    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+    fn deallocate_impl_runtime(ptr: NonNull<u8>, layout: Layout) {
+        if layout.size() != 0 {
+            // SAFETY:
+            // * We have checked that `layout` is non-zero in size.
+            // * The caller is obligated to provide a layout that "fits", and in this case,
+            //   "fit" always means a layout that is equal to the original, because our
+            //   `allocate()`, `grow()`, and `shrink()` implementations never return a larger
+            //   allocation than requested.
+            // * Other conditions must be upheld by the caller, as per `Allocator::deallocate()`'s
+            //   safety documentation.
+            unsafe { dealloc(ptr.as_ptr(), layout) }
+        }
+    }
+
     // SAFETY: Same as `Allocator::grow`
     #[inline]
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-    unsafe fn grow_impl(
+    fn grow_impl_runtime(
         &self,
         ptr: NonNull<u8>,
         old_layout: Layout,
@@ -241,10 +257,172 @@ impl Global {
             },
         }
     }
+
+    // SAFETY: Same as `Allocator::shrink`
+    #[inline]
+    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+    fn shrink_impl_runtime(
+        &self,
+        ptr: NonNull<u8>,
+        old_layout: Layout,
+        new_layout: Layout,
+        _zeroed: bool,
+    ) -> Result<NonNull<[u8]>, AllocError> {
+        debug_assert!(
+            new_layout.size() <= old_layout.size(),
+            "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
+        );
+
+        match new_layout.size() {
+            // SAFETY: conditions must be upheld by the caller
+            0 => unsafe {
+                self.deallocate(ptr, old_layout);
+                Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
+            },
+
+            // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
+            new_size if old_layout.align() == new_layout.align() => unsafe {
+                // `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
+                hint::assert_unchecked(new_size <= old_layout.size());
+
+                let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
+                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+                Ok(NonNull::slice_from_raw_parts(ptr, new_size))
+            },
+
+            // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
+            // both the old and new memory allocation are valid for reads and writes for `new_size`
+            // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
+            // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
+            // for `dealloc` must be upheld by the caller.
+            new_size => unsafe {
+                let new_ptr = self.allocate(new_layout)?;
+                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
+                self.deallocate(ptr, old_layout);
+                Ok(new_ptr)
+            },
+        }
+    }
+
+    // SAFETY: Same as `Allocator::allocate`
+    #[inline]
+    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+    const fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
+        core::intrinsics::const_eval_select(
+            (layout, zeroed),
+            Global::alloc_impl_const,
+            Global::alloc_impl_runtime,
+        )
+    }
+
+    // SAFETY: Same as `Allocator::deallocate`
+    #[inline]
+    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+    const unsafe fn deallocate_impl(&self, ptr: NonNull<u8>, layout: Layout) {
+        core::intrinsics::const_eval_select(
+            (ptr, layout),
+            Global::deallocate_impl_const,
+            Global::deallocate_impl_runtime,
+        )
+    }
+
+    // SAFETY: Same as `Allocator::grow`
+    #[inline]
+    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+    const unsafe fn grow_impl(
+        &self,
+        ptr: NonNull<u8>,
+        old_layout: Layout,
+        new_layout: Layout,
+        zeroed: bool,
+    ) -> Result<NonNull<[u8]>, AllocError> {
+        core::intrinsics::const_eval_select(
+            (self, ptr, old_layout, new_layout, zeroed),
+            Global::grow_shrink_impl_const,
+            Global::grow_impl_runtime,
+        )
+    }
+
+    // SAFETY: Same as `Allocator::shrink`
+    #[inline]
+    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+    const unsafe fn shrink_impl(
+        &self,
+        ptr: NonNull<u8>,
+        old_layout: Layout,
+        new_layout: Layout,
+    ) -> Result<NonNull<[u8]>, AllocError> {
+        core::intrinsics::const_eval_select(
+            (self, ptr, old_layout, new_layout, false),
+            Global::grow_shrink_impl_const,
+            Global::shrink_impl_runtime,
+        )
+    }
+
+    #[inline]
+    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+    const fn alloc_impl_const(layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
+        match layout.size() {
+            0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
+            // SAFETY: `layout` is non-zero in size,
+            size => unsafe {
+                let raw_ptr = core::intrinsics::const_allocate(layout.size(), layout.align());
+                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+                if zeroed {
+                    // SAFETY: the pointer returned by `const_allocate` is valid to write to.
+                    ptr.write_bytes(0, size);
+                }
+                Ok(NonNull::slice_from_raw_parts(ptr, size))
+            },
+        }
+    }
+
+    #[inline]
+    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+    const fn deallocate_impl_const(ptr: NonNull<u8>, layout: Layout) {
+        if layout.size() != 0 {
+            // SAFETY: We checked for nonzero size; other preconditions must be upheld by caller.
+            unsafe {
+                core::intrinsics::const_deallocate(ptr.as_ptr(), layout.size(), layout.align());
+            }
+        }
+    }
+
+    #[inline]
+    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+    const fn grow_shrink_impl_const(
+        &self,
+        ptr: NonNull<u8>,
+        old_layout: Layout,
+        new_layout: Layout,
+        zeroed: bool,
+    ) -> Result<NonNull<[u8]>, AllocError> {
+        let new_ptr = self.alloc_impl(new_layout, zeroed)?;
+        // SAFETY: both pointers are valid and this operation is in bounds.
+        unsafe {
+            ptr::copy_nonoverlapping(
+                ptr.as_ptr(),
+                new_ptr.as_mut_ptr(),
+                cmp::min(old_layout.size(), new_layout.size()),
+            );
+        }
+        unsafe {
+            self.deallocate_impl(ptr, old_layout);
+        }
+        Ok(new_ptr)
+    }
 }
 
 #[unstable(feature = "allocator_api", issue = "32838")]
-unsafe impl Allocator for Global {
+#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+unsafe impl const Allocator for Global {
     #[inline]
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
@@ -260,17 +438,8 @@ unsafe impl Allocator for Global {
     #[inline]
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
-        if layout.size() != 0 {
-            // SAFETY:
-            // * We have checked that `layout` is non-zero in size.
-            // * The caller is obligated to provide a layout that "fits", and in this case,
-            //   "fit" always means a layout that is equal to the original, because our
-            //   `allocate()`, `grow()`, and `shrink()` implementations never returns a larger
-            //   allocation than requested.
-            // * Other conditions must be upheld by the caller, as per `Allocator::deallocate()`'s
-            //   safety documentation.
-            unsafe { dealloc(ptr.as_ptr(), layout) }
-        }
+        // SAFETY: all conditions must be upheld by the caller
+        unsafe { self.deallocate_impl(ptr, layout) }
     }
 
     #[inline]
@@ -305,40 +474,8 @@ unsafe impl Allocator for Global {
         old_layout: Layout,
         new_layout: Layout,
     ) -> Result<NonNull<[u8]>, AllocError> {
-        debug_assert!(
-            new_layout.size() <= old_layout.size(),
-            "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
-        );
-
-        match new_layout.size() {
-            // SAFETY: conditions must be upheld by the caller
-            0 => unsafe {
-                self.deallocate(ptr, old_layout);
-                Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
-            },
-
-            // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
-            new_size if old_layout.align() == new_layout.align() => unsafe {
-                // `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
-                hint::assert_unchecked(new_size <= old_layout.size());
-
-                let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
-                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
-                Ok(NonNull::slice_from_raw_parts(ptr, new_size))
-            },
-
-            // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
-            // both the old and new memory allocation are valid for reads and writes for `new_size`
-            // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
-            // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
-            // for `dealloc` must be upheld by the caller.
-            new_size => unsafe {
-                let new_ptr = self.allocate(new_layout)?;
-                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
-                self.deallocate(ptr, old_layout);
-                Ok(new_ptr)
-            },
-        }
+        // SAFETY: all conditions must be upheld by the caller
+        unsafe { self.shrink_impl(ptr, old_layout, new_layout) }
     }
 }
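Note on the pattern used throughout the `alloc.rs` changes above: `core::intrinsics::const_eval_select(args, const_arm, runtime_arm)` dispatches to one of two same-signature functions depending on whether the call is evaluated by the const interpreter or compiled for runtime, and the two arms must be observably equivalent. A minimal standalone sketch of that dispatch (nightly-only; the `mul*` names are illustrative and not part of this PR):

    #![feature(const_eval_select, core_intrinsics)]
    use std::intrinsics::const_eval_select;

    // Compile-time arm: must be a `const fn`.
    const fn mul_const(a: u32, b: u32) -> u32 {
        a * b
    }

    // Runtime arm: an ordinary fn with the same signature and behavior.
    fn mul_runtime(a: u32, b: u32) -> u32 {
        a * b
    }

    // Callers cannot observe which arm ran; both must agree.
    const fn mul(a: u32, b: u32) -> u32 {
        const_eval_select((a, b), mul_const, mul_runtime)
    }

    const IN_CONST: u32 = mul(6, 7);

    fn main() {
        assert_eq!(mul(6, 7), IN_CONST); // 42 either way
    }

This is presumably also why `alloc_impl_runtime` and friends lose their `unsafe` qualifier: the arms are passed as function values, so the outer `const unsafe fn` wrappers carry the safety contract instead.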
diff --git a/library/alloc/src/collections/mod.rs b/library/alloc/src/collections/mod.rs
index 212d7c8465b6e..e09326759fd1e 100644
--- a/library/alloc/src/collections/mod.rs
+++ b/library/alloc/src/collections/mod.rs
@@ -84,13 +84,14 @@ impl TryReserveError {
         reason = "Uncertain how much info should be exposed",
         issue = "48043"
     )]
-    pub fn kind(&self) -> TryReserveErrorKind {
+    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+    pub const fn kind(&self) -> TryReserveErrorKind {
         self.kind.clone()
     }
 }
 
 /// Details of the allocation that caused a `TryReserveError`
-#[derive(Clone, PartialEq, Eq, Debug)]
+#[derive(PartialEq, Eq, Debug)]
 #[unstable(
     feature = "try_reserve_kind",
     reason = "Uncertain how much info should be exposed",
     issue = "48043"
 )]
@@ -120,6 +121,24 @@ pub enum TryReserveErrorKind {
     },
 }
 
+#[unstable(
+    feature = "try_reserve_kind",
+    reason = "Uncertain how much info should be exposed",
+    issue = "48043"
+)]
+#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+#[cfg(not(test))]
+impl const Clone for TryReserveErrorKind {
+    fn clone(&self) -> Self {
+        match self {
+            TryReserveErrorKind::CapacityOverflow => TryReserveErrorKind::CapacityOverflow,
+            TryReserveErrorKind::AllocError { layout, non_exhaustive: () } => {
+                TryReserveErrorKind::AllocError { layout: *layout, non_exhaustive: () }
+            }
+        }
+    }
+}
+
 #[cfg(test)]
 pub use realalloc::collections::TryReserveErrorKind;
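The hand-written `impl const Clone` above replaces the derived impl because `#[derive(Clone)]` does not currently emit a `const` implementation (the `derive_const` feature enabled in `lib.rs` below provides a generated equivalent). A minimal sketch of the same pattern on a made-up type, assuming a nightly where `Clone` is a `#[const_trait]`:

    #![feature(const_trait_impl, const_clone)]

    #[derive(PartialEq, Eq, Debug)]
    struct Meters(u64);

    // Written by hand so the impl is callable in const contexts;
    // `#[derive_const(Clone)]` would generate the equivalent.
    impl const Clone for Meters {
        fn clone(&self) -> Self {
            Meters(self.0)
        }
    }

    const COPY: Meters = Meters(5).clone();

    fn main() {
        assert_eq!(COPY, Meters(5));
    }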
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index 3f391fe2c1de8..bb471b5b1ecac 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -101,11 +101,16 @@
 #![feature(char_internals)]
 #![feature(clone_to_uninit)]
 #![feature(coerce_unsized)]
+#![feature(const_clone)]
+#![feature(const_cmp)]
 #![feature(const_convert)]
 #![feature(const_default)]
+#![feature(const_destruct)]
 #![feature(const_eval_select)]
 #![feature(const_heap)]
+#![feature(const_option_ops)]
+#![feature(const_try)]
 #![feature(copied_into_inner)]
 #![feature(core_intrinsics)]
 #![feature(deprecated_suggestion)]
 #![feature(deref_pure_trait)]
@@ -119,6 +124,7 @@
 #![feature(fmt_internals)]
 #![feature(fn_traits)]
 #![feature(formatting_options)]
+#![feature(freeze)]
 #![feature(generic_atomic)]
 #![feature(hasher_prefixfree_extras)]
 #![feature(inplace_iteration)]
@@ -172,6 +178,7 @@
 #![feature(const_trait_impl)]
 #![feature(coroutine_trait)]
 #![feature(decl_macro)]
+#![feature(derive_const)]
 #![feature(dropck_eyepatch)]
 #![feature(fundamental)]
 #![feature(hashmap_internals)]
diff --git a/library/alloc/src/raw_vec/mod.rs b/library/alloc/src/raw_vec/mod.rs
index 236e33e2f450e..7ade262f4375b 100644
--- a/library/alloc/src/raw_vec/mod.rs
+++ b/library/alloc/src/raw_vec/mod.rs
@@ -4,7 +4,7 @@
 // Note: This module is also included in the alloctests crate using #[path] to
 // run the tests. See the comment there for an explanation why this is the case.
 
-use core::marker::PhantomData;
+use core::marker::{Destruct, PhantomData};
 use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties};
 use core::ptr::{self, Alignment, NonNull, Unique};
 use core::{cmp, hint};
@@ -24,7 +24,7 @@ mod tests;
 // only one location which panics rather than a bunch throughout the module.
 #[cfg(not(no_global_oom_handling))]
 #[cfg_attr(not(panic = "immediate-abort"), inline(never))]
-fn capacity_overflow() -> ! {
+const fn capacity_overflow() -> ! {
     panic!("capacity overflow");
 }
 
@@ -165,6 +165,31 @@ const fn min_non_zero_cap(size: usize) -> usize {
     }
 }
 
+#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+const impl<T, A: [const] Allocator> RawVec<T, A> {
+    /// Like `with_capacity`, but parameterized over the choice of
+    /// allocator for the returned `RawVec`.
+    #[cfg(not(no_global_oom_handling))]
+    #[inline]
+    pub(crate) fn with_capacity_in(capacity: usize, alloc: A) -> Self
+    {
+        Self {
+            inner: RawVecInner::with_capacity_in(capacity, alloc, T::LAYOUT),
+            _marker: PhantomData,
+        }
+    }
+
+    /// A specialized version of `self.reserve(len, 1)` which requires the
+    /// caller to ensure `len == self.capacity()`.
+    #[cfg(not(no_global_oom_handling))]
+    #[inline(never)]
+    pub(crate) fn grow_one(&mut self)
+    {
+        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
+        unsafe { self.inner.grow_one(T::LAYOUT) }
+    }
+}
+
 impl<T, A: Allocator> RawVec<T, A> {
     #[cfg(not(no_global_oom_handling))]
     pub(crate) const MIN_NON_ZERO_CAP: usize = min_non_zero_cap(size_of::<T>());
@@ -178,17 +203,6 @@ impl<T, A: Allocator> RawVec<T, A> {
         Self { inner: RawVecInner::new_in(alloc, Alignment::of::<T>()), _marker: PhantomData }
     }
 
-    /// Like `with_capacity`, but parameterized over the choice of
-    /// allocator for the returned `RawVec`.
-    #[cfg(not(no_global_oom_handling))]
-    #[inline]
-    pub(crate) fn with_capacity_in(capacity: usize, alloc: A) -> Self {
-        Self {
-            inner: RawVecInner::with_capacity_in(capacity, alloc, T::LAYOUT),
-            _marker: PhantomData,
-        }
-    }
-
     /// Like `try_with_capacity`, but parameterized over the choice of
     /// allocator for the returned `RawVec`.
     #[inline]
@@ -327,15 +341,6 @@ impl<T, A: Allocator> RawVec<T, A> {
         unsafe { self.inner.reserve(len, additional, T::LAYOUT) }
     }
 
-    /// A specialized version of `self.reserve(len, 1)` which requires the
-    /// caller to ensure `len == self.capacity()`.
-    #[cfg(not(no_global_oom_handling))]
-    #[inline(never)]
-    pub(crate) fn grow_one(&mut self) {
-        // SAFETY: All calls on self.inner pass T::LAYOUT as the elem_layout
-        unsafe { self.inner.grow_one(T::LAYOUT) }
-    }
-
     /// The same as `reserve`, but returns on errors instead of panicking or aborting.
     pub(crate) fn try_reserve(
         &mut self,
@@ -405,17 +410,12 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
     }
 }
 
-impl<A: Allocator> RawVecInner<A> {
-    #[inline]
-    const fn new_in(alloc: A, align: Alignment) -> Self {
-        let ptr = Unique::from_non_null(NonNull::without_provenance(align.as_nonzero()));
-        // `cap: 0` means "unallocated". zero-sized types are ignored.
-        Self { ptr, cap: ZERO_CAP, alloc }
-    }
-
+#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+const impl<A: [const] Allocator> RawVecInner<A> {
     #[cfg(not(no_global_oom_handling))]
     #[inline]
-    fn with_capacity_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self {
+    fn with_capacity_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self
+    {
         match Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc, elem_layout) {
             Ok(this) => {
                 unsafe {
@@ -427,31 +427,13 @@ impl<A: Allocator> RawVecInner<A> {
             Err(err) => handle_error(err),
         }
     }
-
-    #[inline]
-    fn try_with_capacity_in(
-        capacity: usize,
-        alloc: A,
-        elem_layout: Layout,
-    ) -> Result<Self, TryReserveError> {
-        Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc, elem_layout)
-    }
-
-    #[cfg(not(no_global_oom_handling))]
-    #[inline]
-    fn with_capacity_zeroed_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self {
-        match Self::try_allocate_in(capacity, AllocInit::Zeroed, alloc, elem_layout) {
-            Ok(res) => res,
-            Err(err) => handle_error(err),
-        }
-    }
-
     fn try_allocate_in(
         capacity: usize,
         init: AllocInit,
         alloc: A,
         elem_layout: Layout,
-    ) -> Result<Self, TryReserveError> {
+    ) -> Result<Self, TryReserveError>
+    {
         // We avoid `unwrap_or_else` here because it bloats the amount of
         // LLVM IR generated.
         let layout = match layout_array(capacity, elem_layout) {
@@ -484,6 +466,122 @@ impl<A: Allocator> RawVecInner<A> {
         })
     }
 
+    /// # Safety
+    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
+    ///   initially construct `self`
+    /// - `elem_layout`'s size must be a multiple of its alignment
+    #[cfg(not(no_global_oom_handling))]
+    #[inline]
+    unsafe fn grow_one(&mut self, elem_layout: Layout)
+    {
+        // SAFETY: Precondition passed to caller
+        if let Err(err) = unsafe { self.grow_amortized(self.cap.as_inner(), 1, elem_layout) } {
+            handle_error(err);
+        }
+    }
+
+    /// # Safety
+    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
+    ///   initially construct `self`
+    /// - `elem_layout`'s size must be a multiple of its alignment
+    /// - The sum of `len` and `additional` must be greater than the current capacity
+    unsafe fn grow_amortized(
+        &mut self,
+        len: usize,
+        additional: usize,
+        elem_layout: Layout,
+    ) -> Result<(), TryReserveError>
+    {
+        // This is ensured by the calling contexts.
+        debug_assert!(additional > 0);
+
+        if elem_layout.size() == 0 {
+            // Since we return a capacity of `usize::MAX` when `elem_size` is
+            // 0, getting to here necessarily means the `RawVec` is overfull.
+            return Err(CapacityOverflow.into());
+        }
+
+        // Nothing we can really do about these checks, sadly.
+        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
+
+        // This guarantees exponential growth. The doubling cannot overflow
+        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
+        let cap = cmp::max(self.cap.as_inner() * 2, required_cap);
+        let cap = cmp::max(min_non_zero_cap(elem_layout.size()), cap);
+
+        // SAFETY:
+        // - cap >= len + additional
+        // - other preconditions passed to caller
+        let ptr = unsafe { self.finish_grow(cap, elem_layout)? };
+
+        // SAFETY: `finish_grow` would have failed if `cap > isize::MAX`
+        unsafe { self.set_ptr_and_cap(ptr, cap) };
+        Ok(())
+    }
+
+    /// # Safety
+    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
+    ///   initially construct `self`
+    /// - `elem_layout`'s size must be a multiple of its alignment
+    /// - `cap` must be greater than the current capacity
+    // not marked inline(never) since we want optimizers to be able to observe the specifics of this
+    // function, see tests/codegen-llvm/vec-reserve-extend.rs.
+    #[cold]
+    unsafe fn finish_grow(
+        &self,
+        cap: usize,
+        elem_layout: Layout,
+    ) -> Result<NonNull<[u8]>, TryReserveError>
+    {
+        let new_layout = layout_array(cap, elem_layout)?;
+
+        let memory = if let Some((ptr, old_layout)) = unsafe { self.current_memory(elem_layout) } {
+            // FIXME(const-hack): switch to `debug_assert_eq`
+            debug_assert!(old_layout.align() == new_layout.align());
+            unsafe {
+                // The allocator checks for alignment equality
+                hint::assert_unchecked(old_layout.align() == new_layout.align());
+                self.alloc.grow(ptr, old_layout, new_layout)
+            }
+        } else {
+            self.alloc.allocate(new_layout)
+        };
+
+        // FIXME(const-hack): switch back to `map_err`
+        match memory {
+            Ok(memory) => Ok(memory),
+            Err(_) => Err(AllocError { layout: new_layout, non_exhaustive: () }.into()),
+        }
+    }
+}
+
+impl<A: Allocator> RawVecInner<A> {
+    #[inline]
+    const fn new_in(alloc: A, align: Alignment) -> Self {
+        let ptr = Unique::from_non_null(NonNull::without_provenance(align.as_nonzero()));
+        // `cap: 0` means "unallocated". zero-sized types are ignored.
+        Self { ptr, cap: ZERO_CAP, alloc }
+    }
+
+    #[inline]
+    fn try_with_capacity_in(
+        capacity: usize,
+        alloc: A,
+        elem_layout: Layout,
+    ) -> Result<Self, TryReserveError> {
+        Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc, elem_layout)
+    }
+
+    #[cfg(not(no_global_oom_handling))]
+    #[inline]
+    fn with_capacity_zeroed_in(capacity: usize, alloc: A, elem_layout: Layout) -> Self {
+        match Self::try_allocate_in(capacity, AllocInit::Zeroed, alloc, elem_layout) {
+            Ok(res) => res,
+            Err(err) => handle_error(err),
+        }
+    }
+
     #[inline]
     unsafe fn from_raw_parts_in(ptr: *mut u8, cap: Cap, alloc: A) -> Self {
         Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }
     }
@@ -519,7 +617,8 @@ impl<A: Allocator> RawVecInner<A> {
     ///   initially construct `self`
     /// - `elem_layout`'s size must be a multiple of its alignment
     #[inline]
-    unsafe fn current_memory(&self, elem_layout: Layout) -> Option<(NonNull<u8>, Layout)> {
+    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+    const unsafe fn current_memory(&self, elem_layout: Layout) -> Option<(NonNull<u8>, Layout)> {
         if elem_layout.size() == 0 || self.cap.as_inner() == 0 {
             None
         } else {
@@ -566,19 +665,6 @@ impl<A: Allocator> RawVecInner<A> {
         }
     }
 
-    /// # Safety
-    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
-    ///   initially construct `self`
-    /// - `elem_layout`'s size must be a multiple of its alignment
-    #[cfg(not(no_global_oom_handling))]
-    #[inline]
-    unsafe fn grow_one(&mut self, elem_layout: Layout) {
-        // SAFETY: Precondition passed to caller
-        if let Err(err) = unsafe { self.grow_amortized(self.cap.as_inner(), 1, elem_layout) } {
-            handle_error(err);
-        }
-    }
-
     /// # Safety
     /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
     ///   initially construct `self`
     /// - `elem_layout`'s size must be a multiple of its alignment
@@ -651,12 +737,13 @@ impl<A: Allocator> RawVecInner<A> {
     }
 
     #[inline]
-    fn needs_to_grow(&self, len: usize, additional: usize, elem_layout: Layout) -> bool {
+    const fn needs_to_grow(&self, len: usize, additional: usize, elem_layout: Layout) -> bool {
         additional > self.capacity(elem_layout.size()).wrapping_sub(len)
     }
 
     #[inline]
-    unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
+    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+    const unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
         // Allocators currently return a `NonNull<[u8]>` whose length matches
         // the size requested. If that ever changes, the capacity here should
         // change to `ptr.len() / size_of::<T>()`.
@@ -664,44 +751,6 @@ impl<A: Allocator> RawVecInner<A> {
         self.cap = unsafe { Cap::new_unchecked(cap) };
     }
 
-    /// # Safety
-    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
-    ///   initially construct `self`
-    /// - `elem_layout`'s size must be a multiple of its alignment
-    /// - The sum of `len` and `additional` must be greater than the current capacity
-    unsafe fn grow_amortized(
-        &mut self,
-        len: usize,
-        additional: usize,
-        elem_layout: Layout,
-    ) -> Result<(), TryReserveError> {
-        // This is ensured by the calling contexts.
-        debug_assert!(additional > 0);
-
-        if elem_layout.size() == 0 {
-            // Since we return a capacity of `usize::MAX` when `elem_size` is
-            // 0, getting to here necessarily means the `RawVec` is overfull.
-            return Err(CapacityOverflow.into());
-        }
-
-        // Nothing we can really do about these checks, sadly.
-        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
-
-        // This guarantees exponential growth. The doubling cannot overflow
-        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
-        let cap = cmp::max(self.cap.as_inner() * 2, required_cap);
-        let cap = cmp::max(min_non_zero_cap(elem_layout.size()), cap);
-
-        // SAFETY:
-        // - cap >= len + additional
-        // - other preconditions passed to caller
-        let ptr = unsafe { self.finish_grow(cap, elem_layout)? };
-
-        // SAFETY: `finish_grow` would have failed if `cap > isize::MAX`
-        unsafe { self.set_ptr_and_cap(ptr, cap) };
-        Ok(())
-    }
-
     /// # Safety
     /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
     ///   initially construct `self`
@@ -729,35 +778,6 @@ impl<A: Allocator> RawVecInner<A> {
         Ok(())
     }
 
-    /// # Safety
-    /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
-    ///   initially construct `self`
-    /// - `elem_layout`'s size must be a multiple of its alignment
-    /// - `cap` must be greater than the current capacity
-    // not marked inline(never) since we want optimizers to be able to observe the specifics of this
-    // function, see tests/codegen-llvm/vec-reserve-extend.rs.
-    #[cold]
-    unsafe fn finish_grow(
-        &self,
-        cap: usize,
-        elem_layout: Layout,
-    ) -> Result<NonNull<[u8]>, TryReserveError> {
-        let new_layout = layout_array(cap, elem_layout)?;
-
-        let memory = if let Some((ptr, old_layout)) = unsafe { self.current_memory(elem_layout) } {
-            debug_assert_eq!(old_layout.align(), new_layout.align());
-            unsafe {
-                // The allocator checks for alignment equality
-                hint::assert_unchecked(old_layout.align() == new_layout.align());
-                self.alloc.grow(ptr, old_layout, new_layout)
-            }
-        } else {
-            self.alloc.allocate(new_layout)
-        };
-
-        memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
-    }
-
     /// # Safety
     /// - `elem_layout` must be valid for `self`, i.e. it must be the same `elem_layout` used to
     ///   initially construct `self`
@@ -841,7 +861,8 @@ impl<A: Allocator> RawVecInner<A> {
 #[cfg(not(no_global_oom_handling))]
 #[cold]
 #[optimize(size)]
-fn handle_error(e: TryReserveError) -> ! {
+#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+const fn handle_error(e: TryReserveError) -> ! {
     match e.kind() {
         CapacityOverflow => capacity_overflow(),
         AllocError { layout, .. } => handle_alloc_error(layout),
@@ -849,6 +870,11 @@ const fn handle_error(e: TryReserveError) -> ! {
 }
 
 #[inline]
-fn layout_array(cap: usize, elem_layout: Layout) -> Result<Layout, TryReserveError> {
-    elem_layout.repeat(cap).map(|(layout, _pad)| layout).map_err(|_| CapacityOverflow.into())
+#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+const fn layout_array(cap: usize, elem_layout: Layout) -> Result<Layout, TryReserveError> {
+    // FIXME(const-hack) return to using `map` and `map_err` once `const_closures` is implemented
+    match elem_layout.repeat(cap) {
+        Ok((layout, _pad)) => Ok(layout),
+        Err(_) => Err(CapacityOverflow.into()),
+    }
 }
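For reference, the amortized policy now living in the `const impl` above computes the new capacity as `max(min_non_zero_cap(elem_size), max(cap * 2, len + additional))`. A standalone sketch of just that arithmetic (illustrative only; the constant 4 stands in for `min_non_zero_cap`, which is 8 for 1-byte elements, 4 for elements up to 1 KiB, and 1 for larger ones):

    // Mirror of the capacity rule in `grow_amortized`, outside RawVec.
    fn next_capacity(cap: usize, len: usize, additional: usize) -> usize {
        const MIN_NON_ZERO_CAP: usize = 4; // stand-in for min_non_zero_cap(size)
        let required = len.checked_add(additional).expect("capacity overflow");
        // Doubling guarantees exponential growth; taking the max with the
        // request keeps large one-shot reservations exact.
        MIN_NON_ZERO_CAP.max(required.max(cap * 2))
    }

    fn main() {
        assert_eq!(next_capacity(4, 4, 1), 8); // a push into a full Vec doubles
        assert_eq!(next_capacity(4, 4, 100), 104); // a big reserve wins over doubling
        assert_eq!(next_capacity(0, 0, 1), 4); // first allocation jumps to the floor
    }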
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 13d38d3c9609a..776364ba891c2 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -81,7 +81,9 @@ use core::cmp::Ordering;
 use core::hash::{Hash, Hasher};
 #[cfg(not(no_global_oom_handling))]
 use core::iter;
-use core::marker::PhantomData;
+#[cfg(not(no_global_oom_handling))]
+use core::marker::Destruct;
+use core::marker::{Freeze, PhantomData};
 use core::mem::{self, Assume, ManuallyDrop, MaybeUninit, SizedTypeProperties, TransmuteFrom};
 use core::ops::{self, Index, IndexMut, Range, RangeBounds};
 use core::ptr::{self, NonNull};
@@ -517,7 +519,8 @@ impl<T> Vec<T> {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[must_use]
     #[rustc_diagnostic_item = "vec_with_capacity"]
-    pub fn with_capacity(capacity: usize) -> Self {
+    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+    pub const fn with_capacity(capacity: usize) -> Self {
         Self::with_capacity_in(capacity, Global)
     }
 
@@ -826,29 +829,26 @@ impl<T> Vec<T> {
         // SAFETY: A `Vec` always has a non-null pointer.
         (unsafe { NonNull::new_unchecked(ptr) }, len, capacity)
     }
-}
 
-impl<T, A: Allocator> Vec<T, A> {
-    /// Constructs a new, empty `Vec<T, A>`.
-    ///
-    /// The vector will not allocate until elements are pushed onto it.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// #![feature(allocator_api)]
-    ///
-    /// use std::alloc::System;
-    ///
-    /// # #[allow(unused_mut)]
-    /// let mut vec: Vec<i32, System> = Vec::new_in(System);
-    /// ```
-    #[inline]
-    #[unstable(feature = "allocator_api", issue = "32838")]
-    pub const fn new_in(alloc: A) -> Self {
-        Vec { buf: RawVec::new_in(alloc), len: 0 }
-    }
+    /// Interns the `Vec`, making the underlying memory read-only. This method should be
+    /// called at compile time. (It is a no-op at runtime.)
+    ///
+    /// This method must be called if the memory used by `Vec` needs to appear in the final
+    /// values of constants.
+    #[unstable(feature = "const_heap", issue = "79597")]
+    #[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+    pub const fn const_make_global(mut self) -> &'static [T]
+    where
+        T: Freeze,
+    {
+        unsafe { core::intrinsics::const_make_global(self.as_mut_ptr().cast()) };
+        let me = ManuallyDrop::new(self);
+        unsafe { slice::from_raw_parts(me.as_ptr(), me.len) }
+    }
 }
 
+#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+const impl<T, A: [const] Allocator> Vec<T, A> {
     /// Constructs a new, empty `Vec<T, A>` with at least the specified capacity
     /// with the provided allocator.
     ///
@@ -907,10 +907,113 @@ impl<T, A: Allocator> Vec<T, A> {
     #[cfg(not(no_global_oom_handling))]
     #[inline]
     #[unstable(feature = "allocator_api", issue = "32838")]
-    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self
+    {
         Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 }
     }
+
+    /// Appends an element to the back of a collection.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the new capacity exceeds `isize::MAX` _bytes_.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// let mut vec = vec![1, 2];
+    /// vec.push(3);
+    /// assert_eq!(vec, [1, 2, 3]);
+    /// ```
+    ///
+    /// # Time complexity
+    ///
+    /// Takes amortized *O*(1) time. If the vector's length would exceed its
+    /// capacity after the push, *O*(*capacity*) time is taken to copy the
+    /// vector's elements to a larger allocation. This expensive operation is
+    /// offset by the *capacity* *O*(1) insertions it allows.
+    #[cfg(not(no_global_oom_handling))]
+    #[inline]
+    #[stable(feature = "rust1", since = "1.0.0")]
+    #[rustc_confusables("push_back", "put", "append")]
+    pub fn push(&mut self, value: T)
+    {
+        let _ = self.push_mut(value);
+    }
+
+    /// Appends an element to the back of a collection, returning a reference to it.
+    ///
+    /// # Panics
+    ///
+    /// Panics if the new capacity exceeds `isize::MAX` _bytes_.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(push_mut)]
+    ///
+    /// let mut vec = vec![1, 2];
+    /// let last = vec.push_mut(3);
+    /// assert_eq!(*last, 3);
+    /// assert_eq!(vec, [1, 2, 3]);
+    ///
+    /// let last = vec.push_mut(3);
+    /// *last += 1;
+    /// assert_eq!(vec, [1, 2, 3, 4]);
+    /// ```
+    ///
+    /// # Time complexity
+    ///
+    /// Takes amortized *O*(1) time. If the vector's length would exceed its
+    /// capacity after the push, *O*(*capacity*) time is taken to copy the
+    /// vector's elements to a larger allocation. This expensive operation is
+    /// offset by the *capacity* *O*(1) insertions it allows.
+    #[cfg(not(no_global_oom_handling))]
+    #[inline]
+    #[unstable(feature = "push_mut", issue = "135974")]
+    #[must_use = "if you don't need a reference to the value, use `Vec::push` instead"]
+    pub fn push_mut(&mut self, value: T) -> &mut T
+    {
+        // Inform codegen that the length does not change across grow_one().
+        let len = self.len;
+        // This will panic or abort if we would allocate > isize::MAX bytes
+        // or if the length increment would overflow for zero-sized types.
+        if len == self.buf.capacity() {
+            self.buf.grow_one();
+        }
+        unsafe {
+            let end = self.as_mut_ptr().add(len);
+            ptr::write(end, value);
+            self.len = len + 1;
+            // SAFETY: We just wrote a value to the pointer that will live the lifetime of the reference.
+            &mut *end
+        }
+    }
+}
+
+impl<T, A: Allocator> Vec<T, A> {
+    /// Constructs a new, empty `Vec<T, A>`.
+    ///
+    /// The vector will not allocate until elements are pushed onto it.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(allocator_api)]
+    ///
+    /// use std::alloc::System;
+    ///
+    /// # #[allow(unused_mut)]
+    /// let mut vec: Vec<i32, System> = Vec::new_in(System);
+    /// ```
+    #[inline]
+    #[unstable(feature = "allocator_api", issue = "32838")]
+    pub const fn new_in(alloc: A) -> Self {
+        Vec { buf: RawVec::new_in(alloc), len: 0 }
+    }
+
     /// Constructs a new, empty `Vec<T, A>` with at least the specified capacity
     /// with the provided allocator.
     ///
@@ -2496,34 +2599,6 @@ impl<T, A: Allocator> Vec<T, A> {
         }
     }
 
-    /// Appends an element to the back of a collection.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the new capacity exceeds `isize::MAX` _bytes_.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// let mut vec = vec![1, 2];
-    /// vec.push(3);
-    /// assert_eq!(vec, [1, 2, 3]);
-    /// ```
-    ///
-    /// # Time complexity
-    ///
-    /// Takes amortized *O*(1) time. If the vector's length would exceed its
-    /// capacity after the push, *O*(*capacity*) time is taken to copy the
-    /// vector's elements to a larger allocation. This expensive operation is
-    /// offset by the *capacity* *O*(1) insertions it allows.
-    #[cfg(not(no_global_oom_handling))]
-    #[inline]
-    #[stable(feature = "rust1", since = "1.0.0")]
-    #[rustc_confusables("push_back", "put", "append")]
-    pub fn push(&mut self, value: T) {
-        let _ = self.push_mut(value);
-    }
-
     /// Appends an element and returns a reference to it if there is sufficient spare capacity,
     /// otherwise an error is returned with the element.
     ///
@@ -2577,55 +2652,6 @@ impl<T, A: Allocator> Vec<T, A> {
         }
     }
 
-    /// Appends an element to the back of a collection, returning a reference to it.
-    ///
-    /// # Panics
-    ///
-    /// Panics if the new capacity exceeds `isize::MAX` _bytes_.
-    ///
-    /// # Examples
-    ///
-    /// ```
-    /// #![feature(push_mut)]
-    ///
-    /// let mut vec = vec![1, 2];
-    /// let last = vec.push_mut(3);
-    /// assert_eq!(*last, 3);
-    /// assert_eq!(vec, [1, 2, 3]);
-    ///
-    /// let last = vec.push_mut(3);
-    /// *last += 1;
-    /// assert_eq!(vec, [1, 2, 3, 4]);
-    /// ```
-    ///
-    /// # Time complexity
-    ///
-    /// Takes amortized *O*(1) time. If the vector's length would exceed its
-    /// capacity after the push, *O*(*capacity*) time is taken to copy the
-    /// vector's elements to a larger allocation. This expensive operation is
-    /// offset by the *capacity* *O*(1) insertions it allows.
-    #[cfg(not(no_global_oom_handling))]
-    #[inline]
-    #[unstable(feature = "push_mut", issue = "135974")]
-    #[must_use = "if you don't need a reference to the value, use `Vec::push` instead"]
-    pub fn push_mut(&mut self, value: T) -> &mut T {
-        // Inform codegen that the length does not change across grow_one().
-        let len = self.len;
-        // This will panic or abort if we would allocate > isize::MAX bytes
-        // or if the length increment would overflow for zero-sized types.
-        if len == self.buf.capacity() {
-            self.buf.grow_one();
-        }
-        unsafe {
-            let end = self.as_mut_ptr().add(len);
-            ptr::write(end, value);
-            self.len = len + 1;
-            // SAFETY: We just wrote a value to the pointer that will live the lifetime of the reference.
-            &mut *end
-        }
-    }
-
     /// Removes the last element from a vector and returns it, or [`None`] if it
     /// is empty.
     ///
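Taken together, the const `with_capacity`/`push` methods and `const_make_global` let a `Vec` be built during const evaluation and baked into a constant. A usage sketch on a nightly with this PR applied (it mirrors the `const_heap` test added below):

    #![feature(const_heap)]

    const SQUARES: &'static [u32] = {
        let mut v = Vec::with_capacity(4);
        let mut i = 1;
        while i <= 4 {
            v.push(i * i); // const `push`: capacity was reserved up front
            i += 1;
        }
        // Intern the buffer; without this, const-eval rejects the final value.
        v.const_make_global()
    };

    fn main() {
        assert_eq!(SQUARES, &[1, 4, 9, 16]);
    }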
diff --git a/library/alloctests/lib.rs b/library/alloctests/lib.rs
index 73c25679d05ba..acb24beac6934 100644
--- a/library/alloctests/lib.rs
+++ b/library/alloctests/lib.rs
@@ -19,6 +19,13 @@
 #![feature(array_into_iter_constructors)]
 #![feature(assert_matches)]
 #![feature(char_internals)]
+#![feature(const_alloc_error)]
+#![feature(const_cmp)]
+#![feature(const_convert)]
+#![feature(const_destruct)]
+#![feature(const_heap)]
+#![feature(const_option_ops)]
+#![feature(const_try)]
 #![feature(copied_into_inner)]
 #![feature(core_intrinsics)]
 #![feature(exact_size_is_empty)]
diff --git a/library/alloctests/tests/lib.rs b/library/alloctests/tests/lib.rs
index b4d3e75b09942..b3d99f360bf34 100644
--- a/library/alloctests/tests/lib.rs
+++ b/library/alloctests/tests/lib.rs
@@ -1,5 +1,6 @@
 #![feature(allocator_api)]
 #![feature(alloc_layout_extra)]
+#![feature(const_heap)]
 #![feature(iter_array_chunks)]
 #![feature(assert_matches)]
 #![feature(wtf8_internals)]
diff --git a/library/alloctests/tests/vec.rs b/library/alloctests/tests/vec.rs
index dd42230d2e003..8cb939d502aab 100644
--- a/library/alloctests/tests/vec.rs
+++ b/library/alloctests/tests/vec.rs
@@ -2717,3 +2717,19 @@ fn vec_null_ptr_roundtrip() {
     let new = roundtripped.with_addr(ptr.addr());
     unsafe { new.read() };
 }
+
+#[test]
+fn const_heap() {
+    const X: &'static [u32] = {
+        let mut v = Vec::with_capacity(6);
+        let mut x = 1;
+        while x < 42 {
+            v.push(x);
+            x *= 2;
+        }
+        assert!(v.len() == 6);
+        v.const_make_global()
+    };
+
+    assert_eq!([1, 2, 4, 8, 16, 32], X);
+}
diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs
index 9d608d5e83c40..5f3ba645b7ba0 100644
--- a/library/core/src/alloc/mod.rs
+++ b/library/core/src/alloc/mod.rs
@@ -102,7 +102,8 @@ impl fmt::Display for AllocError {
 ///
 /// [*currently allocated*]: #currently-allocated-memory
 #[unstable(feature = "allocator_api", issue = "32838")]
-pub unsafe trait Allocator {
+#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+pub const unsafe trait Allocator {
     /// Attempts to allocate a block of memory.
     ///
     /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees of `layout`.
@@ -368,9 +369,10 @@ pub unsafe trait Allocator {
 }
 
 #[unstable(feature = "allocator_api", issue = "32838")]
-unsafe impl<A> Allocator for &A
+#[rustc_const_unstable(feature = "const_heap", issue = "79597")]
+unsafe impl<A> const Allocator for &A
 where
-    A: Allocator + ?Sized,
+    A: [const] Allocator + ?Sized,
 {
     #[inline]
     fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
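The `A: [const] Allocator + ?Sized` bound above is the conditional form: the forwarding impl is `const` exactly when `A`'s own `Allocator` impl is. A minimal sketch of how `const` traits and `[const]` bounds fit together, using an invented `Area` trait (not part of this PR):

    #![feature(const_trait_impl)]

    #[const_trait]
    trait Area {
        fn area(&self) -> u32;
    }

    struct Square(u32);

    impl const Area for Square {
        fn area(&self) -> u32 {
            self.0 * self.0
        }
    }

    // `[const] Area` makes this callable at compile time for const impls
    // and at runtime for any impl, like the `&A` allocator forwarding above.
    const fn double_area<T: [const] Area>(shape: &T) -> u32 {
        2 * shape.area()
    }

    const D: u32 = double_area(&Square(3));

    fn main() {
        assert_eq!(D, 18);
        assert_eq!(double_area(&Square(2)), 8);
    }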
diff --git a/tests/codegen-units/item-collection/opaque-return-impls.rs b/tests/codegen-units/item-collection/opaque-return-impls.rs
index 1659b62175b78..bde887caf07ee 100644
--- a/tests/codegen-units/item-collection/opaque-return-impls.rs
+++ b/tests/codegen-units/item-collection/opaque-return-impls.rs
@@ -44,7 +44,7 @@ pub fn foo2() -> Box {
 //~ MONO_ITEM fn ::test_func2
 //~ MONO_ITEM fn alloc::alloc::exchange_malloc
 //~ MONO_ITEM fn foo2
-//~ MONO_ITEM fn std::alloc::Global::alloc_impl
+//~ MONO_ITEM fn std::alloc::Global::alloc_impl_runtime
 //~ MONO_ITEM fn std::boxed::Box::::new
 //~ MONO_ITEM fn std::alloc::Layout::from_size_align_unchecked::precondition_check
 //~ MONO_ITEM fn std::ptr::NonNull::::new_unchecked::precondition_check
diff --git a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.32bit.panic-unwind.mir b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.32bit.panic-unwind.mir
index 791d6b71a6f78..013361d1d2fb8 100644
--- a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.32bit.panic-unwind.mir
+++ b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.32bit.panic-unwind.mir
@@ -25,17 +25,21 @@ fn generic_in_place(_1: *mut Box<[T]>) -> () {
         }
     }
     scope 18 (inlined <std::alloc::Global as std::alloc::Allocator>::deallocate) {
-        let mut _9: *mut u8;
-        scope 19 (inlined Layout::size) {
-        }
-        scope 20 (inlined NonNull::<u8>::as_ptr) {
-        }
-        scope 21 (inlined std::alloc::dealloc) {
-            let mut _10: usize;
-            scope 22 (inlined Layout::size) {
-            }
-            scope 23 (inlined Layout::align) {
-                scope 24 (inlined std::ptr::Alignment::as_usize) {
+        scope 19 (inlined std::alloc::Global::deallocate_impl) {
+            scope 20 (inlined std::alloc::Global::deallocate_impl_runtime) {
+                let mut _9: *mut u8;
+                scope 21 (inlined Layout::size) {
+                }
+                scope 22 (inlined NonNull::<u8>::as_ptr) {
+                }
+                scope 23 (inlined std::alloc::dealloc) {
+                    let mut _10: usize;
+                    scope 24 (inlined Layout::size) {
+                    }
+                    scope 25 (inlined Layout::align) {
+                        scope 26 (inlined std::ptr::Alignment::as_usize) {
+                        }
+                    }
                 }
             }
         }
diff --git a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-unwind.mir b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-unwind.mir
index 791d6b71a6f78..013361d1d2fb8 100644
--- a/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-unwind.mir
+++ b/tests/mir-opt/pre-codegen/drop_boxed_slice.generic_in_place.PreCodegen.after.64bit.panic-unwind.mir
@@ -25,17 +25,21 @@ fn generic_in_place(_1: *mut Box<[T]>) -> () {
         }
     }
     scope 18 (inlined <std::alloc::Global as std::alloc::Allocator>::deallocate) {
-        let mut _9: *mut u8;
-        scope 19 (inlined Layout::size) {
-        }
-        scope 20 (inlined NonNull::<u8>::as_ptr) {
-        }
-        scope 21 (inlined std::alloc::dealloc) {
-            let mut _10: usize;
-            scope 22 (inlined Layout::size) {
-            }
-            scope 23 (inlined Layout::align) {
-                scope 24 (inlined std::ptr::Alignment::as_usize) {
+        scope 19 (inlined std::alloc::Global::deallocate_impl) {
+            scope 20 (inlined std::alloc::Global::deallocate_impl_runtime) {
+                let mut _9: *mut u8;
+                scope 21 (inlined Layout::size) {
+                }
+                scope 22 (inlined NonNull::<u8>::as_ptr) {
+                }
+                scope 23 (inlined std::alloc::dealloc) {
+                    let mut _10: usize;
+                    scope 24 (inlined Layout::size) {
+                    }
+                    scope 25 (inlined Layout::align) {
+                        scope 26 (inlined std::ptr::Alignment::as_usize) {
+                        }
+                    }
                 }
             }
         }
diff --git a/tests/mir-opt/pre-codegen/issue_117368_print_invalid_constant.main.GVN.32bit.panic-unwind.diff b/tests/mir-opt/pre-codegen/issue_117368_print_invalid_constant.main.GVN.32bit.panic-unwind.diff
index 15a9d9e39c491..485ff902a7b9d 100644
--- a/tests/mir-opt/pre-codegen/issue_117368_print_invalid_constant.main.GVN.32bit.panic-unwind.diff
+++ b/tests/mir-opt/pre-codegen/issue_117368_print_invalid_constant.main.GVN.32bit.panic-unwind.diff
@@ -9,22 +9,22 @@
       let mut _4: *mut [u8];
       let mut _5: std::ptr::NonNull<[u8]>;
       let mut _6: std::result::Result<NonNull<[u8]>, std::alloc::AllocError>;
-      let mut _7: &std::alloc::Global;
-      let mut _8: std::alloc::Layout;
+      let mut _7: std::alloc::Layout;
       scope 1 {
           debug layout => _1;
-          let mut _9: &std::alloc::Global;
           scope 2 {
               debug ptr => _3;
           }
           scope 5 (inlined <std::alloc::Global as std::alloc::Allocator>::allocate) {
+              scope 6 (inlined std::alloc::Global::alloc_impl) {
+              }
           }
-          scope 6 (inlined NonNull::<[u8]>::as_ptr) {
+          scope 7 (inlined NonNull::<[u8]>::as_ptr) {
           }
       }
       scope 3 (inlined #[track_caller] Option::<Layout>::unwrap) {
-          let mut _10: isize;
-          let mut _11: !;
+          let mut _8: isize;
+          let mut _9: !;
          scope 4 {
          }
      }
@@ -35,10 +35,10 @@
          StorageLive(_2);
 -        _2 = Option::<Layout>::None;
 +        _2 = const Option::<Layout>::None;
-         StorageLive(_10);
--        _10 = discriminant(_2);
--        switchInt(move _10) -> [0: bb3, 1: bb4, otherwise: bb2];
-+        _10 = const 0_isize;
+         StorageLive(_8);
+-        _8 = discriminant(_2);
+-        switchInt(move _8) -> [0: bb3, 1: bb4, otherwise: bb2];
++        _8 = const 0_isize;
 +        switchInt(const 0_isize) -> [0: bb3, 1: bb4, otherwise: bb2];
      }
 
@@ -59,30 +59,26 @@
      }
 
      bb3: {
-         _11 = option::unwrap_failed() -> unwind continue;
+         _9 = option::unwrap_failed() -> unwind continue;
      }
 
      bb4: {
 -        _1 = move ((_2 as Some).0: std::alloc::Layout);
 +        _1 = const Layout {{ size: Indirect { alloc_id: ALLOC0, offset: Size(4 bytes) }: usize, align: std::ptr::Alignment(Scalar(0x00000000): std::ptr::alignment::AlignmentEnum) }};
-         StorageDead(_10);
+         StorageDead(_8);
          StorageDead(_2);
          StorageLive(_3);
          StorageLive(_4);
          StorageLive(_5);
          StorageLive(_6);
          StorageLive(_7);
-         _9 = const main::promoted[0];
-         _7 = copy _9;
-         StorageLive(_8);
--        _8 = copy _1;
--        _6 = std::alloc::Global::alloc_impl(move _7, move _8, const false) -> [return: bb5, unwind continue];
-+        _8 = const Layout {{ size: Indirect { alloc_id: ALLOC0, offset: Size(4 bytes) }: usize, align: std::ptr::Alignment(Scalar(0x00000000): std::ptr::alignment::AlignmentEnum) }};
-+        _6 = std::alloc::Global::alloc_impl(copy _9, const Layout {{ size: Indirect { alloc_id: ALLOC0, offset: Size(4 bytes) }: usize, align: std::ptr::Alignment(Scalar(0x00000000): std::ptr::alignment::AlignmentEnum) }}, const false) -> [return: bb5, unwind continue];
+-        _7 = copy _1;
+-        _6 = std::alloc::Global::alloc_impl_runtime(move _7, const false) -> [return: bb5, unwind continue];
++        _7 = const Layout {{ size: Indirect { alloc_id: ALLOC0, offset: Size(4 bytes) }: usize, align: std::ptr::Alignment(Scalar(0x00000000): std::ptr::alignment::AlignmentEnum) }};
++        _6 = std::alloc::Global::alloc_impl_runtime(const Layout {{ size: Indirect { alloc_id: ALLOC0, offset: Size(4 bytes) }: usize, align: std::ptr::Alignment(Scalar(0x00000000): std::ptr::alignment::AlignmentEnum) }}, const false) -> [return: bb5, unwind continue];
      }
 
      bb5: {
-         StorageDead(_8);
          StorageDead(_7);
          _5 = Result::<NonNull<[u8]>, std::alloc::AllocError>::unwrap(move _6) -> [return: bb1, unwind continue];
      }
diff --git a/tests/mir-opt/pre-codegen/issue_117368_print_invalid_constant.main.GVN.64bit.panic-unwind.diff b/tests/mir-opt/pre-codegen/issue_117368_print_invalid_constant.main.GVN.64bit.panic-unwind.diff
index df008ececae30..beee899dafe6e 100644
--- a/tests/mir-opt/pre-codegen/issue_117368_print_invalid_constant.main.GVN.64bit.panic-unwind.diff
+++ b/tests/mir-opt/pre-codegen/issue_117368_print_invalid_constant.main.GVN.64bit.panic-unwind.diff
@@ -9,22 +9,22 @@
       let mut _4: *mut [u8];
       let mut _5: std::ptr::NonNull<[u8]>;
       let mut _6: std::result::Result<NonNull<[u8]>, std::alloc::AllocError>;
-      let mut _7: &std::alloc::Global;
-      let mut _8: std::alloc::Layout;
+      let mut _7: std::alloc::Layout;
       scope 1 {
           debug layout => _1;
-          let mut _9: &std::alloc::Global;
          scope 2 {
              debug ptr => _3;
          }
          scope 5 (inlined <std::alloc::Global as std::alloc::Allocator>::allocate) {
+             scope 6 (inlined std::alloc::Global::alloc_impl) {
+             }
          }
-         scope 6 (inlined NonNull::<[u8]>::as_ptr) {
+         scope 7 (inlined NonNull::<[u8]>::as_ptr) {
          }
      }
      scope 3 (inlined #[track_caller] Option::<Layout>::unwrap) {
-         let mut _10: isize;
-         let mut _11: !;
+         let mut _8: isize;
+         let mut _9: !;
          scope 4 {
          }
      }
@@ -35,10 +35,10 @@
          StorageLive(_2);
 -        _2 = Option::<Layout>::None;
 +        _2 = const Option::<Layout>::None;
-         StorageLive(_10);
--        _10 = discriminant(_2);
--        switchInt(move _10) -> [0: bb3, 1: bb4, otherwise: bb2];
-+        _10 = const 0_isize;
+         StorageLive(_8);
+-        _8 = discriminant(_2);
+-        switchInt(move _8) -> [0: bb3, 1: bb4, otherwise: bb2];
++        _8 = const 0_isize;
 +        switchInt(const 0_isize) -> [0: bb3, 1: bb4, otherwise: bb2];
      }
 
@@ -59,30 +59,26 @@
      }
 
      bb3: {
-         _11 = option::unwrap_failed() -> unwind continue;
+         _9 = option::unwrap_failed() -> unwind continue;
      }
 
      bb4: {
 -        _1 = move ((_2 as Some).0: std::alloc::Layout);
 +        _1 = const Layout {{ size: Indirect { alloc_id: ALLOC0, offset: Size(8 bytes) }: usize, align: std::ptr::Alignment(Scalar(0x0000000000000000): std::ptr::alignment::AlignmentEnum) }};
-         StorageDead(_10);
+         StorageDead(_8);
          StorageDead(_2);
          StorageLive(_3);
          StorageLive(_4);
          StorageLive(_5);
          StorageLive(_6);
          StorageLive(_7);
-         _9 = const main::promoted[0];
-         _7 = copy _9;
-         StorageLive(_8);
--        _8 = copy _1;
--        _6 = std::alloc::Global::alloc_impl(move _7, move _8, const false) -> [return: bb5, unwind continue];
-+        _8 = const Layout {{ size: Indirect { alloc_id: ALLOC0, offset: Size(8 bytes) }: usize, align: std::ptr::Alignment(Scalar(0x0000000000000000): std::ptr::alignment::AlignmentEnum) }};
-+        _6 = std::alloc::Global::alloc_impl(copy _9, const Layout {{ size: Indirect { alloc_id: ALLOC0, offset: Size(8 bytes) }: usize, align: std::ptr::Alignment(Scalar(0x0000000000000000): std::ptr::alignment::AlignmentEnum) }}, const false) -> [return: bb5, unwind continue];
+-        _7 = copy _1;
+-        _6 = std::alloc::Global::alloc_impl_runtime(move _7, const false) -> [return: bb5, unwind continue];
++        _7 = const Layout {{ size: Indirect { alloc_id: ALLOC0, offset: Size(8 bytes) }: usize, align: std::ptr::Alignment(Scalar(0x0000000000000000): std::ptr::alignment::AlignmentEnum) }};
++        _6 = std::alloc::Global::alloc_impl_runtime(const Layout {{ size: Indirect { alloc_id: ALLOC0, offset: Size(8 bytes) }: usize, align: std::ptr::Alignment(Scalar(0x0000000000000000): std::ptr::alignment::AlignmentEnum) }}, const false) -> [return: bb5, unwind continue];
      }
 
      bb5: {
-         StorageDead(_8);
          StorageDead(_7);
          _5 = Result::<NonNull<[u8]>, std::alloc::AllocError>::unwrap(move _6) -> [return: bb1, unwind continue];
      }
diff --git a/tests/ui/consts/const-eval/heap/vec-not-made-global.rs b/tests/ui/consts/const-eval/heap/vec-not-made-global.rs
new file mode 100644
index 0000000000000..4f78e977e4d59
--- /dev/null
+++ b/tests/ui/consts/const-eval/heap/vec-not-made-global.rs
@@ -0,0 +1,5 @@
+#![feature(const_heap)]
+const V: Vec<i32> = Vec::with_capacity(1);
+//~^ ERROR: encountered `const_allocate` pointer in final value that was not made global
+
+fn main() {}
diff --git a/tests/ui/consts/const-eval/heap/vec-not-made-global.stderr b/tests/ui/consts/const-eval/heap/vec-not-made-global.stderr
new file mode 100644
index 0000000000000..595cbeb8df26b
--- /dev/null
+++ b/tests/ui/consts/const-eval/heap/vec-not-made-global.stderr
@@ -0,0 +1,10 @@
+error: encountered `const_allocate` pointer in final value that was not made global
+  --> $DIR/vec-not-made-global.rs:2:1
+   |
+LL | const V: Vec<i32> = Vec::with_capacity(1);
+   | ^^^^^^^^^^^^^^^^^
+   |
+   = note: use `const_make_global` to turn allocated pointers into immutable globals before returning
+
+error: aborting due to 1 previous error
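The note in the new diagnostic points at the fix: intern the allocation before it reaches the constant's final value. A passing counterpart of the test above might look like this (a sketch, not a test added by this PR):

    #![feature(const_heap)]

    // `const_make_global` turns the `const_allocate`d buffer into an
    // immutable global, so the constant is accepted.
    const V: &'static [i32] = {
        let mut v = Vec::with_capacity(1);
        v.push(1);
        v.const_make_global()
    };

    fn main() {
        assert_eq!(V, &[1]);
    }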