From 7086dd83cca1cf694c7bd171efbf262fa8ffb4aa Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Tue, 29 Oct 2024 13:37:26 -0700 Subject: [PATCH 1/7] compiler: `rustc_abi::Abi` => `BackendRepr` The initial naming of "Abi" was an awful mistake, conveying wrong ideas about how psABIs work and even more about what the enum meant. It was only meant to represent the way the value would be described to a codegen backend as it was lowered to that intermediate representation. It was never meant to mean anything about the actual psABI handling! The conflation arose because LLVM typically associates a certain form with a certain ABI, but even that association does not hold in the special cases that actually arise, nor in the presence of the IR annotations that modify the ABI. Reframe `rustc_abi::Abi` as the `BackendRepr` of the type, and rename `BackendRepr::Aggregate` to `BackendRepr::Memory`. Unfortunately, due to the persistent misunderstandings, this too is not entirely correct: - Scattered ABI-relevant code is entangled with BackendRepr - We do not always pre-compute a correct BackendRepr that reflects how we "actually" want this value to be handled, so we leave the backend interface to also inject various special-cases here - In some cases `BackendRepr::Memory` is a "real" aggregate, in others it only means the value is passed through memory, and in some cases it is actually a scalar! Our rustc-to-backend lowering code handles this sort of thing right now. That will eventually be addressed by lifting duplicated lowering code to either rustc_codegen_ssa or rustc_target as appropriate. --- compiler/rustc_abi/src/callconv.rs | 16 +-- compiler/rustc_abi/src/layout.rs | 104 ++++++++-------- compiler/rustc_abi/src/layout/ty.rs | 12 +- compiler/rustc_abi/src/lib.rs | 112 +++++++++++------- compiler/rustc_codegen_llvm/src/abi.rs | 10 +- compiler/rustc_codegen_llvm/src/asm.rs | 93 ++++++++------- compiler/rustc_codegen_llvm/src/builder.rs | 4 +- compiler/rustc_codegen_llvm/src/intrinsic.rs | 13 +- compiler/rustc_codegen_llvm/src/type_of.rs | 37 +++--- compiler/rustc_codegen_ssa/src/mir/block.rs | 2 +- .../rustc_codegen_ssa/src/mir/constant.rs | 4 +- .../rustc_codegen_ssa/src/mir/debuginfo.rs | 4 +- compiler/rustc_codegen_ssa/src/mir/operand.rs | 39 +++--- compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 10 +- .../rustc_codegen_ssa/src/traits/builder.rs | 4 +- .../src/const_eval/dummy_machine.rs | 2 +- .../src/const_eval/eval_queries.rs | 6 +- .../src/const_eval/valtrees.rs | 6 +- .../rustc_const_eval/src/interpret/call.rs | 4 +- .../rustc_const_eval/src/interpret/cast.rs | 2 +- .../src/interpret/discriminant.rs | 2 +- .../src/interpret/intrinsics.rs | 2 +- .../rustc_const_eval/src/interpret/operand.rs | 53 +++++---- .../src/interpret/operator.rs | 12 +- .../rustc_const_eval/src/interpret/place.rs | 12 +- .../src/interpret/validity.rs | 23 ++-- .../src/util/check_validity_requirement.rs | 14 +-- compiler/rustc_lint/src/builtin.rs | 6 +- compiler/rustc_lint/src/foreign_modules.rs | 2 +- compiler/rustc_lint/src/types.rs | 6 +- compiler/rustc_middle/src/ty/layout.rs | 7 +- .../src/build/expr/as_rvalue.rs | 4 +- .../rustc_mir_dataflow/src/value_analysis.rs | 2 +- .../src/dataflow_const_prop.rs | 12 +- compiler/rustc_mir_transform/src/gvn.rs | 19 +-- .../src/known_panics_lint.rs | 10 +- compiler/rustc_passes/src/layout_test.rs | 6 +- .../rustc_smir/src/rustc_smir/convert/abi.rs | 14 +-- .../rustc_target/src/callconv/loongarch.rs | 18 +-- compiler/rustc_target/src/callconv/mips64.rs | 8 +- compiler/rustc_target/src/callconv/mod.rs | 29 +++--
compiler/rustc_target/src/callconv/riscv.rs | 18 +-- compiler/rustc_target/src/callconv/sparc64.rs | 8 +- compiler/rustc_target/src/callconv/x86.rs | 18 +-- compiler/rustc_target/src/callconv/x86_64.rs | 14 ++- .../rustc_target/src/callconv/x86_win64.rs | 27 +++-- compiler/rustc_target/src/callconv/xtensa.rs | 6 +- .../src/traits/dyn_compatibility.rs | 10 +- compiler/rustc_ty_utils/src/abi.rs | 14 +-- compiler/rustc_ty_utils/src/layout.rs | 43 ++++--- .../rustc_ty_utils/src/layout/invariant.rs | 42 +++---- 51 files changed, 517 insertions(+), 428 deletions(-) diff --git a/compiler/rustc_abi/src/callconv.rs b/compiler/rustc_abi/src/callconv.rs index 872cae59a4e03..ee63e46e88c1d 100644 --- a/compiler/rustc_abi/src/callconv.rs +++ b/compiler/rustc_abi/src/callconv.rs @@ -6,9 +6,9 @@ mod abi { #[cfg(feature = "nightly")] use rustc_macros::HashStable_Generic; -#[cfg(feature = "nightly")] -use crate::{Abi, FieldsShape, TyAbiInterface, TyAndLayout}; use crate::{Align, HasDataLayout, Size}; +#[cfg(feature = "nightly")] +use crate::{BackendRepr, FieldsShape, TyAbiInterface, TyAndLayout}; #[cfg_attr(feature = "nightly", derive(HashStable_Generic))] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] @@ -128,11 +128,11 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { where Ty: TyAbiInterface<'a, C> + Copy, { - match self.abi { - Abi::Uninhabited => Err(Heterogeneous), + match self.backend_repr { + BackendRepr::Uninhabited => Err(Heterogeneous), // The primitive for this algorithm. - Abi::Scalar(scalar) => { + BackendRepr::Scalar(scalar) => { let kind = match scalar.primitive() { abi::Int(..) | abi::Pointer(_) => RegKind::Integer, abi::Float(_) => RegKind::Float, @@ -140,7 +140,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size })) } - Abi::Vector { .. } => { + BackendRepr::Vector { .. } => { assert!(!self.is_zst()); Ok(HomogeneousAggregate::Homogeneous(Reg { kind: RegKind::Vector, @@ -148,7 +148,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { })) } - Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => { + BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => { // Helper for computing `homogeneous_aggregate`, allowing a custom // starting offset (used below for handling variants). 
let from_fields_at = @@ -246,7 +246,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { Ok(result) } } - Abi::Aggregate { sized: false } => Err(Heterogeneous), + BackendRepr::Memory { sized: false } => Err(Heterogeneous), } } } diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs index 86de39b8f9752..e6d66f608dae9 100644 --- a/compiler/rustc_abi/src/layout.rs +++ b/compiler/rustc_abi/src/layout.rs @@ -6,7 +6,7 @@ use rustc_index::Idx; use tracing::debug; use crate::{ - Abi, AbiAndPrefAlign, Align, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer, + AbiAndPrefAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer, LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding, Variants, WrappingRange, }; @@ -125,7 +125,7 @@ impl LayoutCalculator { offsets: [Size::ZERO, b_offset].into(), memory_index: [0, 1].into(), }, - abi: Abi::ScalarPair(a, b), + backend_repr: BackendRepr::ScalarPair(a, b), largest_niche, align, size, @@ -216,7 +216,7 @@ impl LayoutCalculator { LayoutData { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldsShape::Primitive, - abi: Abi::Uninhabited, + backend_repr: BackendRepr::Uninhabited, largest_niche: None, align: dl.i8_align, size: Size::ZERO, @@ -331,7 +331,7 @@ impl LayoutCalculator { if let Ok(common) = common_non_zst_abi_and_align { // Discard valid range information and allow undef - let field_abi = field.abi.to_union(); + let field_abi = field.backend_repr.to_union(); if let Some((common_abi, common_align)) = common { if common_abi != field_abi { @@ -340,7 +340,7 @@ impl LayoutCalculator { } else { // Fields with the same non-Aggregate ABI should also // have the same alignment - if !matches!(common_abi, Abi::Aggregate { .. }) { + if !matches!(common_abi, BackendRepr::Memory { .. }) { assert_eq!( common_align, field.align.abi, "non-Aggregate field with matching ABI but differing alignment" @@ -369,11 +369,11 @@ impl LayoutCalculator { // If all non-ZST fields have the same ABI, we may forward that ABI // for the union as a whole, unless otherwise inhibited. let abi = match common_non_zst_abi_and_align { - Err(AbiMismatch) | Ok(None) => Abi::Aggregate { sized: true }, + Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true }, Ok(Some((abi, _))) => { if abi.inherent_align(dl).map(|a| a.abi) != Some(align.abi) { // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt - Abi::Aggregate { sized: true } + BackendRepr::Memory { sized: true } } else { abi } @@ -387,7 +387,7 @@ impl LayoutCalculator { Ok(LayoutData { variants: Variants::Single { index: only_variant_idx }, fields: FieldsShape::Union(union_field_count), - abi, + backend_repr: abi, largest_niche: None, align, size: size.align_to(align.abi), @@ -434,23 +434,23 @@ impl LayoutCalculator { // Already doesn't have any niches Scalar::Union { .. 
} => {} }; - match &mut st.abi { - Abi::Uninhabited => {} - Abi::Scalar(scalar) => hide_niches(scalar), - Abi::ScalarPair(a, b) => { + match &mut st.backend_repr { + BackendRepr::Uninhabited => {} + BackendRepr::Scalar(scalar) => hide_niches(scalar), + BackendRepr::ScalarPair(a, b) => { hide_niches(a); hide_niches(b); } - Abi::Vector { element, count: _ } => hide_niches(element), - Abi::Aggregate { sized: _ } => {} + BackendRepr::Vector { element, count: _ } => hide_niches(element), + BackendRepr::Memory { sized: _ } => {} } st.largest_niche = None; return Ok(st); } let (start, end) = scalar_valid_range; - match st.abi { - Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => { + match st.backend_repr { + BackendRepr::Scalar(ref mut scalar) | BackendRepr::ScalarPair(ref mut scalar, _) => { // Enlarging validity ranges would result in missed // optimizations, *not* wrongly assuming the inner // value is valid. e.g. unions already enlarge validity ranges, @@ -607,8 +607,8 @@ impl LayoutCalculator { } // It can't be a Scalar or ScalarPair because the offset isn't 0. - if !layout.abi.is_uninhabited() { - layout.abi = Abi::Aggregate { sized: true }; + if !layout.is_uninhabited() { + layout.backend_repr = BackendRepr::Memory { sized: true }; } layout.size += this_offset; @@ -627,26 +627,26 @@ impl LayoutCalculator { let same_size = size == variant_layouts[largest_variant_index].size; let same_align = align == variant_layouts[largest_variant_index].align; - let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) { - Abi::Uninhabited + let abi = if variant_layouts.iter().all(|v| v.is_uninhabited()) { + BackendRepr::Uninhabited } else if same_size && same_align && others_zst { - match variant_layouts[largest_variant_index].abi { + match variant_layouts[largest_variant_index].backend_repr { // When the total alignment and size match, we can use the // same ABI as the scalar variant with the reserved niche. - Abi::Scalar(_) => Abi::Scalar(niche_scalar), - Abi::ScalarPair(first, second) => { + BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar), + BackendRepr::ScalarPair(first, second) => { // Only the niche is guaranteed to be initialised, // so use union layouts for the other primitive. if niche_offset == Size::ZERO { - Abi::ScalarPair(niche_scalar, second.to_union()) + BackendRepr::ScalarPair(niche_scalar, second.to_union()) } else { - Abi::ScalarPair(first.to_union(), niche_scalar) + BackendRepr::ScalarPair(first.to_union(), niche_scalar) } } - _ => Abi::Aggregate { sized: true }, + _ => BackendRepr::Memory { sized: true }, } } else { - Abi::Aggregate { sized: true } + BackendRepr::Memory { sized: true } }; let layout = LayoutData { @@ -664,7 +664,7 @@ impl LayoutCalculator { offsets: [niche_offset].into(), memory_index: [0].into(), }, - abi, + backend_repr: abi, largest_niche, size, align, @@ -833,14 +833,14 @@ impl LayoutCalculator { end: (max as u128 & tag_mask), }, }; - let mut abi = Abi::Aggregate { sized: true }; + let mut abi = BackendRepr::Memory { sized: true }; - if layout_variants.iter().all(|v| v.abi.is_uninhabited()) { - abi = Abi::Uninhabited; + if layout_variants.iter().all(|v| v.is_uninhabited()) { + abi = BackendRepr::Uninhabited; } else if tag.size(dl) == size { // Make sure we only use scalar layout when the enum is entirely its // own tag (i.e. it has no padding nor any non-ZST variant fields). - abi = Abi::Scalar(tag); + abi = BackendRepr::Scalar(tag); } else { // Try to use a ScalarPair for all tagged enums. 
// That's possible only if we can find a common primitive type for all variants. @@ -864,8 +864,8 @@ impl LayoutCalculator { break; } }; - let prim = match field.abi { - Abi::Scalar(scalar) => { + let prim = match field.backend_repr { + BackendRepr::Scalar(scalar) => { common_prim_initialized_in_all_variants &= matches!(scalar, Scalar::Initialized { .. }); scalar.primitive() @@ -934,7 +934,7 @@ impl LayoutCalculator { { // We can use `ScalarPair` only when it matches our // already computed layout (including `#[repr(C)]`). - abi = pair.abi; + abi = pair.backend_repr; } } } @@ -942,12 +942,14 @@ impl LayoutCalculator { // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the // variants to ensure they are consistent. This is because a downcast is // semantically a NOP, and thus should not affect layout. - if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) { + if matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) { for variant in &mut layout_variants { // We only do this for variants with fields; the others are not accessed anyway. // Also do not overwrite any already existing "clever" ABIs. - if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) { - variant.abi = abi; + if variant.fields.count() > 0 + && matches!(variant.backend_repr, BackendRepr::Memory { .. }) + { + variant.backend_repr = abi; // Also need to bump up the size and alignment, so that the entire value fits // in here. variant.size = cmp::max(variant.size, size); @@ -970,7 +972,7 @@ impl LayoutCalculator { memory_index: [0].into(), }, largest_niche, - abi, + backend_repr: abi, align, size, max_repr_align, @@ -1252,7 +1254,7 @@ impl LayoutCalculator { } let mut layout_of_single_non_zst_field = None; let sized = unsized_field.is_none(); - let mut abi = Abi::Aggregate { sized }; + let mut abi = BackendRepr::Memory { sized }; let optimize_abi = !repr.inhibit_newtype_abi_optimization(); @@ -1270,16 +1272,16 @@ impl LayoutCalculator { // Field fills the struct and it has a scalar or scalar pair ABI. if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size { - match field.abi { + match field.backend_repr { // For plain scalars, or vectors of them, we can't unpack // newtypes for `#[repr(C)]`, as that affects C ABIs. - Abi::Scalar(_) | Abi::Vector { .. } if optimize_abi => { - abi = field.abi; + BackendRepr::Scalar(_) | BackendRepr::Vector { .. } if optimize_abi => { + abi = field.backend_repr; } // But scalar pairs are Rust-specific and get // treated as aggregates by C ABIs anyway. - Abi::ScalarPair(..) => { - abi = field.abi; + BackendRepr::ScalarPair(..) => { + abi = field.backend_repr; } _ => {} } @@ -1288,8 +1290,8 @@ impl LayoutCalculator { // Two non-ZST fields, and they're both scalars. (Some((i, a)), Some((j, b)), None) => { - match (a.abi, b.abi) { - (Abi::Scalar(a), Abi::Scalar(b)) => { + match (a.backend_repr, b.backend_repr) { + (BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => { // Order by the memory placement, not source order. let ((i, a), (j, b)) = if offsets[i] < offsets[j] { ((i, a), (j, b)) @@ -1315,7 +1317,7 @@ impl LayoutCalculator { { // We can use `ScalarPair` only when it matches our // already computed layout (including `#[repr(C)]`). 
- abi = pair.abi; + abi = pair.backend_repr; } } _ => {} @@ -1325,8 +1327,8 @@ impl LayoutCalculator { _ => {} } } - if fields.iter().any(|f| f.abi.is_uninhabited()) { - abi = Abi::Uninhabited; + if fields.iter().any(|f| f.is_uninhabited()) { + abi = BackendRepr::Uninhabited; } let unadjusted_abi_align = if repr.transparent() { @@ -1344,7 +1346,7 @@ impl LayoutCalculator { Ok(LayoutData { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldsShape::Arbitrary { offsets, memory_index }, - abi, + backend_repr: abi, largest_niche, align, size, diff --git a/compiler/rustc_abi/src/layout/ty.rs b/compiler/rustc_abi/src/layout/ty.rs index e029e1426b218..062447ea03f08 100644 --- a/compiler/rustc_abi/src/layout/ty.rs +++ b/compiler/rustc_abi/src/layout/ty.rs @@ -83,8 +83,8 @@ impl<'a> Layout<'a> { &self.0.0.variants } - pub fn abi(self) -> Abi { - self.0.0.abi + pub fn backend_repr(self) -> BackendRepr { + self.0.0.backend_repr } pub fn largest_niche(self) -> Option<Niche> { @@ -114,7 +114,7 @@ impl<'a> Layout<'a> { pub fn is_pointer_like(self, data_layout: &TargetDataLayout) -> bool { self.size() == data_layout.pointer_size && self.align().abi == data_layout.pointer_align.abi - && matches!(self.abi(), Abi::Scalar(Scalar::Initialized { .. })) + && matches!(self.backend_repr(), BackendRepr::Scalar(Scalar::Initialized { .. })) } } @@ -196,9 +196,9 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { Ty: TyAbiInterface<'a, C>, C: HasDataLayout, { - match self.abi { - Abi::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)), - Abi::Aggregate { .. } => { + match self.backend_repr { + BackendRepr::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)), + BackendRepr::Memory { .. } => { if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 { self.field(cx, 0).is_single_fp_element(cx) } else { diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index 41922aee64877..fac1122c4dfc7 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -1344,11 +1344,19 @@ impl AddressSpace { pub const DATA: Self = AddressSpace(0); } -/// Describes how values of the type are passed by target ABIs, -/// in terms of categories of C types there are ABI rules for. +/// The way we represent values to the backend +/// +/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI. +/// In reality, this implies little about that, but is mostly used to describe the syntactic form +/// emitted for the backend, as most backends handle SSA values and blobs of memory differently. +/// The psABI may need consideration in doing so, but this enum does not constitute a promise for +/// how the value will be lowered to the calling convention, in itself. +/// +/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector, +/// and larger values will usually prefer to be represented as memory. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[cfg_attr(feature = "nightly", derive(HashStable_Generic))] -pub enum Abi { +pub enum BackendRepr { Uninhabited, Scalar(Scalar), ScalarPair(Scalar, Scalar), @@ -1356,19 +1364,23 @@ pub enum Abi { element: Scalar, count: u64, }, - Aggregate { + // FIXME: I sometimes use memory, sometimes use an IR aggregate! + Memory { /// If true, the size is exact, otherwise it's only a lower bound. sized: bool, }, } -impl Abi { +impl BackendRepr { /// Returns `true` if the layout corresponds to an unsized type.
#[inline] pub fn is_unsized(&self) -> bool { match *self { - Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false, - Abi::Aggregate { sized } => !sized, + BackendRepr::Uninhabited + | BackendRepr::Scalar(_) + | BackendRepr::ScalarPair(..) + | BackendRepr::Vector { .. } => false, + BackendRepr::Memory { sized } => !sized, } } @@ -1381,7 +1393,7 @@ impl Abi { #[inline] pub fn is_signed(&self) -> bool { match self { - Abi::Scalar(scal) => match scal.primitive() { + BackendRepr::Scalar(scal) => match scal.primitive() { Primitive::Int(_, signed) => signed, _ => false, }, @@ -1392,61 +1404,67 @@ impl Abi { /// Returns `true` if this is an uninhabited type #[inline] pub fn is_uninhabited(&self) -> bool { - matches!(*self, Abi::Uninhabited) + matches!(*self, BackendRepr::Uninhabited) } /// Returns `true` if this is a scalar type #[inline] pub fn is_scalar(&self) -> bool { - matches!(*self, Abi::Scalar(_)) + matches!(*self, BackendRepr::Scalar(_)) } /// Returns `true` if this is a bool #[inline] pub fn is_bool(&self) -> bool { - matches!(*self, Abi::Scalar(s) if s.is_bool()) + matches!(*self, BackendRepr::Scalar(s) if s.is_bool()) } /// Returns the fixed alignment of this ABI, if any is mandated. pub fn inherent_align<C: HasDataLayout>(&self, cx: &C) -> Option<AbiAndPrefAlign> { Some(match *self { - Abi::Scalar(s) => s.align(cx), - Abi::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)), - Abi::Vector { element, count } => { + BackendRepr::Scalar(s) => s.align(cx), + BackendRepr::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)), + BackendRepr::Vector { element, count } => { cx.data_layout().vector_align(element.size(cx) * count) } - Abi::Uninhabited | Abi::Aggregate { .. } => return None, + BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None, }) } /// Returns the fixed size of this ABI, if any is mandated. pub fn inherent_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> { Some(match *self { - Abi::Scalar(s) => { + BackendRepr::Scalar(s) => { // No padding in scalars. s.size(cx) } - Abi::ScalarPair(s1, s2) => { + BackendRepr::ScalarPair(s1, s2) => { // May have some padding between the pair. let field2_offset = s1.size(cx).align_to(s2.align(cx).abi); (field2_offset + s2.size(cx)).align_to(self.inherent_align(cx)?.abi) } - Abi::Vector { element, count } => { + BackendRepr::Vector { element, count } => { // No padding in vectors, except possibly for trailing padding // to make the size a multiple of align (e.g. for vectors of size 3). (element.size(cx) * count).align_to(self.inherent_align(cx)?.abi) } - Abi::Uninhabited | Abi::Aggregate { .. } => return None, + BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None, }) } /// Discard validity range information and allow undef. pub fn to_union(&self) -> Self { match *self { - Abi::Scalar(s) => Abi::Scalar(s.to_union()), - Abi::ScalarPair(s1, s2) => Abi::ScalarPair(s1.to_union(), s2.to_union()), - Abi::Vector { element, count } => Abi::Vector { element: element.to_union(), count }, - Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true }, + BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()), + BackendRepr::ScalarPair(s1, s2) => { + BackendRepr::ScalarPair(s1.to_union(), s2.to_union()) + } + BackendRepr::Vector { element, count } => { + BackendRepr::Vector { element: element.to_union(), count } + } + BackendRepr::Uninhabited | BackendRepr::Memory { ..
} => { + BackendRepr::Memory { sized: true } + } } } @@ -1454,12 +1472,12 @@ impl Abi { match (self, other) { // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges. // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x). - (Abi::Scalar(l), Abi::Scalar(r)) => l.primitive() == r.primitive(), + (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(), ( - Abi::Vector { element: element_l, count: count_l }, - Abi::Vector { element: element_r, count: count_r }, + BackendRepr::Vector { element: element_l, count: count_l }, + BackendRepr::Vector { element: element_r, count: count_r }, ) => element_l.primitive() == element_r.primitive() && count_l == count_r, - (Abi::ScalarPair(l1, l2), Abi::ScalarPair(r1, r2)) => { + (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => { l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive() } // Everything else must be strictly identical. @@ -1616,14 +1634,14 @@ pub struct LayoutData { /// must be taken into account. pub variants: Variants, - /// The `abi` defines how this data is passed between functions, and it defines - /// value restrictions via `valid_range`. + /// The `backend_repr` defines how this data will be represented to the codegen backend, + /// and encodes value restrictions via `valid_range`. /// /// Note that this is entirely orthogonal to the recursive structure defined by /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has - /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants` + /// `BackendRepr::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and `variants` /// have to be taken into account to find all fields of this layout. - pub abi: Abi, + pub backend_repr: BackendRepr, /// The leaf scalar with the largest number of invalid values /// (i.e. outside of its `valid_range`), if it exists. @@ -1646,15 +1664,15 @@ pub struct LayoutData { impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { /// Returns `true` if this is an aggregate type (including a ScalarPair!) pub fn is_aggregate(&self) -> bool { - match self.abi { - Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } => false, - Abi::ScalarPair(..) | Abi::Aggregate { .. } => true, + match self.backend_repr { + BackendRepr::Uninhabited | BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => false, + BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true, } } /// Returns `true` if this is an uninhabited type pub fn is_uninhabited(&self) -> bool { - self.abi.is_uninhabited() + self.backend_repr.is_uninhabited() } pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self { @@ -1664,7 +1682,7 @@ impl LayoutData { LayoutData { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldsShape::Primitive, - abi: Abi::Scalar(scalar), + backend_repr: BackendRepr::Scalar(scalar), largest_niche, size, align, @@ -1686,7 +1704,7 @@ where let LayoutData { size, align, - abi, + backend_repr, fields, largest_niche, variants, @@ -1696,7 +1714,7 @@ f.debug_struct("Layout") .field("size", size) .field("align", align) - .field("abi", abi) + .field("abi", backend_repr) .field("fields", fields) .field("largest_niche", largest_niche) .field("variants", variants) @@ -1732,12 +1750,12 @@ impl LayoutData { /// Returns `true` if the layout corresponds to an unsized type.
#[inline] pub fn is_unsized(&self) -> bool { - self.abi.is_unsized() + self.backend_repr.is_unsized() } #[inline] pub fn is_sized(&self) -> bool { - self.abi.is_sized() + self.backend_repr.is_sized() } /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1). @@ -1750,10 +1768,12 @@ impl LayoutData { /// Note that this does *not* imply that the type is irrelevant for layout! It can still have /// non-trivial alignment constraints. You probably want to use `is_1zst` instead. pub fn is_zst(&self) -> bool { - match self.abi { - Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false, - Abi::Uninhabited => self.size.bytes() == 0, - Abi::Aggregate { sized } => sized && self.size.bytes() == 0, + match self.backend_repr { + BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => { + false + } + BackendRepr::Uninhabited => self.size.bytes() == 0, + BackendRepr::Memory { sized } => sized && self.size.bytes() == 0, } } @@ -1768,8 +1788,8 @@ impl LayoutData { // 2nd point is quite hard to check though. self.size == other.size && self.is_sized() == other.is_sized() - && self.abi.eq_up_to_validity(&other.abi) - && self.abi.is_bool() == other.abi.is_bool() + && self.backend_repr.eq_up_to_validity(&other.backend_repr) + && self.backend_repr.is_bool() == other.backend_repr.is_bool() && self.align.abi == other.align.abi && self.max_repr_align == other.max_repr_align && self.unadjusted_abi_align == other.unadjusted_abi_align diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 8a1ee48c43ca3..855ca01061190 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -458,7 +458,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { match &self.ret.mode { PassMode::Direct(attrs) => { attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn); - if let abi::Abi::Scalar(scalar) = self.ret.layout.abi { + if let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr { apply_range_attr(llvm::AttributePlace::ReturnValue, scalar); } } @@ -495,7 +495,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } PassMode::Direct(attrs) => { let i = apply(attrs); - if let abi::Abi::Scalar(scalar) = arg.layout.abi { + if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr { apply_range_attr(llvm::AttributePlace::Argument(i), scalar); } } @@ -510,7 +510,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Pair(a, b) => { let i = apply(a); let ii = apply(b); - if let abi::Abi::ScalarPair(scalar_a, scalar_b) = arg.layout.abi { + if let abi::BackendRepr::ScalarPair(scalar_a, scalar_b) = + arg.layout.backend_repr + { apply_range_attr(llvm::AttributePlace::Argument(i), scalar_a); apply_range_attr(llvm::AttributePlace::Argument(ii), scalar_b); } @@ -570,7 +572,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } if bx.cx.sess().opts.optimize != config::OptLevel::No && llvm_util::get_version() < (19, 0, 0) - && let abi::Abi::Scalar(scalar) = self.ret.layout.abi + && let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr && matches!(scalar.primitive(), Int(..)) // If the value is a boolean, the range is 0..2 and that ultimately // become 0..0 when the type becomes i1, which would be rejected diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index 3c30822a2e2f4..53758967552d4 100644 --- 
a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -880,8 +880,8 @@ fn llvm_fixup_input<'ll, 'tcx>( ) -> &'ll Value { use InlineAsmRegClass::*; let dl = &bx.tcx.data_layout; - match (reg, layout.abi) { - (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { + match (reg, layout.backend_repr) { + (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => { if let Primitive::Int(Integer::I8, _) = s.primitive() { let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8); bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0)) @@ -889,7 +889,7 @@ fn llvm_fixup_input<'ll, 'tcx>( value } } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) + (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s)) if s.primitive() != Primitive::Float(Float::F128) => { let elem_ty = llvm_asm_scalar_type(bx.cx, s); @@ -902,7 +902,7 @@ fn llvm_fixup_input<'ll, 'tcx>( } bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0)) } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count }) + (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count }) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(bx.cx, element); @@ -910,14 +910,14 @@ fn llvm_fixup_input<'ll, 'tcx>( let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect(); bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices)) } - (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) + (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s)) if s.primitive() == Primitive::Float(Float::F64) => { bx.bitcast(value, bx.cx.type_i64()) } ( X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), - Abi::Vector { .. }, + BackendRepr::Vector { .. 
}, ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)), ( X86( @@ -925,7 +925,7 @@ fn llvm_fixup_input<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) if bx.sess().asm_arch == Some(InlineAsmArch::X86) && s.primitive() == Primitive::Float(Float::F128) => { @@ -937,7 +937,7 @@ fn llvm_fixup_input<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) if s.primitive() == Primitive::Float(Float::F16) => { let value = bx.insert_element( bx.const_undef(bx.type_vector(bx.type_f16(), 8)), @@ -952,11 +952,14 @@ fn llvm_fixup_input<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Vector { element, count: count @ (8 | 16) }, + BackendRepr::Vector { element, count: count @ (8 | 16) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_i16(), count)) } - (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => { + ( + Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), + BackendRepr::Scalar(s), + ) => { if let Primitive::Int(Integer::I32, _) = s.primitive() { bx.bitcast(value, bx.cx.type_f32()) } else { @@ -969,7 +972,7 @@ fn llvm_fixup_input<'ll, 'tcx>( | ArmInlineAsmRegClass::dreg_low8 | ArmInlineAsmRegClass::dreg_low16, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) => { if let Primitive::Int(Integer::I64, _) = s.primitive() { bx.bitcast(value, bx.cx.type_f64()) @@ -986,11 +989,11 @@ fn llvm_fixup_input<'ll, 'tcx>( | ArmInlineAsmRegClass::qreg_low4 | ArmInlineAsmRegClass::qreg_low8, ), - Abi::Vector { element, count: count @ (4 | 8) }, + BackendRepr::Vector { element, count: count @ (4 | 8) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_i16(), count)) } - (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => { + (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => { match s.primitive() { // MIPS only supports register-length arithmetics. 
Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()), @@ -999,7 +1002,7 @@ fn llvm_fixup_input<'ll, 'tcx>( _ => value, } } - (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s)) + (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s)) if s.primitive() == Primitive::Float(Float::F16) && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) => { @@ -1022,15 +1025,15 @@ fn llvm_fixup_output<'ll, 'tcx>( instance: Instance<'_>, ) -> &'ll Value { use InlineAsmRegClass::*; - match (reg, layout.abi) { - (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { + match (reg, layout.backend_repr) { + (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => { if let Primitive::Int(Integer::I8, _) = s.primitive() { bx.extract_element(value, bx.const_i32(0)) } else { value } } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) + (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s)) if s.primitive() != Primitive::Float(Float::F128) => { value = bx.extract_element(value, bx.const_i32(0)); @@ -1039,7 +1042,7 @@ fn llvm_fixup_output<'ll, 'tcx>( } value } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count }) + (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count }) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(bx.cx, element); @@ -1047,14 +1050,14 @@ fn llvm_fixup_output<'ll, 'tcx>( let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect(); bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices)) } - (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) + (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s)) if s.primitive() == Primitive::Float(Float::F64) => { bx.bitcast(value, bx.cx.type_f64()) } ( X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), - Abi::Vector { .. }, + BackendRepr::Vector { .. 
}, ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)), ( X86( @@ -1062,7 +1065,7 @@ fn llvm_fixup_output<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) if bx.sess().asm_arch == Some(InlineAsmArch::X86) && s.primitive() == Primitive::Float(Float::F128) => { @@ -1074,7 +1077,7 @@ fn llvm_fixup_output<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) if s.primitive() == Primitive::Float(Float::F16) => { let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8)); bx.extract_element(value, bx.const_usize(0)) @@ -1085,11 +1088,14 @@ fn llvm_fixup_output<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Vector { element, count: count @ (8 | 16) }, + BackendRepr::Vector { element, count: count @ (8 | 16) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_f16(), count)) } - (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => { + ( + Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), + BackendRepr::Scalar(s), + ) => { if let Primitive::Int(Integer::I32, _) = s.primitive() { bx.bitcast(value, bx.cx.type_i32()) } else { @@ -1102,7 +1108,7 @@ fn llvm_fixup_output<'ll, 'tcx>( | ArmInlineAsmRegClass::dreg_low8 | ArmInlineAsmRegClass::dreg_low16, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) => { if let Primitive::Int(Integer::I64, _) = s.primitive() { bx.bitcast(value, bx.cx.type_i64()) @@ -1119,11 +1125,11 @@ fn llvm_fixup_output<'ll, 'tcx>( | ArmInlineAsmRegClass::qreg_low4 | ArmInlineAsmRegClass::qreg_low8, ), - Abi::Vector { element, count: count @ (4 | 8) }, + BackendRepr::Vector { element, count: count @ (4 | 8) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_f16(), count)) } - (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => { + (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => { match s.primitive() { // MIPS only supports register-length arithmetics. 
Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()), @@ -1133,7 +1139,7 @@ fn llvm_fixup_output<'ll, 'tcx>( _ => value, } } - (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s)) + (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s)) if s.primitive() == Primitive::Float(Float::F16) && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) => { @@ -1153,35 +1159,35 @@ fn llvm_fixup_output_type<'ll, 'tcx>( instance: Instance<'_>, ) -> &'ll Type { use InlineAsmRegClass::*; - match (reg, layout.abi) { - (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { + match (reg, layout.backend_repr) { + (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => { if let Primitive::Int(Integer::I8, _) = s.primitive() { cx.type_vector(cx.type_i8(), 8) } else { layout.llvm_type(cx) } } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) + (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s)) if s.primitive() != Primitive::Float(Float::F128) => { let elem_ty = llvm_asm_scalar_type(cx, s); let count = 16 / layout.size.bytes(); cx.type_vector(elem_ty, count) } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count }) + (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count }) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(cx, element); cx.type_vector(elem_ty, count * 2) } - (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) + (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s)) if s.primitive() == Primitive::Float(Float::F64) => { cx.type_i64() } ( X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), - Abi::Vector { .. }, + BackendRepr::Vector { .. }, ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8), ( X86( @@ -1189,7 +1195,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) if cx.sess().asm_arch == Some(InlineAsmArch::X86) && s.primitive() == Primitive::Float(Float::F128) => { @@ -1201,7 +1207,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8), ( X86( @@ -1209,11 +1215,14 @@ fn llvm_fixup_output_type<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Vector { element, count: count @ (8 | 16) }, + BackendRepr::Vector { element, count: count @ (8 | 16) }, ) if element.primitive() == Primitive::Float(Float::F16) => { cx.type_vector(cx.type_i16(), count) } - (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => { + ( + Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), + BackendRepr::Scalar(s), + ) => { if let Primitive::Int(Integer::I32, _) = s.primitive() { cx.type_f32() } else { @@ -1226,7 +1235,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( | ArmInlineAsmRegClass::dreg_low8 | ArmInlineAsmRegClass::dreg_low16, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) => { if let Primitive::Int(Integer::I64, _) = s.primitive() { cx.type_f64() @@ -1243,11 +1252,11 @@ fn llvm_fixup_output_type<'ll, 'tcx>( | ArmInlineAsmRegClass::qreg_low4 | ArmInlineAsmRegClass::qreg_low8, ), - Abi::Vector { element, count: count @ (4 | 8) }, + BackendRepr::Vector { element, count: count @ (4 | 8) }, ) if element.primitive() == Primitive::Float(Float::F16) => 
{ cx.type_vector(cx.type_i16(), count) } - (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => { + (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => { match s.primitive() { // MIPS only supports register-length arithmetics. Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(), @@ -1256,7 +1265,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( _ => layout.llvm_type(cx), } } - (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s)) + (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s)) if s.primitive() == Primitive::Float(Float::F16) && !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) => { diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 8702532c36eee..8e87869f94618 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -545,13 +545,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } let llval = const_llval.unwrap_or_else(|| { let load = self.load(llty, place.val.llval, place.val.align); - if let abi::Abi::Scalar(scalar) = place.layout.abi { + if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr { scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO); } load }); OperandValue::Immediate(self.to_immediate(llval, place.layout)) - } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi { + } else if let abi::BackendRepr::ScalarPair(a, b) = place.layout.backend_repr { let b_offset = a.size(self).align_to(b.align(self).abi); let mut load = |i, scalar: abi::Scalar, layout, align, offset| { diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index d04b525761942..c77e00aed9ac5 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -258,8 +258,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()]) } sym::va_arg => { - match fn_abi.ret.layout.abi { - abi::Abi::Scalar(scalar) => { + match fn_abi.ret.layout.backend_repr { + abi::BackendRepr::Scalar(scalar) => { match scalar.primitive() { Primitive::Int(..) => { if self.cx().size_of(ret_ty).bytes() < 4 { @@ -436,13 +436,13 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { } sym::raw_eq => { - use abi::Abi::*; + use abi::BackendRepr::*; let tp_ty = fn_args.type_at(0); let layout = self.layout_of(tp_ty).layout; - let use_integer_compare = match layout.abi() { + let use_integer_compare = match layout.backend_repr() { Scalar(_) | ScalarPair(_, _) => true, Uninhabited | Vector { .. } => false, - Aggregate { .. } => { + Memory { .. } => { // For rusty ABIs, small aggregates are actually passed // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), // so we re-use that same threshold here. @@ -549,7 +549,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { } let llret_ty = if ret_ty.is_simd() - && let abi::Abi::Aggregate { .. } = self.layout_of(ret_ty).layout.abi + && let abi::BackendRepr::Memory { .. 
} = + self.layout_of(ret_ty).layout.backend_repr { let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx()); let elem_ll_ty = match elem_ty.kind() { diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 6be4c3f034f12..2b05e24a7babf 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -1,7 +1,7 @@ use std::fmt::Write; use rustc_abi::Primitive::{Float, Int, Pointer}; -use rustc_abi::{Abi, Align, FieldsShape, Scalar, Size, Variants}; +use rustc_abi::{Align, BackendRepr, FieldsShape, Scalar, Size, Variants}; use rustc_codegen_ssa::traits::*; use rustc_middle::bug; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; @@ -17,13 +17,13 @@ fn uncached_llvm_type<'a, 'tcx>( layout: TyAndLayout<'tcx>, defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>, ) -> &'a Type { - match layout.abi { - Abi::Scalar(_) => bug!("handled elsewhere"), - Abi::Vector { element, count } => { + match layout.backend_repr { + BackendRepr::Scalar(_) => bug!("handled elsewhere"), + BackendRepr::Vector { element, count } => { let element = layout.scalar_llvm_type_at(cx, element); return cx.type_vector(element, count); } - Abi::Uninhabited | Abi::Aggregate { .. } | Abi::ScalarPair(..) => {} + BackendRepr::Uninhabited | BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {} } let name = match layout.ty.kind() { @@ -170,16 +170,21 @@ pub(crate) trait LayoutLlvmExt<'tcx> { impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { fn is_llvm_immediate(&self) -> bool { - match self.abi { - Abi::Scalar(_) | Abi::Vector { .. } => true, - Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false, + match self.backend_repr { + BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true, + BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => { + false + } } } fn is_llvm_scalar_pair(&self) -> bool { - match self.abi { - Abi::ScalarPair(..) => true, - Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false, + match self.backend_repr { + BackendRepr::ScalarPair(..) => true, + BackendRepr::Uninhabited + | BackendRepr::Scalar(_) + | BackendRepr::Vector { .. } + | BackendRepr::Memory { .. } => false, } } @@ -198,7 +203,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { // This must produce the same result for `repr(transparent)` wrappers as for the inner type! // In other words, this should generally not look at the type at all, but only at the // layout. - if let Abi::Scalar(scalar) = self.abi { + if let BackendRepr::Scalar(scalar) = self.backend_repr { // Use a different cache for scalars because pointers to DSTs // can be either wide or thin (data pointers of wide pointers). if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) { @@ -248,13 +253,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { } fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { - match self.abi { - Abi::Scalar(scalar) => { + match self.backend_repr { + BackendRepr::Scalar(scalar) => { if scalar.is_bool() { return cx.type_i1(); } } - Abi::ScalarPair(..) => { + BackendRepr::ScalarPair(..) => { // An immediate pair always contains just the two elements, without any padding // filler, as it should never be stored to memory. return cx.type_struct( @@ -287,7 +292,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { // This must produce the same result for `repr(transparent)` wrappers as for the inner type! 
// In other words, this should generally not look at the type at all, but only at the // layout. - let Abi::ScalarPair(a, b) = self.abi else { + let BackendRepr::ScalarPair(a, b) = self.backend_repr else { bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self); }; let scalar = [a, b][index]; diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index a17a127f01491..283740fa66438 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -1532,7 +1532,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // the load would just produce `OperandValue::Ref` instead // of the `OperandValue::Immediate` we need for the call. llval = bx.load(bx.backend_type(arg.layout), llval, align); - if let abi::Abi::Scalar(scalar) = arg.layout.abi { + if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr { if scalar.is_bool() { bx.range_metadata(llval, WrappingRange { start: 0, end: 1 }); } diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs index 15f45b226f5e4..54b9c9cc89f51 100644 --- a/compiler/rustc_codegen_ssa/src/mir/constant.rs +++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs @@ -1,8 +1,8 @@ +use rustc_abi::BackendRepr; use rustc_middle::mir::interpret::ErrorHandled; use rustc_middle::ty::layout::HasTyCtxt; use rustc_middle::ty::{self, Ty}; use rustc_middle::{bug, mir, span_bug}; -use rustc_target::abi::Abi; use super::FunctionCx; use crate::errors; @@ -86,7 +86,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { .map(|field| { if let Some(prim) = field.try_to_scalar() { let layout = bx.layout_of(field_ty); - let Abi::Scalar(scalar) = layout.abi else { + let BackendRepr::Scalar(scalar) = layout.backend_repr else { bug!("from_const: invalid ByVal layout: {:#?}", layout); }; bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout)) diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs index 146f55f95c21a..21d20475408b2 100644 --- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs +++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs @@ -2,6 +2,7 @@ use std::collections::hash_map::Entry; use std::marker::PhantomData; use std::ops::Range; +use rustc_abi::{BackendRepr, FieldIdx, FieldsShape, Size, VariantIdx}; use rustc_data_structures::fx::FxHashMap; use rustc_index::IndexVec; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; @@ -11,7 +12,6 @@ use rustc_middle::{bug, mir, ty}; use rustc_session::config::DebugInfo; use rustc_span::symbol::{Symbol, kw}; use rustc_span::{BytePos, Span, hygiene}; -use rustc_target::abi::{Abi, FieldIdx, FieldsShape, Size, VariantIdx}; use super::operand::{OperandRef, OperandValue}; use super::place::{PlaceRef, PlaceValue}; @@ -510,7 +510,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // be marked as a `LocalVariable` for MSVC debuggers to visualize // their data correctly. 
(See #81894 & #88625) let var_ty_layout = self.cx.layout_of(var_ty); - if let Abi::ScalarPair(_, _) = var_ty_layout.abi { + if let BackendRepr::ScalarPair(_, _) = var_ty_layout.backend_repr { VariableKind::LocalVariable } else { VariableKind::ArgumentVariable(arg_index) diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index 88ceff327d0aa..19101ec2d1ba3 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -4,7 +4,7 @@ use std::fmt; use arrayvec::ArrayVec; use either::Either; use rustc_abi as abi; -use rustc_abi::{Abi, Align, Size}; +use rustc_abi::{Align, BackendRepr, Size}; use rustc_middle::bug; use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range}; use rustc_middle::mir::{self, ConstValue}; @@ -163,7 +163,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { let val = match val { ConstValue::Scalar(x) => { - let Abi::Scalar(scalar) = layout.abi else { + let BackendRepr::Scalar(scalar) = layout.backend_repr else { bug!("from_const: invalid ByVal layout: {:#?}", layout); }; let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout)); @@ -171,7 +171,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { } ConstValue::ZeroSized => return OperandRef::zero_sized(layout), ConstValue::Slice { data, meta } => { - let Abi::ScalarPair(a_scalar, _) = layout.abi else { + let BackendRepr::ScalarPair(a_scalar, _) = layout.backend_repr else { bug!("from_const: invalid ScalarPair layout: {:#?}", layout); }; let a = Scalar::from_pointer( @@ -221,14 +221,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { // case where some of the bytes are initialized and others are not. So, we need an extra // check that walks over the type of `mplace` to make sure it is truly correct to treat this // like a `Scalar` (or `ScalarPair`). - match layout.abi { - Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => { + match layout.backend_repr { + BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => { let size = s.size(bx); assert_eq!(size, layout.size, "abi::Scalar size does not match layout size"); let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout)); OperandRef { val: OperandValue::Immediate(val), layout } } - Abi::ScalarPair( + BackendRepr::ScalarPair( a @ abi::Scalar::Initialized { .. }, b @ abi::Scalar::Initialized { .. }, ) => { @@ -322,7 +322,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { llval: V, layout: TyAndLayout<'tcx>, ) -> Self { - let val = if let Abi::ScalarPair(..) = layout.abi { + let val = if let BackendRepr::ScalarPair(..) = layout.backend_repr { debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout); // Deconstruct the immediate aggregate. @@ -343,7 +343,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { let field = self.layout.field(bx.cx(), i); let offset = self.layout.fields.offset(i); - let mut val = match (self.val, self.layout.abi) { + let mut val = match (self.val, self.layout.backend_repr) { // If the field is ZST, it has no data. _ if field.is_zst() => OperandValue::ZeroSized, @@ -356,7 +356,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { } // Extract a scalar component from a pair. 
- (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => { + (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => { if offset.bytes() == 0 { assert_eq!(field.size, a.size(bx.cx())); OperandValue::Immediate(a_llval) @@ -368,30 +368,30 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { } // `#[repr(simd)]` types are also immediate. - (OperandValue::Immediate(llval), Abi::Vector { .. }) => { + (OperandValue::Immediate(llval), BackendRepr::Vector { .. }) => { OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64))) } _ => bug!("OperandRef::extract_field({:?}): not applicable", self), }; - match (&mut val, field.abi) { + match (&mut val, field.backend_repr) { (OperandValue::ZeroSized, _) => {} ( OperandValue::Immediate(llval), - Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. }, + BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. }, ) => { // Bools in union fields needs to be truncated. *llval = bx.to_immediate(*llval, field); } - (OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => { + (OperandValue::Pair(a, b), BackendRepr::ScalarPair(a_abi, b_abi)) => { // Bools in union fields needs to be truncated. *a = bx.to_immediate_scalar(*a, a_abi); *b = bx.to_immediate_scalar(*b, b_abi); } // Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]); - (OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => { - assert_matches!(self.layout.abi, Abi::Vector { .. }); + (OperandValue::Immediate(llval), BackendRepr::Memory { sized: true }) => { + assert_matches!(self.layout.backend_repr, BackendRepr::Vector { .. }); let llfield_ty = bx.cx().backend_type(field); @@ -400,7 +400,10 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { bx.store(*llval, llptr, field.align.abi); *llval = bx.load(llfield_ty, llptr, field.align.abi); } - (OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => { + ( + OperandValue::Immediate(_), + BackendRepr::Uninhabited | BackendRepr::Memory { sized: false }, + ) => { bug!() } (OperandValue::Pair(..), _) => bug!(), @@ -494,7 +497,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue { bx.store_with_flags(val, dest.val.llval, dest.val.align, flags); } OperandValue::Pair(a, b) => { - let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else { + let BackendRepr::ScalarPair(a_scalar, b_scalar) = dest.layout.backend_repr else { bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout); }; let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi); @@ -645,7 +648,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // However, some SIMD types do not actually use the vector ABI // (in particular, packed SIMD types do not). Ensure we exclude those. let layout = bx.layout_of(constant_ty); - if let Abi::Vector { .. } = layout.abi { + if let BackendRepr::Vector { .. 
} = layout.backend_repr { let (llval, ty) = self.immediate_const_vector(bx, constant); return OperandRef { val: OperandValue::Immediate(llval), diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 6e8c193cd7589..86cf0f9614d3e 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -1136,17 +1136,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { OperandValueKind::ZeroSized } else if self.cx.is_backend_immediate(layout) { assert!(!self.cx.is_backend_scalar_pair(layout)); - OperandValueKind::Immediate(match layout.abi { - abi::Abi::Scalar(s) => s, - abi::Abi::Vector { element, .. } => element, + OperandValueKind::Immediate(match layout.backend_repr { + abi::BackendRepr::Scalar(s) => s, + abi::BackendRepr::Vector { element, .. } => element, x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"), }) } else if self.cx.is_backend_scalar_pair(layout) { - let abi::Abi::ScalarPair(s1, s2) = layout.abi else { + let abi::BackendRepr::ScalarPair(s1, s2) = layout.backend_repr else { span_bug!( self.mir.span, "Couldn't translate {:?} as backend scalar pair", - layout.abi, + layout.backend_repr, ); }; OperandValueKind::Pair(s1, s2) diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs index 50a5171414695..768a0439ab51b 100644 --- a/compiler/rustc_codegen_ssa/src/traits/builder.rs +++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs @@ -1,13 +1,13 @@ use std::assert_matches::assert_matches; use std::ops::Deref; +use rustc_abi::{Align, BackendRepr, Scalar, Size, WrappingRange}; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout}; use rustc_middle::ty::{Instance, Ty}; use rustc_session::config::OptLevel; use rustc_span::Span; use rustc_target::abi::call::FnAbi; -use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange}; use super::abi::AbiBuilderMethods; use super::asm::AsmBuilderMethods; @@ -162,7 +162,7 @@ pub trait BuilderMethods<'a, 'tcx>: fn from_immediate(&mut self, val: Self::Value) -> Self::Value; fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value { - if let Abi::Scalar(scalar) = layout.abi { + if let BackendRepr::Scalar(scalar) = layout.backend_repr { self.to_immediate_scalar(val, scalar) } else { val diff --git a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs index 743924faa21d8..bc2661c4fc71f 100644 --- a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs +++ b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs @@ -131,7 +131,7 @@ impl<'tcx> interpret::Machine<'tcx> for DummyMachine { interp_ok(match bin_op { Eq | Ne | Lt | Le | Gt | Ge => { // Types can differ, e.g. fn ptrs with different `for`. - assert_eq!(left.layout.abi, right.layout.abi); + assert_eq!(left.layout.backend_repr, right.layout.backend_repr); let size = ecx.pointer_size(); // Just compare the bits. ScalarPairs are compared lexicographically. // We thus always compare pairs and simply fill scalars up with 0. 
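(Aside, not part of the patch: a minimal sketch of the comparison strategy the comment
above describes, assuming each scalar has already been reduced to raw bits; `u128` and
the helper name `compare_as_pair` are stand-ins, not interpreter APIs.)

    // A lone scalar is padded out to a pair with a 0 in the second slot, so a
    // single tuple comparison covers both cases; tuple `Ord` is lexicographic.
    fn compare_as_pair(l: (u128, u128), r: (u128, u128)) -> std::cmp::Ordering {
        l.cmp(&r) // first components decide, second components break ties
    }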
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs index 7319c251bbd99..81b9d73b9528c 100644 --- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs @@ -1,6 +1,7 @@ use std::sync::atomic::Ordering::Relaxed; use either::{Left, Right}; +use rustc_abi::{self as abi, BackendRepr}; use rustc_hir::def::DefKind; use rustc_middle::bug; use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo}; @@ -12,7 +13,6 @@ use rustc_middle::ty::print::with_no_trimmed_paths; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_span::def_id::LocalDefId; use rustc_span::{DUMMY_SP, Span}; -use rustc_target::abi::{self, Abi}; use tracing::{debug, instrument, trace}; use super::{CanAccessMutGlobal, CompileTimeInterpCx, CompileTimeMachine}; @@ -174,8 +174,8 @@ pub(super) fn op_to_const<'tcx>( // type (it's used throughout the compiler and having it work just on literals is not enough) // and we want it to be fast (i.e., don't go to an `Allocation` and reconstruct the `Scalar` // from its byte-serialized form). - let force_as_immediate = match op.layout.abi { - Abi::Scalar(abi::Scalar::Initialized { .. }) => true, + let force_as_immediate = match op.layout.backend_repr { + BackendRepr::Scalar(abi::Scalar::Initialized { .. }) => true, // We don't *force* `ConstValue::Slice` for `ScalarPair`. This has the advantage that if the // input `op` is a place, then turning it into a `ConstValue` and back into a `OpTy` will // not have to generate any duplicate allocations (we preserve the original `AllocId` in diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs index 9e80e666ba91b..ea88b2ed22e2f 100644 --- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs +++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs @@ -1,10 +1,10 @@ +use rustc_abi::{BackendRepr, VariantIdx}; use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId}; use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout}; use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt}; use rustc_middle::{bug, mir}; use rustc_span::DUMMY_SP; -use rustc_target::abi::{Abi, VariantIdx}; use tracing::{debug, instrument, trace}; use super::eval_queries::{mk_eval_cx_to_read_const_val, op_to_const}; @@ -117,7 +117,7 @@ fn const_to_valtree_inner<'tcx>( let val = ecx.read_immediate(place).unwrap(); // We could allow wide raw pointers where both sides are integers in the future, // but for now we reject them. - if matches!(val.layout.abi, Abi::ScalarPair(..)) { + if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) { return Err(ValTreeCreationError::NonSupportedType(ty)); } let val = val.to_scalar(); @@ -311,7 +311,7 @@ pub fn valtree_to_const_value<'tcx>( // Fast path to avoid some allocations. return mir::ConstValue::ZeroSized; } - if layout.abi.is_scalar() + if layout.backend_repr.is_scalar() && (matches!(ty.kind(), ty::Tuple(_)) || matches!(ty.kind(), ty::Adt(def, _) if def.is_struct())) { diff --git a/compiler/rustc_const_eval/src/interpret/call.rs b/compiler/rustc_const_eval/src/interpret/call.rs index 85d99900c6ccd..1915bf75c95ad 100644 --- a/compiler/rustc_const_eval/src/interpret/call.rs +++ b/compiler/rustc_const_eval/src/interpret/call.rs @@ -172,8 +172,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // must be compatible. 
So we just accept everything with Pointer ABI as compatible, // even if this will accept some code that is not stably guaranteed to work. // This also handles function pointers. - let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.abi { - abi::Abi::Scalar(s) => match s.primitive() { + let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.backend_repr { + abi::BackendRepr::Scalar(s) => match s.primitive() { abi::Primitive::Pointer(addr_space) => Some(addr_space), _ => None, }, diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs index 64b15611316f9..60d5e904bd9ab 100644 --- a/compiler/rustc_const_eval/src/interpret/cast.rs +++ b/compiler/rustc_const_eval/src/interpret/cast.rs @@ -274,7 +274,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { cast_ty: Ty<'tcx>, ) -> InterpResult<'tcx, Scalar> { // Let's make sure v is sign-extended *if* it has a signed type. - let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`. + let signed = src_layout.backend_repr.is_signed(); // Also asserts that abi is `Scalar`. let v = match src_layout.ty.kind() { Uint(_) | RawPtr(..) | FnPtr(..) => scalar.to_uint(src_layout.size)?, diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs index feed08606799b..bb4ac9556ea88 100644 --- a/compiler/rustc_const_eval/src/interpret/discriminant.rs +++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs @@ -112,7 +112,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // Read tag and sanity-check `tag_layout`. let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?; assert_eq!(tag_layout.size, tag_val.layout.size); - assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed()); + assert_eq!(tag_layout.backend_repr.is_signed(), tag_val.layout.backend_repr.is_signed()); trace!("tag value: {}", tag_val); // Figure out which discriminant and variant this corresponds to. diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs index 6148123bdfeb4..80e14ee887cb1 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs @@ -563,7 +563,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair(); interp_ok(if overflowed.to_bool()? { let size = l.layout.size; - if l.layout.abi.is_signed() { + if l.layout.backend_repr.is_signed() { // For signed ints the saturated value depends on the sign of the first // term since the sign of the second term can be inferred from this and // the fact that the operation has overflowed (if either is 0 no diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs index cd5e2aeca855e..43ae98e74b002 100644 --- a/compiler/rustc_const_eval/src/interpret/operand.rs +++ b/compiler/rustc_const_eval/src/interpret/operand.rs @@ -5,7 +5,7 @@ use std::assert_matches::assert_matches; use either::{Either, Left, Right}; use rustc_abi as abi; -use rustc_abi::{Abi, HasDataLayout, Size}; +use rustc_abi::{BackendRepr, HasDataLayout, Size}; use rustc_hir::def::Namespace; use rustc_middle::mir::interpret::ScalarSizeMismatch; use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutOf, TyAndLayout}; @@ -114,9 +114,9 @@ impl Immediate { } /// Assert that this immediate is a valid value for the given ABI. 
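    /// In particular, a `Scalar` immediate must pair with `BackendRepr::Scalar`
    /// and a `ScalarPair` immediate with `BackendRepr::ScalarPair`, with each
    /// value's size matching the corresponding scalar in the layout.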
- pub fn assert_matches_abi(self, abi: Abi, msg: &str, cx: &impl HasDataLayout) { + pub fn assert_matches_abi(self, abi: BackendRepr, msg: &str, cx: &impl HasDataLayout) { match (self, abi) { - (Immediate::Scalar(scalar), Abi::Scalar(s)) => { + (Immediate::Scalar(scalar), BackendRepr::Scalar(s)) => { assert_eq!(scalar.size(), s.size(cx), "{msg}: scalar value has wrong size"); if !matches!(s.primitive(), abi::Primitive::Pointer(..)) { // This is not a pointer, it should not carry provenance. @@ -126,7 +126,7 @@ impl Immediate { ); } } - (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => { + (Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => { assert_eq!( a_val.size(), a.size(cx), @@ -244,7 +244,7 @@ impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> { impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { #[inline] pub fn from_scalar(val: Scalar, layout: TyAndLayout<'tcx>) -> Self { - debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout"); + debug_assert!(layout.backend_repr.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout"); debug_assert_eq!(val.size(), layout.size); ImmTy { imm: val.into(), layout } } @@ -252,7 +252,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { #[inline] pub fn from_scalar_pair(a: Scalar, b: Scalar, layout: TyAndLayout<'tcx>) -> Self { debug_assert!( - matches!(layout.abi, Abi::ScalarPair(..)), + matches!(layout.backend_repr, BackendRepr::ScalarPair(..)), "`ImmTy::from_scalar_pair` on non-scalar-pair layout" ); let imm = Immediate::ScalarPair(a, b); @@ -263,9 +263,9 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { pub fn from_immediate(imm: Immediate, layout: TyAndLayout<'tcx>) -> Self { // Without a `cx` we cannot call `assert_matches_abi`. debug_assert!( - match (imm, layout.abi) { - (Immediate::Scalar(..), Abi::Scalar(..)) => true, - (Immediate::ScalarPair(..), Abi::ScalarPair(..)) => true, + match (imm, layout.backend_repr) { + (Immediate::Scalar(..), BackendRepr::Scalar(..)) => true, + (Immediate::ScalarPair(..), BackendRepr::ScalarPair(..)) => true, (Immediate::Uninit, _) if layout.is_sized() => true, _ => false, }, @@ -356,7 +356,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self { // Verify that the input matches its type. if cfg!(debug_assertions) { - self.assert_matches_abi(self.layout.abi, "invalid input to Immediate::offset", cx); + self.assert_matches_abi( + self.layout.backend_repr, + "invalid input to Immediate::offset", + cx, + ); } // `ImmTy` have already been checked to be in-bounds, so we can just check directly if this // remains in-bounds. This cannot actually be violated since projections are type-checked @@ -370,19 +374,19 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { ); // This makes several assumptions about what layouts we will encounter; we match what // codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`). - let inner_val: Immediate<_> = match (**self, self.layout.abi) { + let inner_val: Immediate<_> = match (**self, self.layout.backend_repr) { // If the entire value is uninit, then so is the field (can happen in ConstProp). (Immediate::Uninit, _) => Immediate::Uninit, // If the field is uninhabited, we can forget the data (can happen in ConstProp). // `enum S { A(!), B, C }` is an example of an enum with Scalar layout that // has an `Uninhabited` variant, which means this case is possible. 
- _ if layout.abi.is_uninhabited() => Immediate::Uninit, + _ if layout.is_uninhabited() => Immediate::Uninit, // the field contains no information, can be left uninit // (Scalar/ScalarPair can contain even aligned ZST, not just 1-ZST) _ if layout.is_zst() => Immediate::Uninit, // some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try // to detect those here and also give them no data - _ if matches!(layout.abi, Abi::Aggregate { .. }) + _ if matches!(layout.backend_repr, BackendRepr::Memory { .. }) && matches!(layout.variants, abi::Variants::Single { .. }) && matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) => { @@ -394,7 +398,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { **self } // extract fields from types with `ScalarPair` ABI - (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => { + (Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => { Immediate::from(if offset.bytes() == 0 { a_val } else { @@ -411,7 +415,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> { ), }; // Ensure the new layout matches the new value. - inner_val.assert_matches_abi(layout.abi, "invalid field type in Immediate::offset", cx); + inner_val.assert_matches_abi( + layout.backend_repr, + "invalid field type in Immediate::offset", + cx, + ); ImmTy::from_immediate(inner_val, layout) } @@ -567,8 +575,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // case where some of the bytes are initialized and others are not. So, we need an extra // check that walks over the type of `mplace` to make sure it is truly correct to treat this // like a `Scalar` (or `ScalarPair`). - interp_ok(match mplace.layout.abi { - Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => { + interp_ok(match mplace.layout.backend_repr { + BackendRepr::Scalar(abi::Scalar::Initialized { value: s, .. }) => { let size = s.size(self); assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size"); let scalar = alloc.read_scalar( @@ -577,7 +585,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { )?; Some(ImmTy::from_scalar(scalar, mplace.layout)) } - Abi::ScalarPair( + BackendRepr::ScalarPair( abi::Scalar::Initialized { value: a, .. }, abi::Scalar::Initialized { value: b, .. }, ) => { @@ -637,9 +645,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { op: &impl Projectable<'tcx, M::Provenance>, ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> { if !matches!( - op.layout().abi, - Abi::Scalar(abi::Scalar::Initialized { .. }) - | Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. }) + op.layout().backend_repr, + BackendRepr::Scalar(abi::Scalar::Initialized { .. }) + | BackendRepr::ScalarPair( + abi::Scalar::Initialized { .. }, + abi::Scalar::Initialized { .. } + ) ) { span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty); } diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs index 380db90748128..cf280e0c1aeb7 100644 --- a/compiler/rustc_const_eval/src/interpret/operator.rs +++ b/compiler/rustc_const_eval/src/interpret/operator.rs @@ -114,7 +114,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { let l_bits = left.layout.size.bits(); // Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is // the one MIR operator that does *not* directly map to a single LLVM operation.) 
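        // (Worked example, not from the patch: with a 32-bit left operand, a shift
        // amount of 35 reduces to 3, and a signed shift amount of -29 also reduces
        // to 3, since `rem_euclid` always lands the result in `0..32`.)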
- let (shift_amount, overflow) = if right.layout.abi.is_signed() { + let (shift_amount, overflow) = if right.layout.backend_repr.is_signed() { let shift_amount = r_signed(); let rem = shift_amount.rem_euclid(l_bits.into()); // `rem` is guaranteed positive, so the `unwrap` cannot fail @@ -126,7 +126,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { }; let shift_amount = u32::try_from(shift_amount).unwrap(); // we brought this in the range `0..size` so this will always fit // Compute the shifted result. - let result = if left.layout.abi.is_signed() { + let result = if left.layout.backend_repr.is_signed() { let l = l_signed(); let result = match bin_op { Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(), @@ -147,7 +147,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { if overflow && let Some(intrinsic) = throw_ub_on_overflow { throw_ub!(ShiftOverflow { intrinsic, - shift_amount: if right.layout.abi.is_signed() { + shift_amount: if right.layout.backend_repr.is_signed() { Either::Right(r_signed()) } else { Either::Left(r_unsigned()) @@ -171,7 +171,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { let size = left.layout.size; // Operations that need special treatment for signed integers - if left.layout.abi.is_signed() { + if left.layout.backend_repr.is_signed() { let op: Option bool> = match bin_op { Lt => Some(i128::lt), Le => Some(i128::le), @@ -250,7 +250,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { BitXor => ImmTy::from_uint(l ^ r, left.layout), _ => { - assert!(!left.layout.abi.is_signed()); + assert!(!left.layout.backend_repr.is_signed()); let op: fn(u128, u128) -> (u128, bool) = match bin_op { Add | AddUnchecked | AddWithOverflow => u128::overflowing_add, Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub, @@ -332,7 +332,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { } let offset_bytes = val.to_target_isize(self)?; - if !right.layout.abi.is_signed() && offset_bytes < 0 { + if !right.layout.backend_repr.is_signed() && offset_bytes < 0 { // We were supposed to do an unsigned offset but the result is negative -- this // can only mean that the cast wrapped around. throw_ub!(PointerArithOverflow) diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs index 81b926a1b65fa..139a1db60e03f 100644 --- a/compiler/rustc_const_eval/src/interpret/place.rs +++ b/compiler/rustc_const_eval/src/interpret/place.rs @@ -5,11 +5,11 @@ use std::assert_matches::assert_matches; use either::{Either, Left, Right}; +use rustc_abi::{Align, BackendRepr, HasDataLayout, Size}; use rustc_ast::Mutability; use rustc_middle::ty::Ty; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::{bug, mir, span_bug}; -use rustc_target::abi::{Abi, Align, HasDataLayout, Size}; use tracing::{instrument, trace}; use super::{ @@ -659,7 +659,7 @@ where // Unfortunately this is too expensive to do in release builds. if cfg!(debug_assertions) { src.assert_matches_abi( - local_layout.abi, + local_layout.backend_repr, "invalid immediate for given destination place", self, ); @@ -683,7 +683,11 @@ where ) -> InterpResult<'tcx> { // We use the sizes from `value` below. // Ensure that matches the type of the place it is written to. 
- value.assert_matches_abi(layout.abi, "invalid immediate for given destination place", self); + value.assert_matches_abi( + layout.backend_repr, + "invalid immediate for given destination place", + self, + ); // Note that it is really important that the type here is the right one, and matches the // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here // to handle padding properly, which is only correct if we never look at this data with the @@ -700,7 +704,7 @@ where alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar) } Immediate::ScalarPair(a_val, b_val) => { - let Abi::ScalarPair(a, b) = layout.abi else { + let BackendRepr::ScalarPair(a, b) = layout.backend_repr else { span_bug!( self.cur_span(), "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}", diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index 8b5bb1332e796..cd2c1ef36132e 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -11,6 +11,10 @@ use std::num::NonZero; use either::{Left, Right}; use hir::def::DefKind; +use rustc_abi::{ + BackendRepr, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants, + WrappingRange, +}; use rustc_ast::Mutability; use rustc_data_structures::fx::FxHashSet; use rustc_hir as hir; @@ -23,9 +27,6 @@ use rustc_middle::mir::interpret::{ use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout}; use rustc_middle::ty::{self, Ty}; use rustc_span::symbol::{Symbol, sym}; -use rustc_target::abi::{ - Abi, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange, -}; use tracing::trace; use super::machine::AllocMap; @@ -422,7 +423,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> { // Reset provenance: ensure slice tail metadata does not preserve provenance, // and ensure all pointers do not preserve partial provenance. if self.reset_provenance_and_padding { - if matches!(imm.layout.abi, Abi::Scalar(..)) { + if matches!(imm.layout.backend_repr, BackendRepr::Scalar(..)) { // A thin pointer. If it has provenance, we don't have to do anything. // If it does not, ensure we clear the provenance in memory. if matches!(imm.to_scalar(), Scalar::Int(..)) { @@ -981,7 +982,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> { let elem = layout.field(cx, 0); // Fast-path for large arrays of simple types that do not contain any padding. - if elem.abi.is_scalar() { + if elem.backend_repr.is_scalar() { out.add_range(base_offset, elem.size * count); } else { for idx in 0..count { @@ -1299,19 +1300,19 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt, // FIXME: We could avoid some redundant checks here. For newtypes wrapping // scalars, we do the same check on every "level" (e.g., first we check // MyNewtype and then the scalar in there). - match val.layout.abi { - Abi::Uninhabited => { + match val.layout.backend_repr { + BackendRepr::Uninhabited => { let ty = val.layout.ty; throw_validation_failure!(self.path, UninhabitedVal { ty }); } - Abi::Scalar(scalar_layout) => { + BackendRepr::Scalar(scalar_layout) => { if !scalar_layout.is_uninit_valid() { // There is something to check here. 
let scalar = self.read_scalar(val, ExpectedKind::InitScalar)?; self.visit_scalar(scalar, scalar_layout)?; } } - Abi::ScalarPair(a_layout, b_layout) => { + BackendRepr::ScalarPair(a_layout, b_layout) => { // We can only proceed if *both* scalars need to be initialized. // FIXME: find a way to also check ScalarPair when one side can be uninit but // the other must be init. @@ -1322,12 +1323,12 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt, self.visit_scalar(b, b_layout)?; } } - Abi::Vector { .. } => { + BackendRepr::Vector { .. } => { // No checks here, we assume layout computation gets this right. // (This is harder to check since Miri does not represent these as `Immediate`. We // also cannot use field projections since this might be a newtype around a vector.) } - Abi::Aggregate { .. } => { + BackendRepr::Memory { .. } => { // Nothing to do. } } diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs index 7a8b976dfc4ee..f743525f35997 100644 --- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs +++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs @@ -1,9 +1,9 @@ +use rustc_abi::{BackendRepr, FieldsShape, Scalar, Variants}; use rustc_middle::bug; use rustc_middle::ty::layout::{ HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement, }; use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt}; -use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants}; use crate::const_eval::{CanAccessMutGlobal, CheckAlignment, CompileTimeMachine}; use crate::interpret::{InterpCx, MemoryKind}; @@ -111,12 +111,12 @@ fn check_validity_requirement_lax<'tcx>( }; // Check the ABI. - let valid = match this.abi { - Abi::Uninhabited => false, // definitely UB - Abi::Scalar(s) => scalar_allows_raw_init(s), - Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2), - Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s), - Abi::Aggregate { .. } => true, // Fields are checked below. + let valid = match this.backend_repr { + BackendRepr::Uninhabited => false, // definitely UB + BackendRepr::Scalar(s) => scalar_allows_raw_init(s), + BackendRepr::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2), + BackendRepr::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s), + BackendRepr::Memory { .. } => true, // Fields are checked below. }; if !valid { // This is definitely not okay. diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs index dbb8c66753236..6400685101b5a 100644 --- a/compiler/rustc_lint/src/builtin.rs +++ b/compiler/rustc_lint/src/builtin.rs @@ -16,6 +16,7 @@ use std::fmt::Write; use ast::token::TokenKind; +use rustc_abi::BackendRepr; use rustc_ast::tokenstream::{TokenStream, TokenTree}; use rustc_ast::visit::{FnCtxt, FnKind}; use rustc_ast::{self as ast, *}; @@ -40,7 +41,6 @@ use rustc_span::edition::Edition; use rustc_span::source_map::Spanned; use rustc_span::symbol::{Ident, Symbol, kw, sym}; use rustc_span::{BytePos, InnerSpan, Span}; -use rustc_target::abi::Abi; use rustc_target::asm::InlineAsmArch; use rustc_trait_selection::infer::{InferCtxtExt, TyCtxtInferExt}; use rustc_trait_selection::traits::misc::type_allowed_to_implement_copy; @@ -2466,7 +2466,9 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue { // Check if this ADT has a constrained layout (like `NonNull` and friends). 
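        // (E.g. `NonNull<T>` is a scalar whose valid range excludes 0, so a
        // zeroed or uninit value of it is reported via the messages below.)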
if let Ok(layout) = cx.tcx.layout_of(cx.param_env.and(ty)) { - if let Abi::Scalar(scalar) | Abi::ScalarPair(scalar, _) = &layout.abi { + if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) = + &layout.backend_repr + { let range = scalar.valid_range(cx); let msg = if !range.contains(0) { "must be non-null" diff --git a/compiler/rustc_lint/src/foreign_modules.rs b/compiler/rustc_lint/src/foreign_modules.rs index abe4e3e78ee79..394ea798d3e53 100644 --- a/compiler/rustc_lint/src/foreign_modules.rs +++ b/compiler/rustc_lint/src/foreign_modules.rs @@ -217,7 +217,7 @@ fn structurally_same_type<'tcx>( // `extern` blocks cannot be generic, so we'll always get a layout here. let a_layout = tcx.layout_of(param_env.and(a)).unwrap(); let b_layout = tcx.layout_of(param_env.and(b)).unwrap(); - assert_eq!(a_layout.abi, b_layout.abi); + assert_eq!(a_layout.backend_repr, b_layout.backend_repr); assert_eq!(a_layout.size, b_layout.size); assert_eq!(a_layout.align, b_layout.align); } diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs index 0751d35cb9c3d..88878a018e7a6 100644 --- a/compiler/rustc_lint/src/types.rs +++ b/compiler/rustc_lint/src/types.rs @@ -1,6 +1,7 @@ use std::iter; use std::ops::ControlFlow; +use rustc_abi::{BackendRepr, TagEncoding, Variants, WrappingRange}; use rustc_data_structures::fx::FxHashSet; use rustc_errors::DiagMessage; use rustc_hir::{Expr, ExprKind}; @@ -13,7 +14,6 @@ use rustc_session::{declare_lint, declare_lint_pass, impl_lint_pass}; use rustc_span::def_id::LocalDefId; use rustc_span::symbol::sym; use rustc_span::{Span, Symbol, source_map}; -use rustc_target::abi::{Abi, TagEncoding, Variants, WrappingRange}; use rustc_target::spec::abi::Abi as SpecAbi; use tracing::debug; use {rustc_ast as ast, rustc_hir as hir}; @@ -776,8 +776,8 @@ pub(crate) fn repr_nullable_ptr<'tcx>( bug!("should be able to compute the layout of non-polymorphic type"); } - let field_ty_abi = &field_ty_layout.ok()?.abi; - if let Abi::Scalar(field_ty_scalar) = field_ty_abi { + let field_ty_abi = &field_ty_layout.ok()?.backend_repr; + if let BackendRepr::Scalar(field_ty_scalar) = field_ty_abi { match field_ty_scalar.valid_range(&tcx) { WrappingRange { start: 0, end } if end == field_ty_scalar.size(&tcx).unsigned_int_max() - 1 => diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs index 2c7a3ffd04c8a..0560ffe058a37 100644 --- a/compiler/rustc_middle/src/ty/layout.rs +++ b/compiler/rustc_middle/src/ty/layout.rs @@ -4,8 +4,9 @@ use std::{cmp, fmt}; use rustc_abi::Primitive::{self, Float, Int, Pointer}; use rustc_abi::{ - Abi, AddressSpace, Align, FieldsShape, HasDataLayout, Integer, LayoutCalculator, LayoutData, - PointeeInfo, PointerKind, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout, Variants, + AddressSpace, Align, BackendRepr, FieldsShape, HasDataLayout, Integer, LayoutCalculator, + LayoutData, PointeeInfo, PointerKind, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout, + Variants, }; use rustc_error_messages::DiagMessage; use rustc_errors::{ @@ -757,7 +758,7 @@ where Some(fields) => FieldsShape::Union(fields), None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() }, }, - abi: Abi::Uninhabited, + backend_repr: BackendRepr::Uninhabited, largest_niche: None, align: tcx.data_layout.i8_align, size: Size::ZERO, diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs index fd949a533845e..2357dd7349057 100644 
--- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs +++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs @@ -13,7 +13,7 @@ use rustc_middle::ty::util::IntTypeExt; use rustc_middle::ty::{self, Ty, UpvarArgs}; use rustc_span::source_map::Spanned; use rustc_span::{DUMMY_SP, Span}; -use rustc_target::abi::{Abi, FieldIdx, Primitive}; +use rustc_target::abi::{BackendRepr, FieldIdx, Primitive}; use tracing::debug; use crate::build::expr::as_place::PlaceBase; @@ -207,7 +207,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ); let (op, ty) = (Operand::Move(discr), discr_ty); - if let Abi::Scalar(scalar) = layout.unwrap().abi + if let BackendRepr::Scalar(scalar) = layout.unwrap().backend_repr && !scalar.is_always_valid(&this.tcx) && let Primitive::Int(int_width, _signed) = scalar.primitive() { diff --git a/compiler/rustc_mir_dataflow/src/value_analysis.rs b/compiler/rustc_mir_dataflow/src/value_analysis.rs index f7d4a082779b7..80151b8ba2dec 100644 --- a/compiler/rustc_mir_dataflow/src/value_analysis.rs +++ b/compiler/rustc_mir_dataflow/src/value_analysis.rs @@ -858,7 +858,7 @@ impl<'tcx> Map<'tcx> { // Allocate a value slot if it doesn't have one, and the user requested one. assert!(place_info.value_index.is_none()); if let Ok(layout) = tcx.layout_of(param_env.and(place_info.ty)) - && layout.abi.is_scalar() + && layout.backend_repr.is_scalar() { place_info.value_index = Some(self.value_count.into()); self.value_count += 1; diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs index 002216f50f2d1..ca24d0d7e70e5 100644 --- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs +++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs @@ -2,6 +2,7 @@ //! //! Currently, this pass only propagates scalar values. +use rustc_abi::{BackendRepr, FIRST_VARIANT, FieldIdx, Size, VariantIdx}; use rustc_const_eval::const_eval::{DummyMachine, throw_machine_stop_str}; use rustc_const_eval::interpret::{ ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable, interp_ok, @@ -20,7 +21,6 @@ use rustc_mir_dataflow::value_analysis::{ }; use rustc_mir_dataflow::{Analysis, Results, ResultsVisitor}; use rustc_span::DUMMY_SP; -use rustc_target::abi::{Abi, FIRST_VARIANT, FieldIdx, Size, VariantIdx}; use tracing::{debug, debug_span, instrument}; // These constants are somewhat random guesses and have not been optimized. @@ -457,7 +457,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { // a pair and sometimes not. But as a hack we always return a pair // and just make the 2nd component `Bottom` when it does not exist. Some(val) => { - if matches!(val.layout.abi, Abi::ScalarPair(..)) { + if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) { let (val, overflow) = val.to_scalar_pair(); (FlatSet::Elem(val), FlatSet::Elem(overflow)) } else { @@ -470,7 +470,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { // Exactly one side is known, attempt some algebraic simplifications. 
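            // (Illustrative identities, not from the patch: `x & 0 == 0` and
            // `x * 0 == 0` hold whatever the unknown side is, so a single known
            // operand can already pin down the result.)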
(FlatSet::Elem(const_arg), _) | (_, FlatSet::Elem(const_arg)) => { let layout = const_arg.layout; - if !matches!(layout.abi, rustc_target::abi::Abi::Scalar(..)) { + if !matches!(layout.backend_repr, rustc_target::abi::BackendRepr::Scalar(..)) { return (FlatSet::Top, FlatSet::Top); } @@ -589,13 +589,13 @@ impl<'a, 'tcx> Collector<'a, 'tcx> { } let place = map.find(place.as_ref())?; - if layout.abi.is_scalar() + if layout.backend_repr.is_scalar() && let Some(value) = propagatable_scalar(place, state, map) { return Some(Const::Val(ConstValue::Scalar(value), ty)); } - if matches!(layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) { + if matches!(layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) { let alloc_id = ecx .intern_with_temp_alloc(layout, |ecx, dest| { try_write_constant(ecx, dest, place, ty, state, map) @@ -641,7 +641,7 @@ fn try_write_constant<'tcx>( } // Fast path for scalars. - if layout.abi.is_scalar() + if layout.backend_repr.is_scalar() && let Some(value) = propagatable_scalar(place, state, map) { return ecx.write_immediate(Immediate::Scalar(value), dest); diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index 79c62372df028..8a646d8cbfef5 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -85,6 +85,7 @@ use std::borrow::Cow; use either::Either; +use rustc_abi::{self as abi, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx}; use rustc_const_eval::const_eval::DummyMachine; use rustc_const_eval::interpret::{ ImmTy, Immediate, InterpCx, MemPlaceMeta, MemoryKind, OpTy, Projectable, Scalar, @@ -103,7 +104,6 @@ use rustc_middle::ty::layout::{HasParamEnv, LayoutOf}; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_span::DUMMY_SP; use rustc_span::def_id::DefId; -use rustc_target::abi::{self, Abi, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx}; use smallvec::SmallVec; use tracing::{debug, instrument, trace}; @@ -427,7 +427,10 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { }; let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx); ImmTy::from_immediate(ptr_imm, ty).into() - } else if matches!(ty.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) { + } else if matches!( + ty.backend_repr, + BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..) + ) { let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?; let variant_dest = if let Some(variant) = variant { self.ecx.project_downcast(&dest, variant).discard_err()? @@ -573,12 +576,12 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { // limited transmutes: it only works between types with the same layout, and // cannot transmute pointers to integers. if value.as_mplace_or_imm().is_right() { - let can_transmute = match (value.layout.abi, to.abi) { - (Abi::Scalar(s1), Abi::Scalar(s2)) => { + let can_transmute = match (value.layout.backend_repr, to.backend_repr) { + (BackendRepr::Scalar(s1), BackendRepr::Scalar(s2)) => { s1.size(&self.ecx) == s2.size(&self.ecx) && !matches!(s1.primitive(), Primitive::Pointer(..)) } - (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => { + (BackendRepr::ScalarPair(a1, b1), BackendRepr::ScalarPair(a2, b2)) => { a1.size(&self.ecx) == a2.size(&self.ecx) && b1.size(&self.ecx) == b2.size(&self.ecx) && // The alignment of the second component determines its offset, so that also needs to match. 
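(Aside, not part of the patch: the rule behind that alignment check. In a
`ScalarPair(a, b)` the second component lives at `a`'s size rounded up to `b`'s
alignment — the `b_offset` computation seen in the operand.rs hunk earlier — so a
pair-to-pair transmute needs matching sizes *and* a matching second alignment. A
sketch with plain integers standing in for `Size`/`Align`:)

    // Offset of `b` inside ScalarPair(a, b): round a's size up to b's alignment.
    fn b_offset(a_size: u64, b_align: u64) -> u64 {
        a_size.div_ceil(b_align) * b_align
    }
    // e.g. a pair laid out like (u8, u32) puts b at offset 4,
    // while one laid out like (u8, u16) puts b at offset 2.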
@@ -1241,7 +1244,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { let as_bits = |value| { let constant = self.evaluated[value].as_ref()?; - if layout.abi.is_scalar() { + if layout.backend_repr.is_scalar() { let scalar = self.ecx.read_scalar(constant).discard_err()?; scalar.to_bits(constant.layout.size).discard_err() } else { @@ -1497,12 +1500,12 @@ fn op_to_prop_const<'tcx>( // Do not synthetize too large constants. Codegen will just memcpy them, which we'd like to // avoid. - if !matches!(op.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) { + if !matches!(op.layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) { return None; } // If this constant has scalar ABI, return it as a `ConstValue::Scalar`. - if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi + if let BackendRepr::Scalar(abi::Scalar::Initialized { .. }) = op.layout.backend_repr && let Some(scalar) = ecx.read_scalar(op).discard_err() { if !scalar.try_to_scalar_int().is_ok() { diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs index 08923748eb275..0604665642aba 100644 --- a/compiler/rustc_mir_transform/src/known_panics_lint.rs +++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs @@ -4,6 +4,7 @@ use std::fmt::Debug; +use rustc_abi::{BackendRepr, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx}; use rustc_const_eval::const_eval::DummyMachine; use rustc_const_eval::interpret::{ ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error, interp_ok, @@ -19,7 +20,6 @@ use rustc_middle::mir::*; use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout}; use rustc_middle::ty::{self, ConstInt, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt}; use rustc_span::Span; -use rustc_target::abi::{Abi, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx}; use tracing::{debug, instrument, trace}; use crate::errors::{AssertLint, AssertLintKind}; @@ -557,7 +557,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { let right = self.use_ecx(|this| this.ecx.read_immediate(&right))?; let val = self.use_ecx(|this| this.ecx.binary_op(bin_op, &left, &right))?; - if matches!(val.layout.abi, Abi::ScalarPair(..)) { + if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) { // FIXME `Value` should properly support pairs in `Immediate`... but currently // it does not. let (val, overflow) = val.to_pair(&self.ecx); @@ -651,9 +651,9 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { let to = self.ecx.layout_of(to).ok()?; // `offset` for immediates only supports scalar/scalar-pair ABIs, // so bail out if the target is not one. 
- match (value.layout.abi, to.abi) { - (Abi::Scalar(..), Abi::Scalar(..)) => {} - (Abi::ScalarPair(..), Abi::ScalarPair(..)) => {} + match (value.layout.backend_repr, to.backend_repr) { + (BackendRepr::Scalar(..), BackendRepr::Scalar(..)) => {} + (BackendRepr::ScalarPair(..), BackendRepr::ScalarPair(..)) => {} _ => return None, } diff --git a/compiler/rustc_passes/src/layout_test.rs b/compiler/rustc_passes/src/layout_test.rs index 0b6cf82ca8b46..7260c12f2784a 100644 --- a/compiler/rustc_passes/src/layout_test.rs +++ b/compiler/rustc_passes/src/layout_test.rs @@ -81,8 +81,12 @@ fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) { let meta_items = attr.meta_item_list().unwrap_or_default(); for meta_item in meta_items { match meta_item.name_or_empty() { + // FIXME: this never was about ABI and now this dump arg is confusing sym::abi => { - tcx.dcx().emit_err(LayoutAbi { span, abi: format!("{:?}", ty_layout.abi) }); + tcx.dcx().emit_err(LayoutAbi { + span, + abi: format!("{:?}", ty_layout.backend_repr), + }); } sym::align => { diff --git a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs index 410bf0f40f467..af24fd23f50b9 100644 --- a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs +++ b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs @@ -56,7 +56,7 @@ impl<'tcx> Stable<'tcx> for rustc_abi::LayoutData Stable<'tcx> for rustc_abi::TagEncoding { } } -impl<'tcx> Stable<'tcx> for rustc_abi::Abi { +impl<'tcx> Stable<'tcx> for rustc_abi::BackendRepr { type T = ValueAbi; fn stable(&self, tables: &mut Tables<'_>) -> Self::T { match *self { - rustc_abi::Abi::Uninhabited => ValueAbi::Uninhabited, - rustc_abi::Abi::Scalar(scalar) => ValueAbi::Scalar(scalar.stable(tables)), - rustc_abi::Abi::ScalarPair(first, second) => { + rustc_abi::BackendRepr::Uninhabited => ValueAbi::Uninhabited, + rustc_abi::BackendRepr::Scalar(scalar) => ValueAbi::Scalar(scalar.stable(tables)), + rustc_abi::BackendRepr::ScalarPair(first, second) => { ValueAbi::ScalarPair(first.stable(tables), second.stable(tables)) } - rustc_abi::Abi::Vector { element, count } => { + rustc_abi::BackendRepr::Vector { element, count } => { ValueAbi::Vector { element: element.stable(tables), count } } - rustc_abi::Abi::Aggregate { sized } => ValueAbi::Aggregate { sized }, + rustc_abi::BackendRepr::Memory { sized } => ValueAbi::Aggregate { sized }, } } } diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs index ffec76370d026..d1234c3cc91d4 100644 --- a/compiler/rustc_target/src/callconv/loongarch.rs +++ b/compiler/rustc_target/src/callconv/loongarch.rs @@ -1,5 +1,7 @@ use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform}; -use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout}; +use crate::abi::{ + self, BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout, +}; use crate::spec::HasTargetSpec; use crate::spec::abi::Abi as SpecAbi; @@ -21,8 +23,8 @@ enum FloatConv { struct CannotUseFpConv; fn is_loongarch_aggregate(arg: &ArgAbi<'_, Ty>) -> bool { - match arg.layout.abi { - Abi::Vector { .. } => true, + match arg.layout.backend_repr { + BackendRepr::Vector { .. 
} => true, _ => arg.layout.is_aggregate(), } } @@ -38,8 +40,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>( where Ty: TyAbiInterface<'a, C> + Copy, { - match arg_layout.abi { - Abi::Scalar(scalar) => match scalar.primitive() { + match arg_layout.backend_repr { + BackendRepr::Scalar(scalar) => match scalar.primitive() { abi::Int(..) | abi::Pointer(_) => { if arg_layout.size.bits() > xlen { return Err(CannotUseFpConv); @@ -77,8 +79,8 @@ where } } }, - Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv), - Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields { + BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv), + BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields { FieldsShape::Primitive => { unreachable!("aggregates can't have `FieldsShape::Primitive`") } @@ -311,7 +313,7 @@ fn classify_arg<'a, Ty, C>( } fn extend_integer_width(arg: &mut ArgAbi<'_, Ty>, xlen: u64) { - if let Abi::Scalar(scalar) = arg.layout.abi { + if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr { if let abi::Int(i, _) = scalar.primitive() { // 32-bit integers are always sign-extended if i.size().bits() == 32 && xlen > 32 { diff --git a/compiler/rustc_target/src/callconv/mips64.rs b/compiler/rustc_target/src/callconv/mips64.rs index 2c3258c8d42d4..5bdf4c2ad77f0 100644 --- a/compiler/rustc_target/src/callconv/mips64.rs +++ b/compiler/rustc_target/src/callconv/mips64.rs @@ -5,7 +5,7 @@ use crate::abi::{self, HasDataLayout, Size, TyAbiInterface}; fn extend_integer_width_mips(arg: &mut ArgAbi<'_, Ty>, bits: u64) { // Always sign extend u32 values on 64-bit mips - if let abi::Abi::Scalar(scalar) = arg.layout.abi { + if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr { if let abi::Int(i, signed) = scalar.primitive() { if !signed && i.size().bits() == 32 { if let PassMode::Direct(ref mut attrs) = arg.mode { @@ -24,8 +24,8 @@ where Ty: TyAbiInterface<'a, C> + Copy, C: HasDataLayout, { - match ret.layout.field(cx, i).abi { - abi::Abi::Scalar(scalar) => match scalar.primitive() { + match ret.layout.field(cx, i).backend_repr { + abi::BackendRepr::Scalar(scalar) => match scalar.primitive() { abi::Float(abi::F32) => Some(Reg::f32()), abi::Float(abi::F64) => Some(Reg::f64()), _ => None, @@ -109,7 +109,7 @@ where let offset = arg.layout.fields.offset(i); // We only care about aligned doubles - if let abi::Abi::Scalar(scalar) = field.abi { + if let abi::BackendRepr::Scalar(scalar) = field.backend_repr { if scalar.primitive() == abi::Float(abi::F64) { if offset.is_aligned(dl.f64_align.abi) { // Insert enough integers to cover [last_offset, offset) diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs index 25b001b57e8a8..8c3df9c426b0a 100644 --- a/compiler/rustc_target/src/callconv/mod.rs +++ b/compiler/rustc_target/src/callconv/mod.rs @@ -6,7 +6,8 @@ use rustc_macros::HashStable_Generic; use rustc_span::Symbol; use crate::abi::{ - self, Abi, AddressSpace, Align, HasDataLayout, Pointer, Size, TyAbiInterface, TyAndLayout, + self, AddressSpace, Align, BackendRepr, HasDataLayout, Pointer, Size, TyAbiInterface, + TyAndLayout, }; use crate::spec::abi::Abi as SpecAbi; use crate::spec::{self, HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi}; @@ -350,15 +351,17 @@ impl<'a, Ty> ArgAbi<'a, Ty> { layout: TyAndLayout<'a, Ty>, scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes, ) -> Self { - let mode = match layout.abi { - Abi::Uninhabited => 
PassMode::Ignore, - Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)), - Abi::ScalarPair(a, b) => PassMode::Pair( + let mode = match layout.backend_repr { + BackendRepr::Uninhabited => PassMode::Ignore, + BackendRepr::Scalar(scalar) => { + PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)) + } + BackendRepr::ScalarPair(a, b) => PassMode::Pair( scalar_attrs(&layout, a, Size::ZERO), scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)), ), - Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()), - Abi::Aggregate { .. } => Self::indirect_pass_mode(&layout), + BackendRepr::Vector { .. } => PassMode::Direct(ArgAttributes::new()), + BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout), }; ArgAbi { layout, mode } } @@ -460,7 +463,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> { pub fn extend_integer_width_to(&mut self, bits: u64) { // Only integers have signedness - if let Abi::Scalar(scalar) = self.layout.abi { + if let BackendRepr::Scalar(scalar) = self.layout.backend_repr { if let abi::Int(i, signed) = scalar.primitive() { if i.size().bits() < bits { if let PassMode::Direct(ref mut attrs) = self.mode { @@ -512,7 +515,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> { // That elevates any type difference to an ABI difference since we just use the // full Rust type as the LLVM argument/return type. if matches!(self.mode, PassMode::Direct(..)) - && matches!(self.layout.abi, Abi::Aggregate { .. }) + && matches!(self.layout.backend_repr, BackendRepr::Memory { .. }) { // For aggregates in `Direct` mode to be compatible, the types need to be equal. self.layout.ty == other.layout.ty @@ -791,8 +794,8 @@ impl<'a, Ty> FnAbi<'a, Ty> { continue; } - match arg.layout.abi { - Abi::Aggregate { .. } => {} + match arg.layout.backend_repr { + BackendRepr::Memory { .. } => {} // This is a fun case! The gist of what this is doing is // that we want callers and callees to always agree on the @@ -813,7 +816,9 @@ impl<'a, Ty> FnAbi<'a, Ty> { // Note that the intrinsic ABI is exempt here as // that's how we connect up to LLVM and it's unstable // anyway, we control all calls to it in libstd. - Abi::Vector { .. } if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect => { + BackendRepr::Vector { .. } + if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect => + { arg.make_indirect(); continue; } diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs index f96169e6a618f..c0298edb5ab77 100644 --- a/compiler/rustc_target/src/callconv/riscv.rs +++ b/compiler/rustc_target/src/callconv/riscv.rs @@ -4,8 +4,10 @@ // Reference: Clang RISC-V ELF psABI lowering code // https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773 +use rustc_abi::{BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout}; + +use crate::abi; use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform}; -use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout}; use crate::spec::HasTargetSpec; use crate::spec::abi::Abi as SpecAbi; @@ -27,8 +29,8 @@ enum FloatConv { struct CannotUseFpConv; fn is_riscv_aggregate(arg: &ArgAbi<'_, Ty>) -> bool { - match arg.layout.abi { - Abi::Vector { .. } => true, + match arg.layout.backend_repr { + BackendRepr::Vector { .. 
} => true, _ => arg.layout.is_aggregate(), } } @@ -44,8 +46,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>( where Ty: TyAbiInterface<'a, C> + Copy, { - match arg_layout.abi { - Abi::Scalar(scalar) => match scalar.primitive() { + match arg_layout.backend_repr { + BackendRepr::Scalar(scalar) => match scalar.primitive() { abi::Int(..) | abi::Pointer(_) => { if arg_layout.size.bits() > xlen { return Err(CannotUseFpConv); @@ -83,8 +85,8 @@ where } } }, - Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv), - Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields { + BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv), + BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields { FieldsShape::Primitive => { unreachable!("aggregates can't have `FieldsShape::Primitive`") } @@ -317,7 +319,7 @@ fn classify_arg<'a, Ty, C>( } fn extend_integer_width(arg: &mut ArgAbi<'_, Ty>, xlen: u64) { - if let Abi::Scalar(scalar) = arg.layout.abi { + if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr { if let abi::Int(i, _) = scalar.primitive() { // 32-bit integers are always sign-extended if i.size().bits() == 32 && xlen > 32 { diff --git a/compiler/rustc_target/src/callconv/sparc64.rs b/compiler/rustc_target/src/callconv/sparc64.rs index 835353f76fc9a..313d8730399b2 100644 --- a/compiler/rustc_target/src/callconv/sparc64.rs +++ b/compiler/rustc_target/src/callconv/sparc64.rs @@ -109,11 +109,11 @@ where return data; } - match layout.abi { - abi::Abi::Scalar(scalar) => { + match layout.backend_repr { + abi::BackendRepr::Scalar(scalar) => { data = arg_scalar(cx, &scalar, offset, data); } - abi::Abi::Aggregate { .. } => { + abi::BackendRepr::Memory { .. } => { for i in 0..layout.fields.count() { if offset < layout.fields.offset(i) { offset = layout.fields.offset(i); @@ -122,7 +122,7 @@ where } } _ => { - if let abi::Abi::ScalarPair(scalar1, scalar2) = &layout.abi { + if let abi::BackendRepr::ScalarPair(scalar1, scalar2) = &layout.backend_repr { data = arg_scalar_pair(cx, scalar1, scalar2, offset, data); } } diff --git a/compiler/rustc_target/src/callconv/x86.rs b/compiler/rustc_target/src/callconv/x86.rs index e907beecb381a..a5af975d4d24d 100644 --- a/compiler/rustc_target/src/callconv/x86.rs +++ b/compiler/rustc_target/src/callconv/x86.rs @@ -1,6 +1,6 @@ use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind}; use crate::abi::{ - Abi, AddressSpace, Align, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout, + AddressSpace, Align, BackendRepr, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout, }; use crate::spec::HasTargetSpec; use crate::spec::abi::Abi as SpecAbi; @@ -105,10 +105,12 @@ where where Ty: TyAbiInterface<'a, C> + Copy, { - match layout.abi { - Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) => false, - Abi::Vector { .. } => true, - Abi::Aggregate { .. } => { + match layout.backend_repr { + BackendRepr::Uninhabited + | BackendRepr::Scalar(_) + | BackendRepr::ScalarPair(..) => false, + BackendRepr::Vector { .. } => true, + BackendRepr::Memory { .. } => { for i in 0..layout.fields.count() { if contains_vector(cx, layout.field(cx, i)) { return true; @@ -223,9 +225,9 @@ where // Intrinsics themselves are not actual "real" functions, so theres no need to change their ABIs. 
&& abi != SpecAbi::RustIntrinsic { - let has_float = match fn_abi.ret.layout.abi { - Abi::Scalar(s) => matches!(s.primitive(), Float(_)), - Abi::ScalarPair(s1, s2) => { + let has_float = match fn_abi.ret.layout.backend_repr { + BackendRepr::Scalar(s) => matches!(s.primitive(), Float(_)), + BackendRepr::ScalarPair(s1, s2) => { matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_)) } _ => false, // anyway not passed via registers on x86 diff --git a/compiler/rustc_target/src/callconv/x86_64.rs b/compiler/rustc_target/src/callconv/x86_64.rs index 9910e623ac9b5..bd101b23ea16b 100644 --- a/compiler/rustc_target/src/callconv/x86_64.rs +++ b/compiler/rustc_target/src/callconv/x86_64.rs @@ -1,8 +1,10 @@ // The classification code for the x86_64 ABI is taken from the clay language // https://github.com/jckarter/clay/blob/db0bd2702ab0b6e48965cd85f8859bbd5f60e48e/compiler/externals.cpp +use rustc_abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface, TyAndLayout}; + +use crate::abi; use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind}; -use crate::abi::{self, Abi, HasDataLayout, Size, TyAbiInterface, TyAndLayout}; /// Classification of "eightbyte" components. // N.B., the order of the variants is from general to specific, @@ -46,17 +48,17 @@ where return Ok(()); } - let mut c = match layout.abi { - Abi::Uninhabited => return Ok(()), + let mut c = match layout.backend_repr { + BackendRepr::Uninhabited => return Ok(()), - Abi::Scalar(scalar) => match scalar.primitive() { + BackendRepr::Scalar(scalar) => match scalar.primitive() { abi::Int(..) | abi::Pointer(_) => Class::Int, abi::Float(_) => Class::Sse, }, - Abi::Vector { .. } => Class::Sse, + BackendRepr::Vector { .. } => Class::Sse, - Abi::ScalarPair(..) | Abi::Aggregate { .. } => { + BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => { for i in 0..layout.fields.count() { let field_off = off + layout.fields.offset(i); classify(cx, layout.field(cx, i), cls, field_off)?; diff --git a/compiler/rustc_target/src/callconv/x86_win64.rs b/compiler/rustc_target/src/callconv/x86_win64.rs index e5a20b248e43b..83d94cb11bafd 100644 --- a/compiler/rustc_target/src/callconv/x86_win64.rs +++ b/compiler/rustc_target/src/callconv/x86_win64.rs @@ -1,25 +1,28 @@ +use rustc_abi::{BackendRepr, Float, Primitive}; + use crate::abi::call::{ArgAbi, FnAbi, Reg}; -use crate::abi::{Abi, Float, Primitive}; use crate::spec::HasTargetSpec; // Win64 ABI: https://docs.microsoft.com/en-us/cpp/build/parameter-passing pub(crate) fn compute_abi_info(cx: &impl HasTargetSpec, fn_abi: &mut FnAbi<'_, Ty>) { let fixup = |a: &mut ArgAbi<'_, Ty>| { - match a.layout.abi { - Abi::Uninhabited | Abi::Aggregate { sized: false } => {} - Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => match a.layout.size.bits() { - 8 => a.cast_to(Reg::i8()), - 16 => a.cast_to(Reg::i16()), - 32 => a.cast_to(Reg::i32()), - 64 => a.cast_to(Reg::i64()), - _ => a.make_indirect(), - }, - Abi::Vector { .. } => { + match a.layout.backend_repr { + BackendRepr::Uninhabited | BackendRepr::Memory { sized: false } => {} + BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => { + match a.layout.size.bits() { + 8 => a.cast_to(Reg::i8()), + 16 => a.cast_to(Reg::i16()), + 32 => a.cast_to(Reg::i32()), + 64 => a.cast_to(Reg::i64()), + _ => a.make_indirect(), + } + } + BackendRepr::Vector { .. } => { // FIXME(eddyb) there should be a size cap here // (probably what clang calls "illegal vectors"). 
} - Abi::Scalar(scalar) => { + BackendRepr::Scalar(scalar) => { // Match what LLVM does for `f128` so that `compiler-builtins` builtins match up // with what LLVM expects. if a.layout.size.bytes() > 8 diff --git a/compiler/rustc_target/src/callconv/xtensa.rs b/compiler/rustc_target/src/callconv/xtensa.rs index e1728b08a396b..9d313d1650032 100644 --- a/compiler/rustc_target/src/callconv/xtensa.rs +++ b/compiler/rustc_target/src/callconv/xtensa.rs @@ -6,7 +6,7 @@ //! Section 2.3 from the Xtensa programmers guide. use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform}; -use crate::abi::{Abi, HasDataLayout, Size, TyAbiInterface}; +use crate::abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface}; use crate::spec::HasTargetSpec; const NUM_ARG_GPRS: u64 = 6; @@ -114,8 +114,8 @@ where } fn is_xtensa_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool { - match arg.layout.abi { - Abi::Vector { .. } => true, + match arg.layout.backend_repr { + BackendRepr::Vector { .. } => true, _ => arg.layout.is_aggregate(), } } diff --git a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs index a068f25fe35ec..3ddf023cf97da 100644 --- a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs +++ b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs @@ -18,7 +18,7 @@ use rustc_middle::ty::{ }; use rustc_span::Span; use rustc_span::symbol::Symbol; -use rustc_target::abi::Abi; +use rustc_target::abi::BackendRepr; use smallvec::SmallVec; use tracing::{debug, instrument}; @@ -523,8 +523,8 @@ fn check_receiver_correct<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, method: // e.g., `Rc<()>` let unit_receiver_ty = receiver_for_self_ty(tcx, receiver_ty, tcx.types.unit, method_def_id); - match tcx.layout_of(param_env.and(unit_receiver_ty)).map(|l| l.abi) { - Ok(Abi::Scalar(..)) => (), + match tcx.layout_of(param_env.and(unit_receiver_ty)).map(|l| l.backend_repr) { + Ok(BackendRepr::Scalar(..)) => (), abi => { tcx.dcx().span_delayed_bug( tcx.def_span(method_def_id), @@ -538,8 +538,8 @@ fn check_receiver_correct<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, method: // e.g., `Rc` let trait_object_receiver = receiver_for_self_ty(tcx, receiver_ty, trait_object_ty, method_def_id); - match tcx.layout_of(param_env.and(trait_object_receiver)).map(|l| l.abi) { - Ok(Abi::ScalarPair(..)) => (), + match tcx.layout_of(param_env.and(trait_object_receiver)).map(|l| l.backend_repr) { + Ok(BackendRepr::ScalarPair(..)) => (), abi => { tcx.dcx().span_delayed_bug( tcx.def_span(method_def_id), diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs index 48149a08de881..722ef5f456982 100644 --- a/compiler/rustc_ty_utils/src/abi.rs +++ b/compiler/rustc_ty_utils/src/abi.rs @@ -1,7 +1,7 @@ use std::iter; use rustc_abi::Primitive::Pointer; -use rustc_abi::{Abi, PointerKind, Scalar, Size}; +use rustc_abi::{BackendRepr, PointerKind, Scalar, Size}; use rustc_hir as hir; use rustc_hir::lang_items::LangItem; use rustc_middle::bug; @@ -469,7 +469,7 @@ fn fn_abi_sanity_check<'tcx>( // careful. Scalar/ScalarPair is fine, since backends will generally use // `layout.abi` and ignore everything else. We should just reject `Aggregate` // entirely here, but some targets need to be fixed first. - if matches!(arg.layout.abi, Abi::Aggregate { .. }) { + if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) { // For an unsized type we'd only pass the sized prefix, so there is no universe // in which we ever want to allow this. 
assert!( @@ -500,7 +500,7 @@ fn fn_abi_sanity_check<'tcx>( // Similar to `Direct`, we need to make sure that backends use `layout.abi` and // ignore the rest of the layout. assert!( - matches!(arg.layout.abi, Abi::ScalarPair(..)), + matches!(arg.layout.backend_repr, BackendRepr::ScalarPair(..)), "PassMode::Pair for type {}", arg.layout.ty ); @@ -658,9 +658,9 @@ fn fn_abi_adjust_for_abi<'tcx>( fn unadjust<'tcx>(arg: &mut ArgAbi<'tcx, Ty<'tcx>>) { // This still uses `PassMode::Pair` for ScalarPair types. That's unlikely to be intended, // but who knows what breaks if we change this now. - if matches!(arg.layout.abi, Abi::Aggregate { .. }) { + if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) { assert!( - arg.layout.abi.is_sized(), + arg.layout.backend_repr.is_sized(), "'unadjusted' ABI does not support unsized arguments" ); } @@ -731,8 +731,8 @@ fn make_thin_self_ptr<'tcx>( // FIXME (mikeyhew) change this to use &own if it is ever added to the language Ty::new_mut_ptr(tcx, layout.ty) } else { - match layout.abi { - Abi::ScalarPair(..) | Abi::Scalar(..) => (), + match layout.backend_repr { + BackendRepr::ScalarPair(..) | BackendRepr::Scalar(..) => (), _ => bug!("receiver type has unsupported layout: {:?}", layout), } diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs index 94b80e2694d8d..5ca7afe245396 100644 --- a/compiler/rustc_ty_utils/src/layout.rs +++ b/compiler/rustc_ty_utils/src/layout.rs @@ -5,8 +5,9 @@ use hir::def_id::DefId; use rustc_abi::Integer::{I8, I32}; use rustc_abi::Primitive::{self, Float, Int, Pointer}; use rustc_abi::{ - Abi, AbiAndPrefAlign, AddressSpace, Align, FieldsShape, HasDataLayout, LayoutCalculatorError, - LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding, Variants, WrappingRange, + AbiAndPrefAlign, AddressSpace, Align, BackendRepr, FieldsShape, HasDataLayout, + LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding, + Variants, WrappingRange, }; use rustc_index::bit_set::BitSet; use rustc_index::{IndexSlice, IndexVec}; @@ -173,7 +174,9 @@ fn layout_of_uncached<'tcx>( let mut layout = LayoutData::clone(&layout.0); match *pat { ty::PatternKind::Range { start, end, include_end } => { - if let Abi::Scalar(scalar) | Abi::ScalarPair(scalar, _) = &mut layout.abi { + if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) = + &mut layout.backend_repr + { if let Some(start) = start { scalar.valid_range_mut().start = start .try_to_bits(tcx, param_env) @@ -275,7 +278,7 @@ fn layout_of_uncached<'tcx>( return Ok(tcx.mk_layout(LayoutData::scalar(cx, data_ptr))); } - let Abi::Scalar(metadata) = metadata_layout.abi else { + let BackendRepr::Scalar(metadata) = metadata_layout.backend_repr else { return Err(error(cx, LayoutError::Unknown(pointee))); }; @@ -330,9 +333,9 @@ fn layout_of_uncached<'tcx>( .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?; let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) { - Abi::Uninhabited + BackendRepr::Uninhabited } else { - Abi::Aggregate { sized: true } + BackendRepr::Memory { sized: true } }; let largest_niche = if count != 0 { element.largest_niche } else { None }; @@ -340,7 +343,7 @@ fn layout_of_uncached<'tcx>( tcx.mk_layout(LayoutData { variants: Variants::Single { index: FIRST_VARIANT }, fields: FieldsShape::Array { stride: element.size, count }, - abi, + backend_repr: abi, largest_niche, align: element.align, size, @@ -353,7 +356,7 @@ fn layout_of_uncached<'tcx>( 
tcx.mk_layout(LayoutData { variants: Variants::Single { index: FIRST_VARIANT }, fields: FieldsShape::Array { stride: element.size, count: 0 }, - abi: Abi::Aggregate { sized: false }, + backend_repr: BackendRepr::Memory { sized: false }, largest_niche: None, align: element.align, size: Size::ZERO, @@ -364,7 +367,7 @@ fn layout_of_uncached<'tcx>( ty::Str => tcx.mk_layout(LayoutData { variants: Variants::Single { index: FIRST_VARIANT }, fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 }, - abi: Abi::Aggregate { sized: false }, + backend_repr: BackendRepr::Memory { sized: false }, largest_niche: None, align: dl.i8_align, size: Size::ZERO, @@ -384,8 +387,8 @@ fn layout_of_uncached<'tcx>( &ReprOptions::default(), StructKind::AlwaysSized, )?; - match unit.abi { - Abi::Aggregate { ref mut sized } => *sized = false, + match unit.backend_repr { + BackendRepr::Memory { ref mut sized } => *sized = false, _ => bug!(), } tcx.mk_layout(unit) @@ -500,7 +503,7 @@ fn layout_of_uncached<'tcx>( // Compute the ABI of the element type: let e_ly = cx.layout_of(e_ty)?; - let Abi::Scalar(e_abi) = e_ly.abi else { + let BackendRepr::Scalar(e_abi) = e_ly.backend_repr else { // This error isn't caught in typeck, e.g., if // the element type of the vector is generic. tcx.dcx().emit_fatal(NonPrimitiveSimdType { ty, e_ty }); @@ -516,12 +519,12 @@ fn layout_of_uncached<'tcx>( // Non-power-of-two vectors have padding up to the next power-of-two. // If we're a packed repr, remove the padding while keeping the alignment as close // to a vector as possible. - (Abi::Aggregate { sized: true }, AbiAndPrefAlign { + (BackendRepr::Memory { sized: true }, AbiAndPrefAlign { abi: Align::max_for_offset(size), pref: dl.vector_align(size).pref, }) } else { - (Abi::Vector { element: e_abi, count: e_len }, dl.vector_align(size)) + (BackendRepr::Vector { element: e_abi, count: e_len }, dl.vector_align(size)) }; let size = size.align_to(align.abi); @@ -535,7 +538,7 @@ fn layout_of_uncached<'tcx>( tcx.mk_layout(LayoutData { variants: Variants::Single { index: FIRST_VARIANT }, fields, - abi, + backend_repr: abi, largest_niche: e_ly.largest_niche, size, align, @@ -985,10 +988,12 @@ fn coroutine_layout<'tcx>( size = size.align_to(align.abi); - let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) { - Abi::Uninhabited + let abi = if prefix.backend_repr.is_uninhabited() + || variants.iter().all(|v| v.backend_repr.is_uninhabited()) + { + BackendRepr::Uninhabited } else { - Abi::Aggregate { sized: true } + BackendRepr::Memory { sized: true } }; let layout = tcx.mk_layout(LayoutData { @@ -999,7 +1004,7 @@ fn coroutine_layout<'tcx>( variants, }, fields: outer_fields, - abi, + backend_repr: abi, // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to // self-referentiality), getting the discriminant can cause aliasing violations. // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that diff --git a/compiler/rustc_ty_utils/src/layout/invariant.rs b/compiler/rustc_ty_utils/src/layout/invariant.rs index 3db5a4f1805c6..f43feb552b289 100644 --- a/compiler/rustc_ty_utils/src/layout/invariant.rs +++ b/compiler/rustc_ty_utils/src/layout/invariant.rs @@ -66,12 +66,12 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa fn check_layout_abi<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayout<'tcx>) { // Verify the ABI mandated alignment and size. 
- let align = layout.abi.inherent_align(cx).map(|align| align.abi); - let size = layout.abi.inherent_size(cx); + let align = layout.backend_repr.inherent_align(cx).map(|align| align.abi); + let size = layout.backend_repr.inherent_size(cx); let Some((align, size)) = align.zip(size) else { assert_matches!( - layout.layout.abi(), - Abi::Uninhabited | Abi::Aggregate { .. }, + layout.layout.backend_repr(), + BackendRepr::Uninhabited | BackendRepr::Memory { .. }, "ABI unexpectedly missing alignment and/or size in {layout:#?}" ); return; @@ -88,12 +88,12 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa ); // Verify per-ABI invariants - match layout.layout.abi() { - Abi::Scalar(_) => { + match layout.layout.backend_repr() { + BackendRepr::Scalar(_) => { // Check that this matches the underlying field. let inner = skip_newtypes(cx, layout); assert!( - matches!(inner.layout.abi(), Abi::Scalar(_)), + matches!(inner.layout.backend_repr(), BackendRepr::Scalar(_)), "`Scalar` type {} is newtype around non-`Scalar` type {}", layout.ty, inner.ty @@ -132,7 +132,7 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa "`Scalar` field with bad align in {inner:#?}", ); assert!( - matches!(field.abi, Abi::Scalar(_)), + matches!(field.backend_repr, BackendRepr::Scalar(_)), "`Scalar` field with bad ABI in {inner:#?}", ); } @@ -141,11 +141,11 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa } } } - Abi::ScalarPair(scalar1, scalar2) => { + BackendRepr::ScalarPair(scalar1, scalar2) => { // Check that the underlying pair of fields matches. let inner = skip_newtypes(cx, layout); assert!( - matches!(inner.layout.abi(), Abi::ScalarPair(..)), + matches!(inner.layout.backend_repr(), BackendRepr::ScalarPair(..)), "`ScalarPair` type {} is newtype around non-`ScalarPair` type {}", layout.ty, inner.ty @@ -208,8 +208,8 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa "`ScalarPair` first field with bad align in {inner:#?}", ); assert_matches!( - field1.abi, - Abi::Scalar(_), + field1.backend_repr, + BackendRepr::Scalar(_), "`ScalarPair` first field with bad ABI in {inner:#?}", ); let field2_offset = size1.align_to(align2); @@ -226,16 +226,16 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa "`ScalarPair` second field with bad align in {inner:#?}", ); assert_matches!( - field2.abi, - Abi::Scalar(_), + field2.backend_repr, + BackendRepr::Scalar(_), "`ScalarPair` second field with bad ABI in {inner:#?}", ); } - Abi::Vector { element, .. } => { + BackendRepr::Vector { element, .. } => { assert!(align >= element.align(cx).abi); // just sanity-checking `vector_align`. // FIXME: Do some kind of check of the inner type, like for Scalar and ScalarPair. } - Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check. + BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {} // Nothing to check. } } @@ -274,13 +274,13 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa // The top-level ABI and the ABI of the variants should be coherent. 
let scalar_coherent = |s1: Scalar, s2: Scalar| s1.size(cx) == s2.size(cx) && s1.align(cx) == s2.align(cx); - let abi_coherent = match (layout.abi, variant.abi) { - (Abi::Scalar(s1), Abi::Scalar(s2)) => scalar_coherent(s1, s2), - (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => { + let abi_coherent = match (layout.backend_repr, variant.backend_repr) { + (BackendRepr::Scalar(s1), BackendRepr::Scalar(s2)) => scalar_coherent(s1, s2), + (BackendRepr::ScalarPair(a1, b1), BackendRepr::ScalarPair(a2, b2)) => { scalar_coherent(a1, a2) && scalar_coherent(b1, b2) } - (Abi::Uninhabited, _) => true, - (Abi::Aggregate { .. }, _) => true, + (BackendRepr::Uninhabited, _) => true, + (BackendRepr::Memory { .. }, _) => true, _ => false, }; if !abi_coherent { From 6d5d8b5071d4982d0886379f4e557556cc761469 Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Tue, 29 Oct 2024 13:37:59 -0700 Subject: [PATCH 2/7] cg_clif: `rustc_abi::Abi` => `BackendRepr` --- .../rustc_codegen_cranelift/src/abi/mod.rs | 2 +- .../src/abi/pass_mode.rs | 28 ++++----- compiler/rustc_codegen_cranelift/src/base.rs | 8 +-- .../src/discriminant.rs | 4 +- .../src/intrinsics/mod.rs | 10 ++-- compiler/rustc_codegen_cranelift/src/lib.rs | 2 +- .../src/value_and_place.rs | 57 +++++++++++-------- .../rustc_codegen_cranelift/src/vtable.rs | 4 +- 8 files changed, 62 insertions(+), 53 deletions(-) diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs index 892ec3e95855e..089b09d06aef2 100644 --- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs +++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs @@ -193,7 +193,7 @@ fn make_local_place<'tcx>( ); } let place = if is_ssa { - if let rustc_target::abi::Abi::ScalarPair(_, _) = layout.abi { + if let BackendRepr::ScalarPair(_, _) = layout.backend_repr { CPlace::new_var_pair(fx, local, layout) } else { CPlace::new_var(fx, local, layout) diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs index 38c322b5e0450..ad0a13dc7e574 100644 --- a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs +++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs @@ -78,19 +78,19 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> { fn get_abi_param(&self, tcx: TyCtxt<'tcx>) -> SmallVec<[AbiParam; 2]> { match self.mode { PassMode::Ignore => smallvec![], - PassMode::Direct(attrs) => match self.layout.abi { - Abi::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param( + PassMode::Direct(attrs) => match self.layout.backend_repr { + BackendRepr::Scalar(scalar) => smallvec![apply_arg_attrs_to_abi_param( AbiParam::new(scalar_to_clif_type(tcx, scalar)), attrs )], - Abi::Vector { .. } => { + BackendRepr::Vector { .. 
} => {
                 let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
                 smallvec![AbiParam::new(vector_ty)]
             }
-            _ => unreachable!("{:?}", self.layout.abi),
+            _ => unreachable!("{:?}", self.layout.backend_repr),
         },
-        PassMode::Pair(attrs_a, attrs_b) => match self.layout.abi {
-            Abi::ScalarPair(a, b) => {
+        PassMode::Pair(attrs_a, attrs_b) => match self.layout.backend_repr {
+            BackendRepr::ScalarPair(a, b) => {
                 let a = scalar_to_clif_type(tcx, a);
                 let b = scalar_to_clif_type(tcx, b);
                 smallvec![
@@ -98,7 +98,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                     apply_arg_attrs_to_abi_param(AbiParam::new(b), attrs_b),
                 ]
             }
-            _ => unreachable!("{:?}", self.layout.abi),
+            _ => unreachable!("{:?}", self.layout.backend_repr),
         },
         PassMode::Cast { ref cast, pad_i32 } => {
             assert!(!pad_i32, "padding support not yet implemented");
@@ -130,23 +130,23 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
     fn get_abi_return(&self, tcx: TyCtxt<'tcx>) -> (Option<AbiParam>, Vec<AbiParam>) {
         match self.mode {
             PassMode::Ignore => (None, vec![]),
-            PassMode::Direct(_) => match self.layout.abi {
-                Abi::Scalar(scalar) => {
+            PassMode::Direct(_) => match self.layout.backend_repr {
+                BackendRepr::Scalar(scalar) => {
                     (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
                 }
-                Abi::Vector { .. } => {
+                BackendRepr::Vector { .. } => {
                     let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
                     (None, vec![AbiParam::new(vector_ty)])
                 }
-                _ => unreachable!("{:?}", self.layout.abi),
+                _ => unreachable!("{:?}", self.layout.backend_repr),
             },
-            PassMode::Pair(_, _) => match self.layout.abi {
-                Abi::ScalarPair(a, b) => {
+            PassMode::Pair(_, _) => match self.layout.backend_repr {
+                BackendRepr::ScalarPair(a, b) => {
                     let a = scalar_to_clif_type(tcx, a);
                     let b = scalar_to_clif_type(tcx, b);
                     (None, vec![AbiParam::new(a), AbiParam::new(b)])
                 }
-                _ => unreachable!("{:?}", self.layout.abi),
+                _ => unreachable!("{:?}", self.layout.backend_repr),
             },
             PassMode::Cast { ref cast, ..
} => { (None, cast_target_to_abi_params(cast).into_iter().collect()) diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs index a681e6d9f3cd1..99e39971b7479 100644 --- a/compiler/rustc_codegen_cranelift/src/base.rs +++ b/compiler/rustc_codegen_cranelift/src/base.rs @@ -290,7 +290,7 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) { let arg_uninhabited = fx .mir .args_iter() - .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).abi.is_uninhabited()); + .any(|arg| fx.layout_of(fx.monomorphize(fx.mir.local_decls[arg].ty)).is_uninhabited()); if arg_uninhabited { fx.bcx.append_block_params_for_function_params(fx.block_map[START_BLOCK]); fx.bcx.switch_to_block(fx.block_map[START_BLOCK]); @@ -644,9 +644,9 @@ fn codegen_stmt<'tcx>( _ => unreachable!("un op Neg for {:?}", layout.ty), } } - UnOp::PtrMetadata => match layout.abi { - Abi::Scalar(_) => CValue::zst(dest_layout), - Abi::ScalarPair(_, _) => { + UnOp::PtrMetadata => match layout.backend_repr { + BackendRepr::Scalar(_) => CValue::zst(dest_layout), + BackendRepr::ScalarPair(_, _) => { CValue::by_val(operand.load_scalar_pair(fx).1, dest_layout) } _ => bug!("Unexpected `PtrToMetadata` operand: {operand:?}"), diff --git a/compiler/rustc_codegen_cranelift/src/discriminant.rs b/compiler/rustc_codegen_cranelift/src/discriminant.rs index d462dcd63a925..45794a4266589 100644 --- a/compiler/rustc_codegen_cranelift/src/discriminant.rs +++ b/compiler/rustc_codegen_cranelift/src/discriminant.rs @@ -14,7 +14,7 @@ pub(crate) fn codegen_set_discriminant<'tcx>( variant_index: VariantIdx, ) { let layout = place.layout(); - if layout.for_variant(fx, variant_index).abi.is_uninhabited() { + if layout.for_variant(fx, variant_index).is_uninhabited() { return; } match layout.variants { @@ -80,7 +80,7 @@ pub(crate) fn codegen_get_discriminant<'tcx>( ) { let layout = value.layout(); - if layout.abi.is_uninhabited() { + if layout.is_uninhabited() { return; } diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs index 35f0ccff3f99e..aae6794891d76 100644 --- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs +++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs @@ -51,8 +51,8 @@ fn report_atomic_type_validation_error<'tcx>( } pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Type { - let (element, count) = match layout.abi { - Abi::Vector { element, count } => (element, count), + let (element, count) = match layout.backend_repr { + BackendRepr::Vector { element, count } => (element, count), _ => unreachable!(), }; @@ -505,7 +505,7 @@ fn codegen_regular_intrinsic_call<'tcx>( let layout = fx.layout_of(generic_args.type_at(0)); // Note: Can't use is_unsized here as truly unsized types need to take the fixed size // branch - let meta = if let Abi::ScalarPair(_, _) = ptr.layout().abi { + let meta = if let BackendRepr::ScalarPair(_, _) = ptr.layout().backend_repr { Some(ptr.load_scalar_pair(fx).1) } else { None @@ -519,7 +519,7 @@ fn codegen_regular_intrinsic_call<'tcx>( let layout = fx.layout_of(generic_args.type_at(0)); // Note: Can't use is_unsized here as truly unsized types need to take the fixed size // branch - let meta = if let Abi::ScalarPair(_, _) = ptr.layout().abi { + let meta = if let BackendRepr::ScalarPair(_, _) = ptr.layout().backend_repr { Some(ptr.load_scalar_pair(fx).1) } else { None @@ -693,7 +693,7 @@ fn codegen_regular_intrinsic_call<'tcx>( 
let layout = fx.layout_of(ty); let msg_str = with_no_visible_paths!({ with_no_trimmed_paths!({ - if layout.abi.is_uninhabited() { + if layout.is_uninhabited() { // Use this error even for the other intrinsics as it is more precise. format!("attempted to instantiate uninhabited type `{}`", ty) } else if intrinsic == sym::assert_zero_valid { diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs index b6f9ce8fc2988..602b1b9820023 100644 --- a/compiler/rustc_codegen_cranelift/src/lib.rs +++ b/compiler/rustc_codegen_cranelift/src/lib.rs @@ -92,6 +92,7 @@ mod prelude { StackSlotData, StackSlotKind, TrapCode, Type, Value, types, }; pub(crate) use cranelift_module::{self, DataDescription, FuncId, Linkage, Module}; + pub(crate) use rustc_abi::{BackendRepr, FIRST_VARIANT, FieldIdx, Scalar, Size, VariantIdx}; pub(crate) use rustc_data_structures::fx::{FxHashMap, FxIndexMap}; pub(crate) use rustc_hir::def_id::{DefId, LOCAL_CRATE}; pub(crate) use rustc_index::Idx; @@ -101,7 +102,6 @@ mod prelude { self, FloatTy, Instance, InstanceKind, IntTy, ParamEnv, Ty, TyCtxt, UintTy, }; pub(crate) use rustc_span::Span; - pub(crate) use rustc_target::abi::{Abi, FIRST_VARIANT, FieldIdx, Scalar, Size, VariantIdx}; pub(crate) use crate::abi::*; pub(crate) use crate::base::{codegen_operand, codegen_place}; diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs index fd77502224e4c..900d7e69714ea 100644 --- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs +++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs @@ -131,8 +131,8 @@ impl<'tcx> CValue<'tcx> { match self.0 { CValueInner::ByRef(ptr, None) => { - let (a_scalar, b_scalar) = match self.1.abi { - Abi::ScalarPair(a, b) => (a, b), + let (a_scalar, b_scalar) = match self.1.backend_repr { + BackendRepr::ScalarPair(a, b) => (a, b), _ => unreachable!("dyn_star_force_data_on_stack({:?})", self), }; let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar); @@ -164,15 +164,15 @@ impl<'tcx> CValue<'tcx> { } } - /// Load a value with layout.abi of scalar + /// Load a value with layout.backend_repr of scalar #[track_caller] pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value { let layout = self.1; match self.0 { CValueInner::ByRef(ptr, None) => { - let clif_ty = match layout.abi { - Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar), - Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element) + let clif_ty = match layout.backend_repr { + BackendRepr::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar), + BackendRepr::Vector { element, count } => scalar_to_clif_type(fx.tcx, element) .by(u32::try_from(count).unwrap()) .unwrap(), _ => unreachable!("{:?}", layout.ty), @@ -187,14 +187,14 @@ impl<'tcx> CValue<'tcx> { } } - /// Load a value pair with layout.abi of scalar pair + /// Load a value pair with layout.backend_repr of scalar pair #[track_caller] pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) { let layout = self.1; match self.0 { CValueInner::ByRef(ptr, None) => { - let (a_scalar, b_scalar) = match layout.abi { - Abi::ScalarPair(a, b) => (a, b), + let (a_scalar, b_scalar) = match layout.backend_repr { + BackendRepr::ScalarPair(a, b) => (a, b), _ => unreachable!("load_scalar_pair({:?})", self), }; let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar); @@ -222,8 +222,8 @@ impl<'tcx> CValue<'tcx> { let layout = self.1; 
match self.0 { CValueInner::ByVal(_) => unreachable!(), - CValueInner::ByValPair(val1, val2) => match layout.abi { - Abi::ScalarPair(_, _) => { + CValueInner::ByValPair(val1, val2) => match layout.backend_repr { + BackendRepr::ScalarPair(_, _) => { let val = match field.as_u32() { 0 => val1, 1 => val2, @@ -232,7 +232,7 @@ impl<'tcx> CValue<'tcx> { let field_layout = layout.field(&*fx, usize::from(field)); CValue::by_val(val, field_layout) } - _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi), + _ => unreachable!("value_field for ByValPair with abi {:?}", layout.backend_repr), }, CValueInner::ByRef(ptr, None) => { let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field); @@ -360,7 +360,7 @@ impl<'tcx> CValue<'tcx> { pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self { assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..))); assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..))); - assert_eq!(self.layout().abi, layout.abi); + assert_eq!(self.layout().backend_repr, layout.backend_repr); CValue(self.0, layout) } } @@ -609,8 +609,8 @@ impl<'tcx> CPlace<'tcx> { let dst_layout = self.layout(); match self.inner { CPlaceInner::Var(_local, var) => { - let data = match from.1.abi { - Abi::Scalar(_) => CValue(from.0, dst_layout).load_scalar(fx), + let data = match from.1.backend_repr { + BackendRepr::Scalar(_) => CValue(from.0, dst_layout).load_scalar(fx), _ => { let (ptr, meta) = from.force_stack(fx); assert!(meta.is_none()); @@ -621,8 +621,10 @@ impl<'tcx> CPlace<'tcx> { transmute_scalar(fx, var, data, dst_ty); } CPlaceInner::VarPair(_local, var1, var2) => { - let (data1, data2) = match from.1.abi { - Abi::ScalarPair(_, _) => CValue(from.0, dst_layout).load_scalar_pair(fx), + let (data1, data2) = match from.1.backend_repr { + BackendRepr::ScalarPair(_, _) => { + CValue(from.0, dst_layout).load_scalar_pair(fx) + } _ => { let (ptr, meta) = from.force_stack(fx); assert!(meta.is_none()); @@ -635,7 +637,9 @@ impl<'tcx> CPlace<'tcx> { } CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self), CPlaceInner::Addr(to_ptr, None) => { - if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited { + if dst_layout.size == Size::ZERO + || dst_layout.backend_repr == BackendRepr::Uninhabited + { return; } @@ -646,23 +650,28 @@ impl<'tcx> CPlace<'tcx> { CValueInner::ByVal(val) => { to_ptr.store(fx, val, flags); } - CValueInner::ByValPair(val1, val2) => match from.layout().abi { - Abi::ScalarPair(a_scalar, b_scalar) => { + CValueInner::ByValPair(val1, val2) => match from.layout().backend_repr { + BackendRepr::ScalarPair(a_scalar, b_scalar) => { let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar); to_ptr.store(fx, val1, flags); to_ptr.offset(fx, b_offset).store(fx, val2, flags); } - _ => bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi), + _ => { + bug!( + "Non ScalarPair repr {:?} for ByValPair CValue", + dst_layout.backend_repr + ) + } }, CValueInner::ByRef(from_ptr, None) => { - match from.layout().abi { - Abi::Scalar(_) => { + match from.layout().backend_repr { + BackendRepr::Scalar(_) => { let val = from.load_scalar(fx); to_ptr.store(fx, val, flags); return; } - Abi::ScalarPair(a_scalar, b_scalar) => { + BackendRepr::ScalarPair(a_scalar, b_scalar) => { let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar); let (val1, val2) = from.load_scalar_pair(fx); diff --git 
a/compiler/rustc_codegen_cranelift/src/vtable.rs b/compiler/rustc_codegen_cranelift/src/vtable.rs index 14c607ccad7d7..82b6178be9dcd 100644 --- a/compiler/rustc_codegen_cranelift/src/vtable.rs +++ b/compiler/rustc_codegen_cranelift/src/vtable.rs @@ -47,7 +47,7 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>( idx: usize, ) -> (Pointer, Value) { let (ptr, vtable) = 'block: { - if let Abi::Scalar(_) = arg.layout().abi { + if let BackendRepr::Scalar(_) = arg.layout().backend_repr { while !arg.layout().ty.is_unsafe_ptr() && !arg.layout().ty.is_ref() { let (idx, _) = arg .layout() @@ -68,7 +68,7 @@ pub(crate) fn get_ptr_and_method_ref<'tcx>( } } - if let Abi::ScalarPair(_, _) = arg.layout().abi { + if let BackendRepr::ScalarPair(_, _) = arg.layout().backend_repr { let (ptr, vtable) = arg.load_scalar_pair(fx); (Pointer::new(ptr), vtable) } else { From 03492099014ce0cd9c105b3e0995f3ecc966b3d8 Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Tue, 29 Oct 2024 13:38:09 -0700 Subject: [PATCH 3/7] cg_gcc: `rustc_abi::Abi` => `BackendRepr` --- compiler/rustc_codegen_gcc/src/builder.rs | 4 +- .../rustc_codegen_gcc/src/intrinsic/mod.rs | 6 +-- compiler/rustc_codegen_gcc/src/type_of.rs | 37 +++++++++++-------- 3 files changed, 26 insertions(+), 21 deletions(-) diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index 7c52cba096b40..e6ae7cf174d0e 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -1016,11 +1016,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { OperandValue::Ref(place.val) } else if place.layout.is_gcc_immediate() { let load = self.load(place.layout.gcc_type(self), place.val.llval, place.val.align); - if let abi::Abi::Scalar(ref scalar) = place.layout.abi { + if let abi::BackendRepr::Scalar(ref scalar) = place.layout.backend_repr { scalar_load_metadata(self, load, scalar); } OperandValue::Immediate(self.to_immediate(load, place.layout)) - } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { + } else if let abi::BackendRepr::ScalarPair(ref a, ref b) = place.layout.backend_repr { let b_offset = a.size(self).align_to(b.align(self).abi); let mut load = |i, scalar: &abi::Scalar, align| { diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index 972d66321403d..b0298a35cb083 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -294,13 +294,13 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc } sym::raw_eq => { - use rustc_target::abi::Abi::*; + use rustc_abi::BackendRepr::*; let tp_ty = fn_args.type_at(0); let layout = self.layout_of(tp_ty).layout; - let _use_integer_compare = match layout.abi() { + let _use_integer_compare = match layout.backend_repr() { Scalar(_) | ScalarPair(_, _) => true, Uninhabited | Vector { .. } => false, - Aggregate { .. } => { + Memory { .. } => { // For rusty ABIs, small aggregates are actually passed // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), // so we re-use that same threshold here. 
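The `raw_eq` hunk above happens to match every variant of the renamed enum, so it doubles as a summary of the new spelling. A minimal sketch of the correspondence (illustrative only, not part of any patch in this series; it assumes the in-tree nightly `rustc_abi` crate, and `describe` is a made-up helper):

```rust
use rustc_abi::BackendRepr;

// One arm per variant, exactly as matched in the hunks above. Only
// `Aggregate` changes its name (to `Memory`); the rest keep theirs.
fn describe(repr: &BackendRepr) -> &'static str {
    match repr {
        BackendRepr::Scalar(_) => "a single scalar (was `Abi::Scalar`)",
        BackendRepr::ScalarPair(..) => "a pair of scalars (was `Abi::ScalarPair`)",
        BackendRepr::Vector { .. } => "a SIMD vector (was `Abi::Vector`)",
        BackendRepr::Memory { .. } => "lowered via memory (was `Abi::Aggregate`)",
        BackendRepr::Uninhabited => "uninhabited (was `Abi::Uninhabited`)",
    }
}
```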
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs index db874afe1ab9b..0efdf36da485e 100644 --- a/compiler/rustc_codegen_gcc/src/type_of.rs +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -3,7 +3,7 @@ use std::fmt::Write; use gccjit::{Struct, Type}; use rustc_abi as abi; use rustc_abi::Primitive::*; -use rustc_abi::{Abi, FieldsShape, Integer, PointeeInfo, Size, Variants}; +use rustc_abi::{BackendRepr, FieldsShape, Integer, PointeeInfo, Size, Variants}; use rustc_codegen_ssa::traits::{ BaseTypeCodegenMethods, DerivedTypeCodegenMethods, LayoutTypeCodegenMethods, }; @@ -60,9 +60,9 @@ fn uncached_gcc_type<'gcc, 'tcx>( layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>, ) -> Type<'gcc> { - match layout.abi { - Abi::Scalar(_) => bug!("handled elsewhere"), - Abi::Vector { ref element, count } => { + match layout.backend_repr { + BackendRepr::Scalar(_) => bug!("handled elsewhere"), + BackendRepr::Vector { ref element, count } => { let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO); let element = // NOTE: gcc doesn't allow pointer types in vectors. @@ -74,7 +74,7 @@ fn uncached_gcc_type<'gcc, 'tcx>( }; return cx.context.new_vector_type(element, count); } - Abi::ScalarPair(..) => { + BackendRepr::ScalarPair(..) => { return cx.type_struct( &[ layout.scalar_pair_element_gcc_type(cx, 0), @@ -83,7 +83,7 @@ fn uncached_gcc_type<'gcc, 'tcx>( false, ); } - Abi::Uninhabited | Abi::Aggregate { .. } => {} + BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {} } let name = match *layout.ty.kind() { @@ -176,16 +176,21 @@ pub trait LayoutGccExt<'tcx> { impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { fn is_gcc_immediate(&self) -> bool { - match self.abi { - Abi::Scalar(_) | Abi::Vector { .. } => true, - Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false, + match self.backend_repr { + BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true, + BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => { + false + } } } fn is_gcc_scalar_pair(&self) -> bool { - match self.abi { - Abi::ScalarPair(..) => true, - Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false, + match self.backend_repr { + BackendRepr::ScalarPair(..) => true, + BackendRepr::Uninhabited + | BackendRepr::Scalar(_) + | BackendRepr::Vector { .. } + | BackendRepr::Memory { .. } => false, } } @@ -205,7 +210,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { // This must produce the same result for `repr(transparent)` wrappers as for the inner type! // In other words, this should generally not look at the type at all, but only at the // layout. - if let Abi::Scalar(ref scalar) = self.abi { + if let BackendRepr::Scalar(ref scalar) = self.backend_repr { // Use a different cache for scalars because pointers to DSTs // can be either wide or thin (data pointers of wide pointers). if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) { @@ -261,7 +266,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { } fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> { - if let Abi::Scalar(ref scalar) = self.abi { + if let BackendRepr::Scalar(ref scalar) = self.backend_repr { if scalar.is_bool() { return cx.type_i1(); } @@ -299,8 +304,8 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { // This must produce the same result for `repr(transparent)` wrappers as for the inner type! 
// In other words, this should generally not look at the type at all, but only at the // layout. - let (a, b) = match self.abi { - Abi::ScalarPair(ref a, ref b) => (a, b), + let (a, b) = match self.backend_repr { + BackendRepr::ScalarPair(ref a, ref b) => (a, b), _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self), }; let scalar = [a, b][index]; From 3059ed8fa6ad148fe2a67303114da7a5f2bba43c Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Tue, 29 Oct 2024 13:38:16 -0700 Subject: [PATCH 4/7] miri: `rustc_abi::Abi` => `BackendRepr` --- src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs | 7 +++++-- src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs | 7 +++++-- src/tools/miri/src/helpers.rs | 8 ++++++-- src/tools/miri/src/lib.rs | 1 + src/tools/miri/src/operator.rs | 2 +- src/tools/miri/src/shims/native_lib.rs | 4 ++-- 6 files changed, 20 insertions(+), 9 deletions(-) diff --git a/src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs b/src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs index fdc7a675fb713..47fe41d9ecdd8 100644 --- a/src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs +++ b/src/tools/miri/src/borrow_tracker/stacked_borrows/mod.rs @@ -9,11 +9,11 @@ use std::cell::RefCell; use std::fmt::Write; use std::{cmp, mem}; +use rustc_abi::{BackendRepr, Size}; use rustc_data_structures::fx::FxHashSet; use rustc_middle::mir::{Mutability, RetagKind}; use rustc_middle::ty::layout::HasParamEnv; use rustc_middle::ty::{self, Ty}; -use rustc_target::abi::{Abi, Size}; use self::diagnostics::{RetagCause, RetagInfo}; pub use self::item::{Item, Permission}; @@ -972,7 +972,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { RetagFields::OnlyScalar => { // Matching `ArgAbi::new` at the time of writing, only fields of // `Scalar` and `ScalarPair` ABI are considered. - matches!(place.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) + matches!( + place.layout.backend_repr, + BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..) + ) } }; if recurse { diff --git a/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs b/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs index acfb76030f5ff..40467aa4bc1be 100644 --- a/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs +++ b/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs @@ -1,8 +1,8 @@ +use rustc_abi::{BackendRepr, Size}; use rustc_middle::mir::{Mutability, RetagKind}; use rustc_middle::ty::layout::HasParamEnv; use rustc_middle::ty::{self, Ty}; use rustc_span::def_id::DefId; -use rustc_target::abi::{Abi, Size}; use crate::borrow_tracker::{GlobalState, GlobalStateInner, ProtectorKind}; use crate::concurrency::data_race::NaReadType; @@ -495,7 +495,10 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { RetagFields::OnlyScalar => { // Matching `ArgAbi::new` at the time of writing, only fields of // `Scalar` and `ScalarPair` ABI are considered. - matches!(place.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) + matches!( + place.layout.backend_repr, + BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..) 
+                )
             }
         };
         if recurse {
diff --git a/src/tools/miri/src/helpers.rs b/src/tools/miri/src/helpers.rs
index d35cbf242f5d1..17f664da85382 100644
--- a/src/tools/miri/src/helpers.rs
+++ b/src/tools/miri/src/helpers.rs
@@ -349,8 +349,12 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         i: impl Into<i128>,
         dest: &impl Writeable<'tcx, Provenance>,
     ) -> InterpResult<'tcx> {
-        assert!(dest.layout().abi.is_scalar(), "write_int on non-scalar type {}", dest.layout().ty);
-        let val = if dest.layout().abi.is_signed() {
+        assert!(
+            dest.layout().backend_repr.is_scalar(),
+            "write_int on non-scalar type {}",
+            dest.layout().ty
+        );
+        let val = if dest.layout().backend_repr.is_signed() {
             Scalar::from_int(i, dest.layout().size)
         } else {
             // `unwrap` can only fail here if `i` is negative
diff --git a/src/tools/miri/src/lib.rs b/src/tools/miri/src/lib.rs
index 938d1ca319e0f..f903ccbc25a74 100644
--- a/src/tools/miri/src/lib.rs
+++ b/src/tools/miri/src/lib.rs
@@ -55,6 +55,7 @@ extern crate either;
 extern crate tracing;
 
 // The rustc crates we need
+extern crate rustc_abi;
 extern crate rustc_apfloat;
 extern crate rustc_ast;
 extern crate rustc_attr;
diff --git a/src/tools/miri/src/operator.rs b/src/tools/miri/src/operator.rs
index 8e06f4258d614..608e23fc1108a 100644
--- a/src/tools/miri/src/operator.rs
+++ b/src/tools/miri/src/operator.rs
@@ -23,7 +23,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
 
         interp_ok(match bin_op {
             Eq | Ne | Lt | Le | Gt | Ge => {
-                assert_eq!(left.layout.abi, right.layout.abi); // types can differ, e.g. fn ptrs with different `for`
+                assert_eq!(left.layout.backend_repr, right.layout.backend_repr); // types can differ, e.g. fn ptrs with different `for`
                 let size = this.pointer_size();
                 // Just compare the bits. ScalarPairs are compared lexicographically.
                 // We thus always compare pairs and simply fill scalars up with 0.
diff --git a/src/tools/miri/src/shims/native_lib.rs b/src/tools/miri/src/shims/native_lib.rs
index 3f282017bb79d..525bcd381d52c 100644
--- a/src/tools/miri/src/shims/native_lib.rs
+++ b/src/tools/miri/src/shims/native_lib.rs
@@ -5,7 +5,7 @@ use libffi::high::call as ffi;
 use libffi::low::CodePtr;
 use rustc_middle::ty::{self as ty, IntTy, UintTy};
 use rustc_span::Symbol;
-use rustc_target::abi::{Abi, HasDataLayout};
+use rustc_abi::{BackendRepr, HasDataLayout};
 
 use crate::*;
 
@@ -149,7 +149,7 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
         // Get the function arguments, and convert them to `libffi`-compatible form.
        let mut libffi_args = Vec::<CArg>::with_capacity(args.len());
         for arg in args.iter() {
-            if !matches!(arg.layout.abi, Abi::Scalar(_)) {
+            if !matches!(arg.layout.backend_repr, BackendRepr::Scalar(_)) {
                 throw_unsup_format!("only scalar argument types are supported for native calls")
             }
             libffi_args.push(imm_to_carg(this.read_immediate(arg)?, this)?);

From 11f9217a3391baf7bc9bfac2d3fee115438c983b Mon Sep 17 00:00:00 2001
From: Jubilee Young
Date: Tue, 29 Oct 2024 13:38:30 -0700
Subject: [PATCH 5/7] rust-analyzer: `rustc_abi::Abi` => `BackendRepr`

---
 .../rust-analyzer/crates/hir-ty/src/layout.rs | 22 +++++++++----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
index 9f4cc98993e24..c5fa20bc8acbc 100644
--- a/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
+++ b/src/tools/rust-analyzer/crates/hir-ty/src/layout.rs
@@ -6,7 +6,7 @@ use base_db::ra_salsa::Cycle;
 use chalk_ir::{AdtId, FloatTy, IntTy, TyKind, UintTy};
 use hir_def::{
     layout::{
-        Abi, FieldsShape, Float, Integer, LayoutCalculator, LayoutCalculatorError, LayoutData,
+        BackendRepr, FieldsShape, Float, Integer, LayoutCalculator, LayoutCalculatorError, LayoutData,
         Primitive, ReprOptions, Scalar, Size, StructKind, TargetDataLayout, WrappingRange,
     },
     LocalFieldId, StructId,
@@ -168,7 +168,7 @@ fn layout_of_simd_ty(
 
     // Compute the ABI of the element type:
     let e_ly = db.layout_of_ty(e_ty, env)?;
-    let Abi::Scalar(e_abi) = e_ly.abi else {
+    let BackendRepr::Scalar(e_abi) = e_ly.backend_repr else {
         return Err(LayoutError::Unknown);
     };
 
@@ -190,7 +190,7 @@ fn layout_of_simd_ty(
     Ok(Arc::new(Layout {
         variants: Variants::Single { index: struct_variant_idx() },
         fields,
-        abi: Abi::Vector { element: e_abi, count: e_len },
+        backend_repr: BackendRepr::Vector { element: e_abi, count: e_len },
         largest_niche: e_ly.largest_niche,
         size,
         align,
@@ -294,10 +294,10 @@ pub fn layout_of_ty_query(
                 .checked_mul(count, dl)
                 .ok_or(LayoutError::BadCalc(LayoutCalculatorError::SizeOverflow))?;

-            let abi = if count != 0 && matches!(element.abi, Abi::Uninhabited) {
-                Abi::Uninhabited
+            let backend_repr = if count != 0 && matches!(element.backend_repr, BackendRepr::Uninhabited) {
+                BackendRepr::Uninhabited
             } else {
-                Abi::Aggregate { sized: true }
+                BackendRepr::Memory { sized: true }
             };

             let largest_niche = if count != 0 { element.largest_niche } else { None };

             Layout {
                 variants: Variants::Single { index: struct_variant_idx() },
                 fields: FieldsShape::Array { stride: element.size, count },
-                abi,
+                backend_repr,
                 largest_niche,
                 align: element.align,
                 size,
@@ -318,7 +318,7 @@ pub fn layout_of_ty_query(
             Layout {
                 variants: Variants::Single { index: struct_variant_idx() },
                 fields: FieldsShape::Array { stride: element.size, count: 0 },
-                abi: Abi::Aggregate { sized: false },
+                backend_repr: BackendRepr::Memory { sized: false },
                 largest_niche: None,
                 align: element.align,
                 size: Size::ZERO,
@@ -329,7 +329,7 @@ pub fn layout_of_ty_query(
         TyKind::Str => Layout {
             variants: Variants::Single { index: struct_variant_idx() },
             fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
-            abi: Abi::Aggregate { sized: false },
+            backend_repr: BackendRepr::Memory { sized: false },
             largest_niche: None,
             align: dl.i8_align,
             size: Size::ZERO,
@@ -379,8 +379,8 @@ pub fn layout_of_ty_query(
         TyKind::Never => cx.calc.layout_of_never_type(),
         TyKind::Dyn(_) | TyKind::Foreign(_) => {
             let mut unit = layout_of_unit(&cx)?;
-            match &mut unit.abi {
-                Abi::Aggregate { sized } => *sized = false,
+            match &mut unit.backend_repr {
+                BackendRepr::Memory { sized } => *sized = false,
                 _ => return Err(LayoutError::Unknown),
             }
             unit

From 0b9d1eb889e8ceaea6af4f614e747b746a68822e Mon Sep 17 00:00:00 2001
From: Jubilee Young
Date: Tue, 29 Oct 2024 23:22:36 -0700
Subject: [PATCH 6/7] tests: cross-compile multi-platform ZST ABI tests

This allows them to be blessed regardless of platform.
---
 ...nux.stderr => c-zst.aarch64-darwin.stderr} |  0
 tests/ui/abi/c-zst.rs                         | 80 ++++++++++++-----
 ...other.stderr => c-zst.x86_64-linux.stderr} |  0
 tests/ui/abi/win64-zst.rs                     | 23 ++++--
 ...r.stderr => win64-zst.x86_64-linux.stderr} |  0
 ...rr => win64-zst.x86_64-windows-gnu.stderr} |  0
 6 files changed, 76 insertions(+), 27 deletions(-)
 rename tests/ui/abi/{c-zst.other-linux.stderr => c-zst.aarch64-darwin.stderr} (100%)
 rename tests/ui/abi/{c-zst.other.stderr => c-zst.x86_64-linux.stderr} (100%)
 rename tests/ui/abi/{win64-zst.other.stderr => win64-zst.x86_64-linux.stderr} (100%)
 rename tests/ui/abi/{win64-zst.windows-gnu.stderr => win64-zst.x86_64-windows-gnu.stderr} (100%)

diff --git a/tests/ui/abi/c-zst.other-linux.stderr b/tests/ui/abi/c-zst.aarch64-darwin.stderr
similarity index 100%
rename from tests/ui/abi/c-zst.other-linux.stderr
rename to tests/ui/abi/c-zst.aarch64-darwin.stderr
diff --git a/tests/ui/abi/c-zst.rs b/tests/ui/abi/c-zst.rs
index 0cfd653b37e87..69ebefa09acba 100644
--- a/tests/ui/abi/c-zst.rs
+++ b/tests/ui/abi/c-zst.rs
@@ -1,27 +1,63 @@
-//@ revisions: other other-linux x86_64-pc-windows-gnu s390x-linux sparc64-linux powerpc-linux
 //@ normalize-stderr-test: "(abi|pref|unadjusted_abi_align): Align\([1-8] bytes\)" -> "$1: $$SOME_ALIGN"
-// ZSTs are only not ignored when the target_env is "gnu", "musl" or "uclibc". However, Rust does
-// not currently support any other target_env on these architectures.
-
-// Ignore the ZST revisions
-//@[other] ignore-x86_64-pc-windows-gnu
-//@[other] ignore-linux
-//@[other-linux] only-linux
-//@[other-linux] ignore-s390x
-//@[other-linux] ignore-sparc64
-//@[other-linux] ignore-powerpc
-
-// Pass the ZST indirectly revisions
-//@[x86_64-pc-windows-gnu] only-x86_64-pc-windows-gnu
-//@[s390x-linux] only-s390x
-//@[s390x-linux] only-linux
-//@[sparc64-linux] only-sparc64
-//@[sparc64-linux] only-linux
-//@[powerpc-linux] only-powerpc
-//@[powerpc-linux] only-linux
-
-#![feature(rustc_attrs)]
+/*!
+C doesn't have zero-sized types... except it does.
+
+Standard C doesn't, but some C compilers, like GCC, implement ZSTs as a compiler extension.
+This historically has wound up interacting with processor-specific ABIs in fairly ad-hoc ways.
+E.g. despite being "zero-sized", sometimes C compilers decide ZSTs consume registers.
+
+That means these two function signatures may not be compatible:
+
+```
+extern "C" fn((), i32, i32);
+extern "C" fn(i32, (), i32);
+```
+*/
+
+/*
+ * ZST IN "C" IS ZERO-SIZED
+ */
+
+//@ revisions: aarch64-darwin
+//@[aarch64-darwin] compile-flags: --target aarch64-apple-darwin
+//@[aarch64-darwin] needs-llvm-components: aarch64
+
+//@ revisions: x86_64-linux
+//@[x86_64-linux] compile-flags: --target x86_64-unknown-linux-gnu
+//@[x86_64-linux] needs-llvm-components: x86
+
+
+/*
+ * ZST IN "C" IS PASS-BY-POINTER
+ */
+
+// according to the SVR4 ABI, an aggregate is always passed by reference,
+// and it so happens the GCC extension for ZSTs considers them as structs.
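+//
+// In other words: the GCC extension makes a ZST an empty struct, an empty
+// struct is an aggregate, and these ABIs give every aggregate an argument
+// slot, so the revisions below expect the ZST to be passed indirectly
+// rather than ignored.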
+//@ revisions: powerpc-linux +//@[powerpc-linux] compile-flags: --target powerpc-unknown-linux-gnu +//@[powerpc-linux] needs-llvm-components: powerpc + +//@ revisions: s390x-linux +//@[s390x-linux] compile-flags: --target s390x-unknown-linux-gnu +//@[s390x-linux] needs-llvm-components: systemz + +//@ revisions: sparc64-linux +//@[sparc64-linux] compile-flags: --target sparc64-unknown-linux-gnu +//@[sparc64-linux] needs-llvm-components: sparc + +// The Win64 ABI uses slightly different handling for power-of-2 sizes in the ABI, +// so GCC decided that ZSTs are pass-by-pointer, as `0.is_power_of_two() == false` +//@ revisions: x86_64-pc-windows-gnu +//@[x86_64-pc-windows-gnu] compile-flags: --target x86_64-pc-windows-gnu +//@[x86_64-pc-windows-gnu] needs-llvm-components: x86 + + +#![feature(lang_items, no_core, rustc_attrs)] +#![no_core] #![crate_type = "lib"] +#[lang = "sized"] +trait Sized {} + #[rustc_abi(debug)] extern "C" fn pass_zst(_: ()) {} //~ ERROR: fn_abi diff --git a/tests/ui/abi/c-zst.other.stderr b/tests/ui/abi/c-zst.x86_64-linux.stderr similarity index 100% rename from tests/ui/abi/c-zst.other.stderr rename to tests/ui/abi/c-zst.x86_64-linux.stderr diff --git a/tests/ui/abi/win64-zst.rs b/tests/ui/abi/win64-zst.rs index cae32795e16e7..a2f7d19eb4591 100644 --- a/tests/ui/abi/win64-zst.rs +++ b/tests/ui/abi/win64-zst.rs @@ -1,11 +1,24 @@ -//@ only-x86_64 -//@ revisions: other windows-gnu //@ normalize-stderr-test: "(abi|pref|unadjusted_abi_align): Align\([1-8] bytes\)" -> "$1: $$SOME_ALIGN" -//@[other] ignore-windows-gnu -//@[windows-gnu] only-windows-gnu +//@ only-x86_64 + +//@ revisions: x86_64-linux +//@[x86_64-linux] compile-flags: --target x86_64-unknown-linux-gnu +//@[x86_64-linux] needs-llvm-components: x86 -#![feature(rustc_attrs)] +//@ revisions: x86_64-windows-gnu +//@[x86_64-windows-gnu] compile-flags: --target x86_64-pc-windows-gnu +//@[x86_64-windows-gnu] needs-llvm-components: x86 + +//@ revisions: x86_64-windows-msvc +//@[x86_64-windows-msvc] compile-flags: --target x86_64-pc-windows-msvc +//@[x86_64-windows-msvc] needs-llvm-components: x86 + +#![feature(no_core, lang_items, rustc_attrs)] +#![no_core] #![crate_type = "lib"] +#[lang = "sized"] +trait Sized {} + #[rustc_abi(debug)] extern "win64" fn pass_zst(_: ()) {} //~ ERROR: fn_abi diff --git a/tests/ui/abi/win64-zst.other.stderr b/tests/ui/abi/win64-zst.x86_64-linux.stderr similarity index 100% rename from tests/ui/abi/win64-zst.other.stderr rename to tests/ui/abi/win64-zst.x86_64-linux.stderr diff --git a/tests/ui/abi/win64-zst.windows-gnu.stderr b/tests/ui/abi/win64-zst.x86_64-windows-gnu.stderr similarity index 100% rename from tests/ui/abi/win64-zst.windows-gnu.stderr rename to tests/ui/abi/win64-zst.x86_64-windows-gnu.stderr From 083a362dd113da1217f7eed0e48baa2c584ac641 Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Tue, 29 Oct 2024 23:21:20 -0700 Subject: [PATCH 7/7] tests: Bless `rustc_abi::Abi::Aggregate` => `::Memory` --- tests/ui/abi/c-zst.aarch64-darwin.stderr | 6 +- tests/ui/abi/c-zst.powerpc-linux.stderr | 6 +- tests/ui/abi/c-zst.s390x-linux.stderr | 6 +- tests/ui/abi/c-zst.sparc64-linux.stderr | 6 +- tests/ui/abi/c-zst.x86_64-linux.stderr | 6 +- .../ui/abi/c-zst.x86_64-pc-windows-gnu.stderr | 6 +- tests/ui/abi/debug.stderr | 24 +++---- tests/ui/abi/sysv64-zst.stderr | 4 +- tests/ui/abi/win64-zst.x86_64-linux.stderr | 6 +- .../abi/win64-zst.x86_64-windows-gnu.stderr | 6 +- .../abi/win64-zst.x86_64-windows-msvc.stderr | 67 +++++++++++++++++++ tests/ui/layout/debug.stderr | 20 +++--- 
tests/ui/layout/enum-scalar-pair-int-ptr.rs | 2 +- .../ui/layout/enum-scalar-pair-int-ptr.stderr | 2 +- tests/ui/layout/hexagon-enum.stderr | 10 +-- ...-scalarpair-payload-might-be-uninit.stderr | 10 +-- .../issue-96185-overaligned-enum.stderr | 10 +-- tests/ui/layout/struct.rs | 2 +- tests/ui/layout/struct.stderr | 2 +- tests/ui/layout/thumb-enum.stderr | 10 +-- .../layout/zero-sized-array-enum-niche.stderr | 26 +++---- ...-variants.aarch64-unknown-linux-gnu.stderr | 4 +- ...-c-dead-variants.armebv7r-none-eabi.stderr | 4 +- ...-dead-variants.i686-pc-windows-msvc.stderr | 4 +- ...d-variants.x86_64-unknown-linux-gnu.stderr | 4 +- tests/ui/repr/repr-c-int-dead-variants.stderr | 4 +- .../type/pattern_types/range_patterns.stderr | 4 +- 27 files changed, 164 insertions(+), 97 deletions(-) create mode 100644 tests/ui/abi/win64-zst.x86_64-windows-msvc.stderr diff --git a/tests/ui/abi/c-zst.aarch64-darwin.stderr b/tests/ui/abi/c-zst.aarch64-darwin.stderr index 5a656e6ea66e0..7d384bc875f98 100644 --- a/tests/ui/abi/c-zst.aarch64-darwin.stderr +++ b/tests/ui/abi/c-zst.aarch64-darwin.stderr @@ -9,7 +9,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -36,7 +36,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -58,7 +58,7 @@ error: fn_abi_of(pass_zst) = FnAbi { conv: C, can_unwind: false, } - --> $DIR/c-zst.rs:27:1 + --> $DIR/c-zst.rs:63:1 | LL | extern "C" fn pass_zst(_: ()) {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/tests/ui/abi/c-zst.powerpc-linux.stderr b/tests/ui/abi/c-zst.powerpc-linux.stderr index ba9738050d87d..7980710bab676 100644 --- a/tests/ui/abi/c-zst.powerpc-linux.stderr +++ b/tests/ui/abi/c-zst.powerpc-linux.stderr @@ -9,7 +9,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -47,7 +47,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -69,7 +69,7 @@ error: fn_abi_of(pass_zst) = FnAbi { conv: C, can_unwind: false, } - --> $DIR/c-zst.rs:27:1 + --> $DIR/c-zst.rs:63:1 | LL | extern "C" fn pass_zst(_: ()) {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/tests/ui/abi/c-zst.s390x-linux.stderr b/tests/ui/abi/c-zst.s390x-linux.stderr index ba9738050d87d..7980710bab676 100644 --- a/tests/ui/abi/c-zst.s390x-linux.stderr +++ b/tests/ui/abi/c-zst.s390x-linux.stderr @@ -9,7 +9,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -47,7 +47,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -69,7 +69,7 @@ error: fn_abi_of(pass_zst) = FnAbi { conv: C, can_unwind: false, } - --> $DIR/c-zst.rs:27:1 + --> $DIR/c-zst.rs:63:1 | LL | extern "C" fn pass_zst(_: ()) {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/tests/ui/abi/c-zst.sparc64-linux.stderr b/tests/ui/abi/c-zst.sparc64-linux.stderr index ba9738050d87d..7980710bab676 100644 --- a/tests/ui/abi/c-zst.sparc64-linux.stderr +++ b/tests/ui/abi/c-zst.sparc64-linux.stderr @@ -9,7 +9,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ 
-47,7 +47,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -69,7 +69,7 @@ error: fn_abi_of(pass_zst) = FnAbi { conv: C, can_unwind: false, } - --> $DIR/c-zst.rs:27:1 + --> $DIR/c-zst.rs:63:1 | LL | extern "C" fn pass_zst(_: ()) {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/tests/ui/abi/c-zst.x86_64-linux.stderr b/tests/ui/abi/c-zst.x86_64-linux.stderr index 5a656e6ea66e0..7d384bc875f98 100644 --- a/tests/ui/abi/c-zst.x86_64-linux.stderr +++ b/tests/ui/abi/c-zst.x86_64-linux.stderr @@ -9,7 +9,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -36,7 +36,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -58,7 +58,7 @@ error: fn_abi_of(pass_zst) = FnAbi { conv: C, can_unwind: false, } - --> $DIR/c-zst.rs:27:1 + --> $DIR/c-zst.rs:63:1 | LL | extern "C" fn pass_zst(_: ()) {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/tests/ui/abi/c-zst.x86_64-pc-windows-gnu.stderr b/tests/ui/abi/c-zst.x86_64-pc-windows-gnu.stderr index ba9738050d87d..7980710bab676 100644 --- a/tests/ui/abi/c-zst.x86_64-pc-windows-gnu.stderr +++ b/tests/ui/abi/c-zst.x86_64-pc-windows-gnu.stderr @@ -9,7 +9,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -47,7 +47,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -69,7 +69,7 @@ error: fn_abi_of(pass_zst) = FnAbi { conv: C, can_unwind: false, } - --> $DIR/c-zst.rs:27:1 + --> $DIR/c-zst.rs:63:1 | LL | extern "C" fn pass_zst(_: ()) {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/tests/ui/abi/debug.stderr b/tests/ui/abi/debug.stderr index 7365839da89dc..aa51c42c58dc4 100644 --- a/tests/ui/abi/debug.stderr +++ b/tests/ui/abi/debug.stderr @@ -235,7 +235,7 @@ error: fn_abi_of(test_generic) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -317,7 +317,7 @@ error: ABIs are not compatible abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -387,7 +387,7 @@ error: ABIs are not compatible abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -426,7 +426,7 @@ error: ABIs are not compatible abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Array { @@ -464,7 +464,7 @@ error: ABIs are not compatible abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -497,7 +497,7 @@ error: ABIs are not compatible abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Array { @@ -535,7 +535,7 @@ error: ABIs are not compatible abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -610,7 +610,7 @@ error: ABIs are not compatible abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -680,7 +680,7 @@ error: ABIs are not compatible abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: 
Arbitrary { @@ -756,7 +756,7 @@ error: ABIs are not compatible abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -826,7 +826,7 @@ error: ABIs are not compatible abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -929,7 +929,7 @@ error: fn_abi_of(assoc_test) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { diff --git a/tests/ui/abi/sysv64-zst.stderr b/tests/ui/abi/sysv64-zst.stderr index 8b0b84dfa0699..8e1791e27d27c 100644 --- a/tests/ui/abi/sysv64-zst.stderr +++ b/tests/ui/abi/sysv64-zst.stderr @@ -9,7 +9,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -36,7 +36,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { diff --git a/tests/ui/abi/win64-zst.x86_64-linux.stderr b/tests/ui/abi/win64-zst.x86_64-linux.stderr index 15db141cb5748..76d90670eb1dd 100644 --- a/tests/ui/abi/win64-zst.x86_64-linux.stderr +++ b/tests/ui/abi/win64-zst.x86_64-linux.stderr @@ -9,7 +9,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -36,7 +36,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -58,7 +58,7 @@ error: fn_abi_of(pass_zst) = FnAbi { conv: X86_64Win64, can_unwind: false, } - --> $DIR/win64-zst.rs:11:1 + --> $DIR/win64-zst.rs:24:1 | LL | extern "win64" fn pass_zst(_: ()) {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/tests/ui/abi/win64-zst.x86_64-windows-gnu.stderr b/tests/ui/abi/win64-zst.x86_64-windows-gnu.stderr index 7773e0aa2b572..7ee90e2474413 100644 --- a/tests/ui/abi/win64-zst.x86_64-windows-gnu.stderr +++ b/tests/ui/abi/win64-zst.x86_64-windows-gnu.stderr @@ -9,7 +9,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -47,7 +47,7 @@ error: fn_abi_of(pass_zst) = FnAbi { abi: $SOME_ALIGN, pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -69,7 +69,7 @@ error: fn_abi_of(pass_zst) = FnAbi { conv: X86_64Win64, can_unwind: false, } - --> $DIR/win64-zst.rs:11:1 + --> $DIR/win64-zst.rs:24:1 | LL | extern "win64" fn pass_zst(_: ()) {} | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/tests/ui/abi/win64-zst.x86_64-windows-msvc.stderr b/tests/ui/abi/win64-zst.x86_64-windows-msvc.stderr new file mode 100644 index 0000000000000..76d90670eb1dd --- /dev/null +++ b/tests/ui/abi/win64-zst.x86_64-windows-msvc.stderr @@ -0,0 +1,67 @@ +error: fn_abi_of(pass_zst) = FnAbi { + args: [ + ArgAbi { + layout: TyAndLayout { + ty: (), + layout: Layout { + size: Size(0 bytes), + align: AbiAndPrefAlign { + abi: $SOME_ALIGN, + pref: $SOME_ALIGN, + }, + abi: Memory { + sized: true, + }, + fields: Arbitrary { + offsets: [], + memory_index: [], + }, + largest_niche: None, + variants: Single { + index: 0, + }, + max_repr_align: None, + unadjusted_abi_align: $SOME_ALIGN, + }, + }, + mode: Ignore, + }, + ], + ret: ArgAbi { + layout: TyAndLayout { + ty: (), + layout: Layout { + size: Size(0 bytes), + align: AbiAndPrefAlign { + abi: $SOME_ALIGN, + pref: $SOME_ALIGN, + }, + abi: 
Memory { + sized: true, + }, + fields: Arbitrary { + offsets: [], + memory_index: [], + }, + largest_niche: None, + variants: Single { + index: 0, + }, + max_repr_align: None, + unadjusted_abi_align: $SOME_ALIGN, + }, + }, + mode: Ignore, + }, + c_variadic: false, + fixed_count: 1, + conv: X86_64Win64, + can_unwind: false, + } + --> $DIR/win64-zst.rs:24:1 + | +LL | extern "win64" fn pass_zst(_: ()) {} + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +error: aborting due to 1 previous error + diff --git a/tests/ui/layout/debug.stderr b/tests/ui/layout/debug.stderr index c9715a8e14632..bd31665dac1f7 100644 --- a/tests/ui/layout/debug.stderr +++ b/tests/ui/layout/debug.stderr @@ -10,7 +10,7 @@ error: layout_of(E) = Layout { abi: Align(4 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -48,7 +48,7 @@ error: layout_of(E) = Layout { abi: Align(1 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -150,7 +150,7 @@ error: layout_of(U) = Layout { abi: Align(4 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Union( @@ -337,7 +337,7 @@ error: layout_of(V) = Layout { abi: Align(2 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Union( @@ -361,7 +361,7 @@ error: layout_of(W) = Layout { abi: Align(2 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Union( @@ -385,7 +385,7 @@ error: layout_of(Y) = Layout { abi: Align(2 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Union( @@ -409,7 +409,7 @@ error: layout_of(P1) = Layout { abi: Align(1 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Union( @@ -433,7 +433,7 @@ error: layout_of(P2) = Layout { abi: Align(1 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Union( @@ -457,7 +457,7 @@ error: layout_of(P3) = Layout { abi: Align(1 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Union( @@ -481,7 +481,7 @@ error: layout_of(P4) = Layout { abi: Align(1 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Union( diff --git a/tests/ui/layout/enum-scalar-pair-int-ptr.rs b/tests/ui/layout/enum-scalar-pair-int-ptr.rs index 885cc3e37dfd6..ebb3fdb1514c3 100644 --- a/tests/ui/layout/enum-scalar-pair-int-ptr.rs +++ b/tests/ui/layout/enum-scalar-pair-int-ptr.rs @@ -18,7 +18,7 @@ enum ScalarPairPointerWithInt { //~ERROR: abi: ScalarPair // of a different size. (Assumes that no target has 8 bit pointers, which // feels pretty safe.) 
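The comment above explains why a pointer cannot form a ScalarPair with an integer payload of a different size. As a condensed sketch of the two cases under the post-rename naming (illustrative enum names; assumes a nightly compiler with `rustc_attrs`, and is not the test source itself):

    // Condensed sketch, not the actual test file: `#[rustc_layout(abi)]`
    // reports the computed backend representation as a compile error.
    #![feature(rustc_attrs)]
    #![crate_type = "lib"]

    // A pointer-sized integer payload can pair up with the pointer,
    // so the enum lowers as a ScalarPair.
    #[rustc_layout(abi)]
    enum PointerWithInt { //~ERROR: abi: ScalarPair
        A(usize),
        B(Box<()>),
    }

    // A u8 payload is smaller than the pointer, so the pair does not
    // line up; the layout falls back to what is now called `Memory`
    // (formerly `Aggregate`).
    #[rustc_layout(abi)]
    enum PointerWithSmallerInt { //~ERROR: abi: Memory
        A(u8),
        B(Box<()>),
    }
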
#[rustc_layout(abi)] -enum NotScalarPairPointerWithSmallerInt { //~ERROR: abi: Aggregate +enum NotScalarPairPointerWithSmallerInt { //~ERROR: abi: Memory A(u8), B(Box<()>), } diff --git a/tests/ui/layout/enum-scalar-pair-int-ptr.stderr b/tests/ui/layout/enum-scalar-pair-int-ptr.stderr index b25eda628cd6a..357c8182ebd6f 100644 --- a/tests/ui/layout/enum-scalar-pair-int-ptr.stderr +++ b/tests/ui/layout/enum-scalar-pair-int-ptr.stderr @@ -4,7 +4,7 @@ error: abi: ScalarPair(Initialized { value: Int(I?, false), valid_range: $VALID_ LL | enum ScalarPairPointerWithInt { | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -error: abi: Aggregate { sized: true } +error: abi: Memory { sized: true } --> $DIR/enum-scalar-pair-int-ptr.rs:21:1 | LL | enum NotScalarPairPointerWithSmallerInt { diff --git a/tests/ui/layout/hexagon-enum.stderr b/tests/ui/layout/hexagon-enum.stderr index a2ad4a1ab58ad..59fe667923f11 100644 --- a/tests/ui/layout/hexagon-enum.stderr +++ b/tests/ui/layout/hexagon-enum.stderr @@ -48,7 +48,7 @@ error: layout_of(A) = Layout { abi: Align(1 bytes), pref: Align(1 bytes), }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -122,7 +122,7 @@ error: layout_of(B) = Layout { abi: Align(1 bytes), pref: Align(1 bytes), }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -196,7 +196,7 @@ error: layout_of(C) = Layout { abi: Align(2 bytes), pref: Align(2 bytes), }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -270,7 +270,7 @@ error: layout_of(P) = Layout { abi: Align(4 bytes), pref: Align(4 bytes), }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -344,7 +344,7 @@ error: layout_of(T) = Layout { abi: Align(4 bytes), pref: Align(4 bytes), }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { diff --git a/tests/ui/layout/issue-96158-scalarpair-payload-might-be-uninit.stderr b/tests/ui/layout/issue-96158-scalarpair-payload-might-be-uninit.stderr index d3ba1a295b1bc..ca041fb539b93 100644 --- a/tests/ui/layout/issue-96158-scalarpair-payload-might-be-uninit.stderr +++ b/tests/ui/layout/issue-96158-scalarpair-payload-might-be-uninit.stderr @@ -90,7 +90,7 @@ error: layout_of(MissingPayloadField) = Layout { abi: Align(1 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -489,7 +489,7 @@ error: layout_of(NicheFirst) = Layout { abi: Align(1 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -509,7 +509,7 @@ error: layout_of(NicheFirst) = Layout { abi: Align(1 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -641,7 +641,7 @@ error: layout_of(NicheSecond) = Layout { abi: Align(1 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -661,7 +661,7 @@ error: layout_of(NicheSecond) = Layout { abi: Align(1 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { diff --git a/tests/ui/layout/issue-96185-overaligned-enum.stderr b/tests/ui/layout/issue-96185-overaligned-enum.stderr index c539eb453d915..bc40a2aa482ed 100644 --- a/tests/ui/layout/issue-96185-overaligned-enum.stderr +++ b/tests/ui/layout/issue-96185-overaligned-enum.stderr @@ -4,7 +4,7 @@ error: layout_of(Aligned1) = Layout { abi: Align(8 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -42,7 +42,7 @@ error: layout_of(Aligned1) = Layout { abi: 
Align(8 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -64,7 +64,7 @@ error: layout_of(Aligned1) = Layout { abi: Align(8 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -142,7 +142,7 @@ error: layout_of(Aligned2) = Layout { abi: Align(1 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -164,7 +164,7 @@ error: layout_of(Aligned2) = Layout { abi: Align(1 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { diff --git a/tests/ui/layout/struct.rs b/tests/ui/layout/struct.rs index d072d123b0d4a..309624e667cc9 100644 --- a/tests/ui/layout/struct.rs +++ b/tests/ui/layout/struct.rs @@ -6,7 +6,7 @@ #![crate_type = "lib"] #[rustc_layout(abi)] -struct AlignedZstPreventsScalar(i16, [i32; 0]); //~ERROR: abi: Aggregate +struct AlignedZstPreventsScalar(i16, [i32; 0]); //~ERROR: abi: Memory #[rustc_layout(abi)] struct AlignedZstButStillScalar(i32, [i16; 0]); //~ERROR: abi: Scalar diff --git a/tests/ui/layout/struct.stderr b/tests/ui/layout/struct.stderr index b61c9a99cce6a..7bc9af61ed48e 100644 --- a/tests/ui/layout/struct.stderr +++ b/tests/ui/layout/struct.stderr @@ -1,4 +1,4 @@ -error: abi: Aggregate { sized: true } +error: abi: Memory { sized: true } --> $DIR/struct.rs:9:1 | LL | struct AlignedZstPreventsScalar(i16, [i32; 0]); diff --git a/tests/ui/layout/thumb-enum.stderr b/tests/ui/layout/thumb-enum.stderr index 6f6ab49820676..bf043af586b1c 100644 --- a/tests/ui/layout/thumb-enum.stderr +++ b/tests/ui/layout/thumb-enum.stderr @@ -48,7 +48,7 @@ error: layout_of(A) = Layout { abi: Align(1 bytes), pref: Align(4 bytes), }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -122,7 +122,7 @@ error: layout_of(B) = Layout { abi: Align(1 bytes), pref: Align(4 bytes), }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -196,7 +196,7 @@ error: layout_of(C) = Layout { abi: Align(2 bytes), pref: Align(4 bytes), }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -270,7 +270,7 @@ error: layout_of(P) = Layout { abi: Align(4 bytes), pref: Align(4 bytes), }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -344,7 +344,7 @@ error: layout_of(T) = Layout { abi: Align(4 bytes), pref: Align(4 bytes), }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { diff --git a/tests/ui/layout/zero-sized-array-enum-niche.stderr b/tests/ui/layout/zero-sized-array-enum-niche.stderr index ee34cfdfb0db0..d61408098df7f 100644 --- a/tests/ui/layout/zero-sized-array-enum-niche.stderr +++ b/tests/ui/layout/zero-sized-array-enum-niche.stderr @@ -4,7 +4,7 @@ error: layout_of(Result<[u32; 0], bool>) = Layout { abi: Align(4 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -42,7 +42,7 @@ error: layout_of(Result<[u32; 0], bool>) = Layout { abi: Align(4 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -66,7 +66,7 @@ error: layout_of(Result<[u32; 0], bool>) = Layout { abi: Align(1 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -109,7 +109,7 @@ error: layout_of(MultipleAlignments) = Layout { abi: Align(4 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -147,7 +147,7 @@ error: layout_of(MultipleAlignments) 
= Layout { abi: Align(2 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -171,7 +171,7 @@ error: layout_of(MultipleAlignments) = Layout { abi: Align(4 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -195,7 +195,7 @@ error: layout_of(MultipleAlignments) = Layout { abi: Align(1 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -238,7 +238,7 @@ error: layout_of(Result<[u32; 0], Packed>>) = Layout { abi: Align(4 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -276,7 +276,7 @@ error: layout_of(Result<[u32; 0], Packed>>) = Layout { abi: Align(4 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -300,7 +300,7 @@ error: layout_of(Result<[u32; 0], Packed>>) = Layout { abi: Align(1 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -343,7 +343,7 @@ error: layout_of(Result<[u32; 0], Packed>) = Layout { abi: Align(4 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -385,7 +385,7 @@ error: layout_of(Result<[u32; 0], Packed>) = Layout { abi: Align(4 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -409,7 +409,7 @@ error: layout_of(Result<[u32; 0], Packed>) = Layout { abi: Align(1 bytes), pref: $PREF_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { diff --git a/tests/ui/repr/repr-c-dead-variants.aarch64-unknown-linux-gnu.stderr b/tests/ui/repr/repr-c-dead-variants.aarch64-unknown-linux-gnu.stderr index e2e57fe0e7311..64a0cb7f31a14 100644 --- a/tests/ui/repr/repr-c-dead-variants.aarch64-unknown-linux-gnu.stderr +++ b/tests/ui/repr/repr-c-dead-variants.aarch64-unknown-linux-gnu.stderr @@ -190,7 +190,7 @@ error: layout_of(DeadBranchHasOtherField) = Layout { abi: Align(8 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -254,7 +254,7 @@ error: layout_of(DeadBranchHasOtherField) = Layout { abi: Align(8 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { diff --git a/tests/ui/repr/repr-c-dead-variants.armebv7r-none-eabi.stderr b/tests/ui/repr/repr-c-dead-variants.armebv7r-none-eabi.stderr index 6ecdab1cc140c..5c4daa6d51977 100644 --- a/tests/ui/repr/repr-c-dead-variants.armebv7r-none-eabi.stderr +++ b/tests/ui/repr/repr-c-dead-variants.armebv7r-none-eabi.stderr @@ -190,7 +190,7 @@ error: layout_of(DeadBranchHasOtherField) = Layout { abi: Align(8 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -254,7 +254,7 @@ error: layout_of(DeadBranchHasOtherField) = Layout { abi: Align(8 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { diff --git a/tests/ui/repr/repr-c-dead-variants.i686-pc-windows-msvc.stderr b/tests/ui/repr/repr-c-dead-variants.i686-pc-windows-msvc.stderr index e2e57fe0e7311..64a0cb7f31a14 100644 --- a/tests/ui/repr/repr-c-dead-variants.i686-pc-windows-msvc.stderr +++ b/tests/ui/repr/repr-c-dead-variants.i686-pc-windows-msvc.stderr @@ -190,7 +190,7 @@ error: layout_of(DeadBranchHasOtherField) = Layout { abi: Align(8 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -254,7 +254,7 @@ 
error: layout_of(DeadBranchHasOtherField) = Layout { abi: Align(8 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { diff --git a/tests/ui/repr/repr-c-dead-variants.x86_64-unknown-linux-gnu.stderr b/tests/ui/repr/repr-c-dead-variants.x86_64-unknown-linux-gnu.stderr index e2e57fe0e7311..64a0cb7f31a14 100644 --- a/tests/ui/repr/repr-c-dead-variants.x86_64-unknown-linux-gnu.stderr +++ b/tests/ui/repr/repr-c-dead-variants.x86_64-unknown-linux-gnu.stderr @@ -190,7 +190,7 @@ error: layout_of(DeadBranchHasOtherField) = Layout { abi: Align(8 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -254,7 +254,7 @@ error: layout_of(DeadBranchHasOtherField) = Layout { abi: Align(8 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { diff --git a/tests/ui/repr/repr-c-int-dead-variants.stderr b/tests/ui/repr/repr-c-int-dead-variants.stderr index f7df576df248b..75005a64523a3 100644 --- a/tests/ui/repr/repr-c-int-dead-variants.stderr +++ b/tests/ui/repr/repr-c-int-dead-variants.stderr @@ -190,7 +190,7 @@ error: layout_of(DeadBranchHasOtherFieldU8) = Layout { abi: Align(8 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -254,7 +254,7 @@ error: layout_of(DeadBranchHasOtherFieldU8) = Layout { abi: Align(8 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { diff --git a/tests/ui/type/pattern_types/range_patterns.stderr b/tests/ui/type/pattern_types/range_patterns.stderr index 8465e1b7ff275..7bd0d826cab71 100644 --- a/tests/ui/type/pattern_types/range_patterns.stderr +++ b/tests/ui/type/pattern_types/range_patterns.stderr @@ -124,7 +124,7 @@ error: layout_of(Option<(u32) is 1..=4294967295>) = Layout { abi: Align(1 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary { @@ -232,7 +232,7 @@ error: layout_of(Option<NonZero<u32>>) = Layout { abi: Align(1 bytes), pref: $SOME_ALIGN, }, - abi: Aggregate { + abi: Memory { sized: true, }, fields: Arbitrary {
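
For reference, the `fn_abi_of(pass_zst)` dumps snapshotted in the .stderr files above come from the `rustc_abi` debugging attribute. A minimal sketch of such a probe (assumes a nightly compiler targeting x86_64, since `extern "win64"` is x86_64-only; this is not the exact test source):

    // Minimal sketch: `#[rustc_abi(debug)]` makes rustc report the computed
    // FnAbi as an error, which the test suite snapshots per target.
    #![feature(rustc_attrs)]
    #![crate_type = "lib"]

    // A zero-sized argument lowers to `mode: Ignore`, and its layout is now
    // printed as `Memory { sized: true }` rather than `Aggregate { .. }`.
    #[rustc_abi(debug)]
    extern "win64" fn pass_zst(_: ()) {} //~ ERROR fn_abi_of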