Auto merge of #96220 - RalfJung:scalar-no-padding, r=oli-obk
tighten sanity checks around Scalar and ScalarPair

While investigating #96185 I noticed codegen has tighter sanity checks here than Miri does, so I added some more assertions. Strangely, some of them fail, so I also needed to add a HACK... that is probably worth looking into.

This does not fix that issue, but it changes the ICE messages, making it quite clear that we have a scalar whose size is not the same as that of the surrounding layout.

r? `@oli-obk`
bors committed May 11, 2022
2 parents 08b4f1b + 14f6daf commit 6dd6840
Showing 3 changed files with 53 additions and 53 deletions.
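
Background for the diffs below: the interpreter represents an immediate value either as a single `Scalar` (one machine value) or as a `ScalarPair` (two machine values, e.g. a fat pointer). A minimal standalone sketch of typical examples of both shapes; this is ordinary Rust, not compiler code, and which ABI a type actually gets is an internal rustc layout decision:

```rust
use std::mem::{align_of, size_of};

fn main() {
    // `u64` is the Scalar shape: one machine value, 8 bytes.
    println!("u64:      size={} align={}", size_of::<u64>(), align_of::<u64>());
    // `&[u8]` is a typical ScalarPair: data pointer plus length.
    println!("&[u8]:    size={} align={}", size_of::<&[u8]>(), align_of::<&[u8]>());
    // A pair of scalars can carry padding between its components; the offset
    // arithmetic in the diffs below exists to step over exactly that padding.
    println!("(u8,u32): size={} align={}", size_of::<(u8, u32)>(), align_of::<(u8, u32)>());
}
```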
compiler/rustc_const_eval/src/interpret/operand.rs (38 additions, 19 deletions)
```diff
@@ -284,8 +284,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Abi::Scalar(s) if force => Some(s.primitive()),
             _ => None,
         };
-        if let Some(_) = scalar_layout {
-            let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
+        if let Some(_s) = scalar_layout {
+            //FIXME(#96185): let size = s.size(self);
+            //FIXME(#96185): assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
+            let size = mplace.layout.size; //FIXME(#96185): remove this line
+            let scalar = alloc.read_scalar(alloc_range(Size::ZERO, size))?;
             return Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }));
         }
         let scalar_pair_layout = match mplace.layout.abi {
```
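
The new code above still reads `mplace.layout.size` bytes (the HACK from the commit message); once #96185 is fixed it should read the `abi::Scalar`'s own size. As a standalone sketch of what "read a scalar of `size` bytes at offset 0" boils down to, with a hypothetical helper and little-endian decoding for simplicity (the interpreter follows the target's endianness):

```rust
// Hypothetical helper: decode the first `size` bytes of an allocation as one
// unsigned integer.
fn read_scalar(alloc: &[u8], size: usize) -> u128 {
    assert!(size <= 16 && size <= alloc.len());
    let mut buf = [0u8; 16];
    buf[..size].copy_from_slice(&alloc[..size]);
    u128::from_le_bytes(buf)
}

fn main() {
    let alloc = [0x2A, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF];
    // Reading 4 bytes at offset 0 yields 42. With the HACK, `size` comes from
    // the layout rather than the `abi::Scalar`, which #96185 shows can differ.
    assert_eq!(read_scalar(&alloc, 4), 42);
    println!("read 4 bytes -> {}", read_scalar(&alloc, 4));
}
```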
```diff
@@ -302,7 +305,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
             let (a_size, b_size) = (a.size(self), b.size(self));
             let b_offset = a_size.align_to(b.align(self).abi);
-            assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
+            assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
             let a_val = alloc.read_scalar(alloc_range(Size::ZERO, a_size))?;
             let b_val = alloc.read_scalar(alloc_range(b_offset, b_size))?;
             return Ok(Some(ImmTy {
```
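
The `b_offset` computation above rounds the first component's size up to the second component's alignment. A standalone sketch of that arithmetic, with `align_to` reimplemented by hand using the usual round-up formula:

```rust
// Round `size` up to the next multiple of `align`, like `Size::align_to`.
fn align_to(size: u64, align: u64) -> u64 {
    (size + align - 1) / align * align
}

fn main() {
    // For a pair modeled on (u8, u32): the first component is 1 byte and the
    // second needs 4-byte alignment, so it lives at offset 4 (3 padding bytes).
    let (a_size, b_align) = (1u64, 4u64);
    let b_offset = align_to(a_size, b_align);
    assert_eq!(b_offset, 4);
    // The interpreter's assert relies on this: the first component of a
    // ScalarPair has nonzero size, so b_offset >= a_size > 0 always holds.
    assert!(b_offset > 0);
}
```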
```diff
@@ -394,28 +397,44 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Err(value) => value,
         };
 
-        let field_layout = op.layout.field(self, field);
-        if field_layout.is_zst() {
-            let immediate = Scalar::ZST.into();
-            return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout });
-        }
-        let offset = op.layout.fields.offset(field);
-        let immediate = match *base {
+        let field_layout = base.layout.field(self, field);
+        let offset = base.layout.fields.offset(field);
+        // This makes several assumptions about what layouts we will encounter; we match what
+        // codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
+        let field_val: Immediate<_> = match (*base, base.layout.abi) {
+            // the field contains no information
+            _ if field_layout.is_zst() => Scalar::ZST.into(),
             // the field covers the entire type
-            _ if offset.bytes() == 0 && field_layout.size == op.layout.size => *base,
+            _ if field_layout.size == base.layout.size => {
+                assert!(match (base.layout.abi, field_layout.abi) {
+                    (Abi::Scalar(..), Abi::Scalar(..)) => true,
+                    (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
+                    _ => false,
+                });
+                assert!(offset.bytes() == 0);
+                *base
+            }
             // extract fields from types with `ScalarPair` ABI
-            Immediate::ScalarPair(a, b) => {
-                let val = if offset.bytes() == 0 { a } else { b };
-                Immediate::from(val)
+            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+                assert!(matches!(field_layout.abi, Abi::Scalar(..)));
+                Immediate::from(if offset.bytes() == 0 {
+                    debug_assert_eq!(field_layout.size, a.size(self));
+                    a_val
+                } else {
+                    debug_assert_eq!(offset, a.size(self).align_to(b.align(self).abi));
+                    debug_assert_eq!(field_layout.size, b.size(self));
+                    b_val
+                })
             }
-            Immediate::Scalar(val) => span_bug!(
+            _ => span_bug!(
                 self.cur_span(),
-                "field access on non aggregate {:#?}, {:#?}",
-                val,
-                op.layout
+                "invalid field access on immediate {}, layout {:#?}",
+                base,
+                base.layout
             ),
         };
-        Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout })
+
+        Ok(OpTy { op: Operand::Immediate(field_val), layout: field_layout })
     }
 
     pub fn operand_index(
```
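
To see what the new `operand_field` arms enforce, here is a simplified standalone model of the `ScalarPair` extraction, with hypothetical types and helpers rather than the interpreter's: the field at offset 0 is the first component, and any other offset must be exactly where the second component starts, which is the same invariant the new `debug_assert_eq!` checks.

```rust
// Much-reduced model of an interpreter immediate (hypothetical).
#[derive(Clone, Copy, Debug)]
enum Immediate {
    Scalar(u128),
    ScalarPair(u128, u128),
}

// Round `size` up to the next multiple of `align`.
fn align_to(size: u64, align: u64) -> u64 {
    (size + align - 1) / align * align
}

/// Extract one field of an immediate, given the field's byte offset and the
/// first component's size plus the second component's alignment.
fn extract_field(imm: Immediate, offset: u64, a_size: u64, b_align: u64) -> u128 {
    match imm {
        Immediate::ScalarPair(a_val, b_val) => {
            if offset == 0 {
                a_val
            } else {
                // Mirrors the new `debug_assert_eq!`: the only other valid
                // offset is where the second component starts.
                assert_eq!(offset, align_to(a_size, b_align));
                b_val
            }
        }
        // Mirrors the `span_bug!` arm: a plain Scalar has no fields to pick.
        Immediate::Scalar(_) => panic!("invalid field access on immediate"),
    }
}

fn main() {
    // Model (u8, u32): a_size = 1, b_align = 4, so field 1 sits at offset 4.
    let pair = Immediate::ScalarPair(0xAA, 0xBBBB_BBBB);
    assert_eq!(extract_field(pair, 0, 1, 4), 0xAA);
    assert_eq!(extract_field(pair, 4, 1, 4), 0xBBBB_BBBB);
    let _scalar_only = Immediate::Scalar(7); // extract_field on this would panic
}
```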
compiler/rustc_const_eval/src/interpret/place.rs (10 additions, 30 deletions)
```diff
@@ -16,7 +16,7 @@ use rustc_target::abi::{HasDataLayout, Size, VariantIdx, Variants};
 use super::{
     alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
     ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy,
-    Operand, Pointer, PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit,
+    Operand, Pointer, Provenance, Scalar, ScalarMaybeUninit,
 };
 
 #[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
@@ -700,24 +700,7 @@ where
         src: Immediate<M::PointerTag>,
         dest: &PlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx> {
-        if cfg!(debug_assertions) {
-            // This is a very common path, avoid some checks in release mode
-            assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
-            match src {
-                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(..))) => assert_eq!(
-                    self.pointer_size(),
-                    dest.layout.size,
-                    "Size mismatch when writing pointer"
-                ),
-                Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Int(int))) => {
-                    assert_eq!(int.size(), dest.layout.size, "Size mismatch when writing bits")
-                }
-                Immediate::Scalar(ScalarMaybeUninit::Uninit) => {} // uninit can have any size
-                Immediate::ScalarPair(_, _) => {
-                    // FIXME: Can we check anything here?
-                }
-            }
-        }
+        assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
         trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
 
         // See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
```
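
The deleted block above performed per-write size checks only in debug builds; the commit replaces them with the `abi::Scalar`-based size in `write_immediate_to_mplace` below. A standalone miniature, with hypothetical names, of the kind of check that was removed:

```rust
// Hypothetical miniature of the removed debug check: an integer immediate
// carries its own width, which must match the destination slot exactly.
struct ScalarInt {
    value: u128,
    size: u8, // width in bytes
}

fn write_int(dest: &mut [u8], int: &ScalarInt) {
    // The removed assertion, in miniature: the width must match the slot.
    assert_eq!(int.size as usize, dest.len(), "Size mismatch when writing bits");
    dest.copy_from_slice(&int.value.to_le_bytes()[..dest.len()]);
}

fn main() {
    let mut slot = [0u8; 4];
    write_int(&mut slot, &ScalarInt { value: 0xDEAD_BEEF, size: 4 });
    assert_eq!(slot, 0xDEAD_BEEFu32.to_le_bytes());
}
```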
```diff
@@ -753,31 +736,27 @@ where
         dest: &MPlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx> {
         // Note that it is really important that the type here is the right one, and matches the
-        // type things are read at. In case `src_val` is a `ScalarPair`, we don't do any magic here
+        // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
         // to handle padding properly, which is only correct if we never look at this data with the
         // wrong type.
 
-        // Invalid places are a thing: the return place of a diverging function
         let tcx = *self.tcx;
         let Some(mut alloc) = self.get_place_alloc_mut(dest)? else {
             // zero-sized access
             return Ok(());
         };
 
-        // FIXME: We should check that there are dest.layout.size many bytes available in
-        // memory. The code below is not sufficient, with enough padding it might not
-        // cover all the bytes!
         match value {
             Immediate::Scalar(scalar) => {
-                match dest.layout.abi {
-                    Abi::Scalar(_) => {} // fine
-                    _ => span_bug!(
+                let Abi::Scalar(s) = dest.layout.abi else { span_bug!(
                         self.cur_span(),
                         "write_immediate_to_mplace: invalid Scalar layout: {:#?}",
                         dest.layout
-                    ),
-                }
-                alloc.write_scalar(alloc_range(Size::ZERO, dest.layout.size), scalar)
+                    )
+                };
+                let size = s.size(&tcx);
+                //FIXME(#96185): assert_eq!(dest.layout.size, size, "abi::Scalar size does not match layout size");
+                alloc.write_scalar(alloc_range(Size::ZERO, size), scalar)
             }
             Immediate::ScalarPair(a_val, b_val) => {
                 // We checked `ptr_align` above, so all fields will have the alignment they need.
@@ -791,6 +770,7 @@ where
                 };
                 let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
                 let b_offset = a_size.align_to(b.align(&tcx).abi);
+                assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
 
                 // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
                 // but that does not work: We could be a newtype around a pair, then the
```
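
A standalone sketch of what the `ScalarPair` write path does to memory: the two components land at offset 0 and `b_offset`, and the padding bytes in between are left untouched, which is why the comment above insists the data must only ever be read back at the same type. The `(u8, u32)` layout here is assumed for illustration; actual layouts are compiler decisions.

```rust
// Round `size` up to the next multiple of `align`.
fn align_to(size: usize, align: usize) -> usize {
    (size + align - 1) / align * align
}

fn main() {
    // Model (u8, u32): 8 bytes total, second component at offset 4.
    let mut mem = [0xFFu8; 8]; // 0xFF marks bytes the write never touches
    let (a_size, b_size, b_align) = (1usize, 4usize, 4usize);
    let b_offset = align_to(a_size, b_align);

    // Write `a` at offset 0 and `b` at `b_offset`, like the two
    // `write_scalar` calls in the ScalarPair arm.
    mem[..a_size].copy_from_slice(&[0xAA]);
    mem[b_offset..b_offset + b_size].copy_from_slice(&0xDDCC_BBAAu32.to_le_bytes());

    // Offsets 1..4 still hold 0xFF: the padding was never written, so reading
    // this memory back at a type that inspects those bytes would see garbage.
    assert_eq!(mem, [0xAA, 0xFF, 0xFF, 0xFF, 0xAA, 0xBB, 0xCC, 0xDD]);
}
```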
compiler/rustc_const_eval/src/interpret/validity.rs (5 additions, 4 deletions)
```diff
@@ -645,17 +645,18 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M>
             // i.e. that we go over the `check_init` below.
             let size = scalar_layout.size(self.ecx);
             let is_full_range = match scalar_layout {
-                ScalarAbi::Initialized { valid_range, .. } => {
+                ScalarAbi::Initialized { .. } => {
                     if M::enforce_number_validity(self.ecx) {
                         false // not "full" since uninit is not accepted
                     } else {
-                        valid_range.is_full_for(size)
+                        scalar_layout.is_always_valid(self.ecx)
                     }
                 }
                 ScalarAbi::Union { .. } => true,
             };
             if is_full_range {
-                // Nothing to check
+                // Nothing to check. Crucially, we don't even `read_scalar` until here, since that would
+                // fail for `Union` scalars!
                 return Ok(());
             }
             // We have something to check: it must at least be initialized.
```
```diff
@@ -688,7 +689,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M>
                 } else {
                     return Ok(());
                 }
-            } else if scalar_layout.valid_range(self.ecx).is_full_for(size) {
+            } else if scalar_layout.is_always_valid(self.ecx) {
                 // Easy. (This is reachable if `enforce_number_validity` is set.)
                 return Ok(());
             } else {
```
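
Both validity hunks replace an open-coded `valid_range.is_full_for(size)` with the `is_always_valid` helper. A standalone model, hand-rolled to mirror the wrapping-range logic as I understand it, of what "always valid" means for a scalar: `u8` admits every bit pattern (nothing to check), while `NonZeroU8` excludes 0, so the visitor has a real range to enforce.

```rust
use std::num::NonZeroU8;

// Hand-rolled model of a scalar's valid range (inclusive, possibly wrapping).
struct ValidRange {
    start: u128,
    end: u128,
}

impl ValidRange {
    // Analogue of `is_always_valid`: does the range admit every bit pattern
    // of a `size_bytes`-wide value?
    fn is_full_for(&self, size_bytes: u32) -> bool {
        let max = (1u128 << (8 * size_bytes)) - 1;
        self.start == (self.end.wrapping_add(1) & max)
    }
}

fn main() {
    let u8_range = ValidRange { start: 0, end: 255 }; // plain u8
    let nonzero = ValidRange { start: 1, end: 255 }; // NonZeroU8 excludes 0
    assert!(u8_range.is_full_for(1)); // nothing for validity to check
    assert!(!nonzero.is_full_for(1)); // the visitor must reject 0

    // The non-full range is also what gives Option<NonZeroU8> its niche.
    assert_eq!(std::mem::size_of::<Option<NonZeroU8>>(), 1);
}
```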