diff --git a/rust-version b/rust-version
index 41821bcd73..0af97e2a28 100644
--- a/rust-version
+++ b/rust-version
@@ -1 +1 @@
-1a56ec4dae92538ab6e0ecf993c61f3b50ed77cf
+721268583759224d0f6476e0b8b196cc8afbdea0
diff --git a/src/fn_call.rs b/src/fn_call.rs
index 3ff0c1eb18..dc43c65240 100644
--- a/src/fn_call.rs
+++ b/src/fn_call.rs
@@ -607,11 +607,12 @@ pub trait EvalContextExt<'a, 'mir, 'tcx: 'a + 'mir>: crate::MiriEvalContextExt<'
                 // Extract the function type out of the signature (that seems easier than constructing it ourselves).
                 let dtor = match this.read_scalar(args[1])?.not_undef()? {
                     Scalar::Ptr(dtor_ptr) => Some(this.memory().get_fn(dtor_ptr)?),
-                    Scalar::Bits { bits: 0, size } => {
+                    Scalar::Raw { data: 0, size } => {
+                        // NULL pointer
                         assert_eq!(size as u64, this.memory().pointer_size().bytes());
                         None
                     },
-                    Scalar::Bits { .. } => return err!(ReadBytesAsPointer),
+                    Scalar::Raw { .. } => return err!(ReadBytesAsPointer),
                 };
 
                 // Figure out how large a pthread TLS key actually is.
diff --git a/src/operator.rs b/src/operator.rs
index 528da92c20..28d0d7c960 100644
--- a/src/operator.rs
+++ b/src/operator.rs
@@ -141,7 +141,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
     ) -> EvalResult<'tcx, bool> {
         let size = self.pointer_size();
         Ok(match (left, right) {
-            (Scalar::Bits { .. }, Scalar::Bits { .. }) =>
+            (Scalar::Raw { .. }, Scalar::Raw { .. }) =>
                 left.to_bits(size)? == right.to_bits(size)?,
             (Scalar::Ptr(left), Scalar::Ptr(right)) => {
                 // Comparison illegal if one of them is out-of-bounds, *unless* they
@@ -165,10 +165,10 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                 }
             }
             // Comparing ptr and integer.
-            (Scalar::Ptr(ptr), Scalar::Bits { bits, size }) |
-            (Scalar::Bits { bits, size }, Scalar::Ptr(ptr)) => {
+            (Scalar::Ptr(ptr), Scalar::Raw { data, size }) |
+            (Scalar::Raw { data, size }, Scalar::Ptr(ptr)) => {
                 assert_eq!(size as u64, self.pointer_size().bytes());
-                let bits = bits as u64;
+                let bits = data as u64;
 
                 // Case I: Comparing real pointers with "small" integers.
                 // Really we should only do this for NULL, but pragmatically speaking on non-bare-metal systems,
@@ -262,7 +262,7 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                     // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
                     (value << shift) >> shift
                 };
-                let ptr_size = self.memory().pointer_size().bytes() as u8;
+                let ptr_size = self.memory().pointer_size();
                 trace!("ptr BitAnd, align {}, operand {:#010x}, base_mask {:#010x}",
                     ptr_base_align, right, base_mask);
                 if right & base_mask == base_mask {
@@ -278,7 +278,8 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                     )
                 } else if right & base_mask == 0 {
                     // Case 2: the base address bits are all taken away, i.e., right is all-0 there.
-                    (Scalar::Bits { bits: (left.offset.bytes() as u128) & right, size: ptr_size }, false)
+                    let v = Scalar::from_uint((left.offset.bytes() as u128) & right, ptr_size);
+                    (v, false)
                 } else {
                     return err!(ReadPointerAsBytes);
                 }
@@ -289,18 +290,15 @@ impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, '
                 // (Intuition: modulo a divisor leaks less information.)
                 let ptr_base_align = self.memory().get(left.alloc_id)?.align.bytes();
                 let right = right as u64;
-                let ptr_size = self.memory().pointer_size().bytes() as u8;
+                let ptr_size = self.memory().pointer_size();
                 if right == 1 {
                     // Modulo 1 is always 0.
-                    (Scalar::Bits { bits: 0, size: ptr_size }, false)
+                    (Scalar::from_uint(0u32, ptr_size), false)
                 } else if ptr_base_align % right == 0 {
                     // The base address would be cancelled out by the modulo operation, so we can
                     // just take the modulo of the offset.
                     (
-                        Scalar::Bits {
-                            bits: (left.offset.bytes() % right) as u128,
-                            size: ptr_size
-                        },
+                        Scalar::from_uint((left.offset.bytes() % right) as u128, ptr_size),
                         false,
                     )
                 } else {
diff --git a/tests/run-pass/ptr_arith_offset_overflow.rs b/tests/run-pass/ptr_arith_offset_overflow.rs
index 6b778248be..56fd448b0c 100644
--- a/tests/run-pass/ptr_arith_offset_overflow.rs
+++ b/tests/run-pass/ptr_arith_offset_overflow.rs
@@ -1,9 +1,12 @@
+use std::ptr;
+
 fn main() {
     let v = [1i16, 2];
-    let x = v.as_ptr().wrapping_offset(1); // ptr to the 2nd element
+    let x = &mut ptr::null(); // going through memory as there are more sanity checks along that path
+    *x = v.as_ptr().wrapping_offset(1); // ptr to the 2nd element
     // Adding 2*isize::max and then 1 is like substracting 1
-    let x = x.wrapping_offset(isize::max_value());
-    let x = x.wrapping_offset(isize::max_value());
-    let x = x.wrapping_offset(1);
-    assert_eq!(unsafe { *x }, 1);
+    *x = x.wrapping_offset(isize::max_value());
+    *x = x.wrapping_offset(isize::max_value());
+    *x = x.wrapping_offset(1);
+    assert_eq!(unsafe { **x }, 1);
 }
diff --git a/tests/run-pass/ptr_int_casts.rs b/tests/run-pass/ptr_int_casts.rs
index b1b0626305..c279024f35 100644
--- a/tests/run-pass/ptr_int_casts.rs
+++ b/tests/run-pass/ptr_int_casts.rs
@@ -1,4 +1,5 @@
 use std::mem;
+use std::ptr;
 
 fn eq_ref<T>(x: &T, y: &T) -> bool {
     x as *const _ == y as *const _
@@ -11,6 +12,12 @@ fn main() {
     assert_eq!(1 as *const i32 as usize, 1);
     assert_eq!((1 as *const i32).wrapping_offset(4) as usize, 1 + 4*4);
 
+    // negative overflowing wrapping_offset (going through memory because
+    // this used to trigger an ICE on 32bit)
+    let val = &mut ptr::null();
+    *val = (1 as *const u8).wrapping_offset(-4);
+    assert_eq!(*val as usize, usize::max_value() - 2);
+
     { // ptr-int-ptr
         let x = 13;
         let mut y = &x as &_ as *const _ as usize;
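
Note on the shape of the change: the substance of this patch is the rustc-side rename of the integer variant of `Scalar` from `Bits { bits, size }` to `Raw { data, size }`, plus replacing hand-built struct literals with the `Scalar::from_uint` helper so the pointer size is passed as a `Size` rather than a raw byte count. The sketch below is a minimal, self-contained illustration of that matching pattern; the stand-in `Scalar` enum, its field layout, and the `is_null` helper are simplifications invented for this example and do not mirror the real rustc interpreter types.

    // Simplified stand-in for the interpreter's `Scalar`; illustrative only.
    #[derive(Debug, Clone, Copy, PartialEq)]
    enum Scalar {
        /// A plain integer, `size` bytes wide.
        Raw { data: u128, size: u8 },
        /// An abstract pointer (allocation id + offset).
        Ptr { alloc_id: u64, offset: u64 },
    }

    impl Scalar {
        /// Analogue of `Scalar::from_uint`: build an integer scalar of the given width.
        fn from_uint(data: u128, size: u8) -> Scalar {
            Scalar::Raw { data, size }
        }
    }

    /// Hypothetical helper mirroring the NULL-pointer check in the fn_call.rs hunk.
    fn is_null(s: Scalar, ptr_size: u8) -> Result<bool, &'static str> {
        match s {
            // A raw 0 of pointer width counts as the NULL pointer.
            Scalar::Raw { data: 0, size } => {
                assert_eq!(size, ptr_size);
                Ok(true)
            }
            // Any other raw integer cannot be read as a pointer here.
            Scalar::Raw { .. } => Err("ReadBytesAsPointer"),
            Scalar::Ptr { .. } => Ok(false),
        }
    }

    fn main() {
        let null = Scalar::from_uint(0, 8);
        let int = Scalar::from_uint(42, 8);
        let ptr = Scalar::Ptr { alloc_id: 1, offset: 16 };
        assert_eq!(is_null(null, 8), Ok(true));
        assert_eq!(is_null(int, 8), Err("ReadBytesAsPointer"));
        assert_eq!(is_null(ptr, 8), Ok(false));
        println!("ok");
    }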