Commit b7db732

go back to infix ops for Size

1 parent 1d67ca0 commit b7db732

File tree

6 files changed: +30 -33 lines changed

src/librustc/mir/interpret/allocation.rs

+12 -12

@@ -3,7 +3,7 @@
 use std::borrow::Cow;
 use std::convert::TryFrom;
 use std::iter;
-use std::ops::{Add, Deref, DerefMut, Mul, Range, Sub};
+use std::ops::{Deref, DerefMut, Range};

 use rustc_ast::ast::Mutability;
 use rustc_data_structures::sorted_map::SortedMap;
@@ -183,7 +183,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     /// Returns the range of this allocation that was meant.
     #[inline]
     fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
-        let end = Size::add(offset, size); // This does overflow checking.
+        let end = offset + size; // This does overflow checking.
         let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
         assert!(
             end <= self.len(),
@@ -293,7 +293,7 @@
         let offset = usize::try_from(ptr.offset.bytes()).unwrap();
         Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
             Some(size) => {
-                let size_with_null = Size::add(Size::from_bytes(size), Size::from_bytes(1));
+                let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
                 // Go through `get_bytes` for checks and AllocationExtra hooks.
                 // We read the null, so we include it in the request, but we want it removed
                 // from the result, so we do subslicing.
@@ -474,7 +474,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
         // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
         // the beginning of this range.
         let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
-        let end = Size::add(ptr.offset, size); // This does overflow checking.
+        let end = ptr.offset + size; // This does overflow checking.
         self.relocations.range(Size::from_bytes(start)..end)
     }

@@ -519,7 +519,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
             )
         };
         let start = ptr.offset;
-        let end = Size::add(start, size);
+        let end = start + size; // `Size` addition

         // Mark parts of the outermost relocations as undefined if they partially fall outside the
         // given range.
@@ -558,15 +558,15 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
     #[inline]
     fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
         self.undef_mask
-            .is_range_defined(ptr.offset, Size::add(ptr.offset, size))
+            .is_range_defined(ptr.offset, ptr.offset + size) // `Size` addition
             .or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
     }

     pub fn mark_definedness(&mut self, ptr: Pointer<Tag>, size: Size, new_state: bool) {
         if size.bytes() == 0 {
             return;
         }
-        self.undef_mask.set_range(ptr.offset, Size::add(ptr.offset, size), new_state);
+        self.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
     }
 }

@@ -611,7 +611,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {

         for i in 1..size.bytes() {
             // FIXME: optimize to bitshift the current undef block's bits and read the top bit.
-            if self.undef_mask.get(Size::add(src.offset, Size::from_bytes(i))) == cur {
+            if self.undef_mask.get(src.offset + Size::from_bytes(i)) == cur {
                 cur_len += 1;
             } else {
                 ranges.push(cur_len);
@@ -638,7 +638,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
         if defined.ranges.len() <= 1 {
             self.undef_mask.set_range_inbounds(
                 dest.offset,
-                Size::add(dest.offset, Size::mul(size, repeat)),
+                dest.offset + size * repeat, // `Size` operations
                 defined.initial,
             );
             return;
@@ -716,10 +716,10 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
         for i in 0..length {
             new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                 // compute offset for current repetition
-                let dest_offset = Size::add(dest.offset, Size::mul(size, i));
+                let dest_offset = dest.offset + size * i; // `Size` operations
                 (
                     // shift offsets from source allocation to destination allocation
-                    Size::sub(Size::add(offset, dest_offset), src.offset),
+                    (offset + dest_offset) - src.offset, // `Size` operations
                     reloc,
                 )
             }));
@@ -867,7 +867,7 @@ impl UndefMask {
         }
         let start = self.len;
         self.len += amount;
-        self.set_range_inbounds(start, Size::add(start, amount), new_state);
+        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
     }
 }

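The comments above note that `offset + size` "does overflow checking": the check lives in the operator impls themselves, so the infix form is no less safe than the old `Size::add` calls. A minimal standalone sketch of that pattern, using a hypothetical `Size`-like newtype rather than rustc's actual definition:

// Hypothetical sketch: a byte-size newtype whose infix `+` and `*` panic on
// overflow, so `offset + size` is just as checked as an explicit helper call.
use std::ops::{Add, Mul};

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Size {
    raw: u64, // size in bytes
}

impl Size {
    fn from_bytes(bytes: u64) -> Size {
        Size { raw: bytes }
    }
    fn bytes(self) -> u64 {
        self.raw
    }
}

impl Add for Size {
    type Output = Size;
    fn add(self, rhs: Size) -> Size {
        // checked_add returns None on overflow; turn that into a hard error.
        Size::from_bytes(self.raw.checked_add(rhs.raw).expect("Size addition overflowed"))
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    fn mul(self, count: u64) -> Size {
        Size::from_bytes(self.raw.checked_mul(count).expect("Size multiplication overflowed"))
    }
}

fn main() {
    let offset = Size::from_bytes(8);
    let size = Size::from_bytes(16);
    let end = offset + size; // reads like plain arithmetic, still overflow-checked
    assert_eq!(end.bytes(), 24);
    assert_eq!((size * 3).bytes(), 48);
}

With impls along these lines in scope, call sites such as `check_bounds` can spell the sum as `offset + size` and still panic on overflow instead of silently wrapping.
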
src/librustc_mir/interpret/eval_context.rs

+1 -2

@@ -1,7 +1,6 @@
 use std::cell::Cell;
 use std::fmt::Write;
 use std::mem;
-use std::ops::Add;

 use rustc::ich::StableHashingContext;
 use rustc::mir;
@@ -454,7 +453,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // here. But this is where the add would go.)

         // Return the sum of sizes and max of aligns.
-        let size = Size::add(sized_size, unsized_size);
+        let size = sized_size + unsized_size; // `Size` addition

         // Choose max of two known alignments (combined value must
         // be aligned according to more restrictive of the two).

src/librustc_mir/interpret/memory.rs

+7 -7

@@ -9,7 +9,6 @@
 use std::borrow::Cow;
 use std::collections::VecDeque;
 use std::convert::TryFrom;
-use std::ops::{Add, Mul};
 use std::ptr;

 use rustc::ty::layout::{Align, HasDataLayout, Size, TargetDataLayout};
@@ -880,7 +879,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         let src_bytes =
             self.get_raw(src.alloc_id)?.get_bytes_with_undef_and_ptr(&tcx, src, size)?.as_ptr();
         let dest_bytes =
-            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, Size::mul(size, length))?;
+            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; // `Size` multiplication

         // If `dest_bytes` is empty we just optimize to not run anything for zsts.
         // See #67539
@@ -901,7 +900,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         // touched if the bytes stay undef for the whole interpreter execution. On contemporary
         // operating system this can avoid physically allocating the page.
         let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
-        dest_alloc.mark_definedness(dest, Size::mul(size, length), false);
+        dest_alloc.mark_definedness(dest, size * length, false); // `Size` multiplication
         dest_alloc.mark_relocation_range(relocations);
         return Ok(());
     }
@@ -914,8 +913,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         unsafe {
             if src.alloc_id == dest.alloc_id {
                 if nonoverlapping {
-                    if (src.offset <= dest.offset && Size::add(src.offset, size) > dest.offset)
-                        || (dest.offset <= src.offset && Size::add(dest.offset, size) > src.offset)
+                    // `Size` additions
+                    if (src.offset <= dest.offset && src.offset + size > dest.offset)
+                        || (dest.offset <= src.offset && dest.offset + size > src.offset)
                     {
                         throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
                     }
@@ -924,15 +924,15 @@
                 for i in 0..length {
                     ptr::copy(
                         src_bytes,
-                        dest_bytes.offset(isize::try_from(Size::mul(size, i).bytes()).unwrap()),
+                        dest_bytes.offset(isize::try_from((size * i).bytes()).unwrap()), // `Size` multiplication
                         usize::try_from(size.bytes()).unwrap(),
                     );
                 }
             } else {
                 for i in 0..length {
                     ptr::copy_nonoverlapping(
                         src_bytes,
-                        dest_bytes.offset(isize::try_from(Size::mul(size, i).bytes()).unwrap()),
+                        dest_bytes.offset(isize::try_from((size * i).bytes()).unwrap()), // `Size` multiplication
                         usize::try_from(size.bytes()).unwrap(),
                     );
                 }

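The `nonoverlapping` branch above rejects two ranges within the same allocation that share any byte. A small standalone illustration of that predicate, using plain `u64` offsets in place of the interpreter's `Pointer` and `Size` types:

// Two half-open byte ranges [a, a + size) and [b, b + size) overlap exactly
// when the one that starts first reaches past the start of the other.
fn ranges_overlap(a: u64, b: u64, size: u64) -> bool {
    (a <= b && a + size > b) || (b <= a && b + size > a)
}

fn main() {
    assert!(ranges_overlap(0, 4, 8));   // [0, 8) and [4, 12) share bytes 4..8
    assert!(!ranges_overlap(0, 8, 8));  // [0, 8) and [8, 16) merely touch
    assert!(!ranges_overlap(0, 0, 0));  // empty ranges never overlap
}
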
src/librustc_mir/interpret/place.rs

+4 -4

@@ -4,7 +4,6 @@

 use std::convert::TryFrom;
 use std::hash::Hash;
-use std::ops::Mul;

 use rustc::mir;
 use rustc::mir::interpret::truncate;
@@ -444,7 +443,7 @@
             // This can only be reached in ConstProp and non-rustc-MIR.
             throw_ub!(BoundsCheckFailed { len, index });
         }
-        let offset = Size::mul(stride, index);
+        let offset = stride * index; // `Size` multiplication
         // All fields have the same layout.
         let field_layout = base.layout.field(self, 0)?;

@@ -469,7 +468,8 @@
         };
         let layout = base.layout.field(self, 0)?;
         let dl = &self.tcx.data_layout;
-        Ok((0..len).map(move |i| base.offset(Size::mul(stride, i), MemPlaceMeta::None, layout, dl)))
+        // `Size` multiplication
+        Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl)))
     }

     fn mplace_subslice(
@@ -493,7 +493,7 @@
         // Not using layout method because that works with usize, and does not work with slices
         // (that have count 0 in their layout).
         let from_offset = match base.layout.fields {
-            layout::FieldPlacement::Array { stride, .. } => Size::mul(stride, from), // `Size` multiplication is checked
+            layout::FieldPlacement::Array { stride, .. } => stride * from, // `Size` multiplication is checked
             _ => bug!("Unexpected layout of index access: {:#?}", base.layout),
         };

src/librustc_mir/interpret/traits.rs

+2 -4

@@ -1,5 +1,4 @@
 use std::convert::TryFrom;
-use std::ops::Mul;

 use rustc::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar};
 use rustc::ty::layout::{Align, HasDataLayout, LayoutOf, Size};
@@ -57,7 +56,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // `get_vtable` in `rust_codegen_llvm/meth.rs`.
         // /////////////////////////////////////////////////////////////////////////////////////////
         let vtable = self.memory.allocate(
-            Size::mul(ptr_size, u64::try_from(methods.len()).unwrap().checked_add(3).unwrap()),
+            ptr_size * u64::try_from(methods.len()).unwrap().checked_add(3).unwrap(),
             ptr_align,
             MemoryKind::Vtable,
         );
@@ -110,8 +109,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
         let ptr_size = self.pointer_size();
         // Skip over the 'drop_ptr', 'size', and 'align' fields.
-        let vtable_slot =
-            vtable.ptr_offset(Size::mul(ptr_size, idx.checked_add(3).unwrap()), self)?;
+        let vtable_slot = vtable.ptr_offset(ptr_size * idx.checked_add(3).unwrap(), self)?;
         let vtable_slot = self
             .memory
             .check_ptr_access(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?

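The comment above the vtable slot lookup explains the `idx.checked_add(3)`: the first three pointer-sized entries are the drop pointer, size, and align, so method `idx` sits at byte offset `ptr_size * (idx + 3)`. A quick worked example, assuming an 8-byte pointer size purely for illustration:

// Hypothetical numbers: with 8-byte pointers, method `idx` lives after the
// drop_ptr/size/align header, i.e. at byte offset ptr_size * (idx + 3).
fn main() {
    let ptr_size: u64 = 8;
    let idx: u64 = 0;
    let slot_offset = ptr_size * idx.checked_add(3).unwrap();
    assert_eq!(slot_offset, 24); // the first method starts right after the 24-byte header
}
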
src/librustc_mir/interpret/validity.rs

+4 -4

@@ -6,10 +6,10 @@

 use std::convert::TryFrom;
 use std::fmt::Write;
-use std::ops::{Mul, RangeInclusive};
+use std::ops::RangeInclusive;

 use rustc::ty;
-use rustc::ty::layout::{self, LayoutOf, Size, TyLayout, VariantIdx};
+use rustc::ty::layout::{self, LayoutOf, TyLayout, VariantIdx};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir as hir;
 use rustc_span::symbol::{sym, Symbol};
@@ -747,8 +747,8 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
         }
         // This is the element type size.
         let layout = self.ecx.layout_of(tys)?;
-        // This is the size in bytes of the whole array.
-        let size = Size::mul(layout.size, len);
+        // This is the size in bytes of the whole array. (This checks for overflow.)
+        let size = layout.size * len;
         // Size is not 0, get a pointer.
         let ptr = self.ecx.force_ptr(mplace.ptr)?;
