@@ -3,7 +3,7 @@
 use std::borrow::Cow;
 use std::convert::TryFrom;
 use std::iter;
-use std::ops::{Add, Deref, DerefMut, Mul, Range, Sub};
+use std::ops::{Deref, DerefMut, Range};
 
 use rustc_ast::ast::Mutability;
 use rustc_data_structures::sorted_map::SortedMap;
@@ -183,7 +183,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     /// Returns the range of this allocation that was meant.
     #[inline]
     fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
-        let end = Size::add(offset, size); // This does overflow checking.
+        let end = offset + size; // This does overflow checking.
         let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
         assert!(
             end <= self.len(),
@@ -293,7 +293,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         let offset = usize::try_from(ptr.offset.bytes()).unwrap();
         Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
             Some(size) => {
-                let size_with_null = Size::add(Size::from_bytes(size), Size::from_bytes(1));
+                let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
                 // Go through `get_bytes` for checks and AllocationExtra hooks.
                 // We read the null, so we include it in the request, but we want it removed
                 // from the result, so we do subslicing.
@@ -474,7 +474,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
         // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
         // the beginning of this range.
         let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
-        let end = Size::add(ptr.offset, size); // This does overflow checking.
+        let end = ptr.offset + size; // This does overflow checking.
         self.relocations.range(Size::from_bytes(start)..end)
     }
 
@@ -519,7 +519,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
             )
         };
         let start = ptr.offset;
-        let end = Size::add(start, size);
+        let end = start + size; // `Size` addition
 
         // Mark parts of the outermost relocations as undefined if they partially fall outside the
         // given range.
@@ -558,15 +558,15 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
     #[inline]
     fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
         self.undef_mask
-            .is_range_defined(ptr.offset, Size::add(ptr.offset, size))
+            .is_range_defined(ptr.offset, ptr.offset + size) // `Size` addition
             .or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
     }
 
     pub fn mark_definedness(&mut self, ptr: Pointer<Tag>, size: Size, new_state: bool) {
         if size.bytes() == 0 {
             return;
         }
-        self.undef_mask.set_range(ptr.offset, Size::add(ptr.offset, size), new_state);
+        self.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
     }
 }
 
@@ -611,7 +611,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
 
         for i in 1..size.bytes() {
             // FIXME: optimize to bitshift the current undef block's bits and read the top bit.
-            if self.undef_mask.get(Size::add(src.offset, Size::from_bytes(i))) == cur {
+            if self.undef_mask.get(src.offset + Size::from_bytes(i)) == cur {
                 cur_len += 1;
             } else {
                 ranges.push(cur_len);
@@ -638,7 +638,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
         if defined.ranges.len() <= 1 {
             self.undef_mask.set_range_inbounds(
                 dest.offset,
-                Size::add(dest.offset, Size::mul(size, repeat)),
+                dest.offset + size * repeat, // `Size` operations
                 defined.initial,
             );
             return;
@@ -716,10 +716,10 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
         for i in 0..length {
             new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                 // compute offset for current repetition
-                let dest_offset = Size::add(dest.offset, Size::mul(size, i));
+                let dest_offset = dest.offset + size * i; // `Size` operations
                 (
                     // shift offsets from source allocation to destination allocation
-                    Size::sub(Size::add(offset, dest_offset), src.offset),
+                    (offset + dest_offset) - src.offset, // `Size` operations
                     reloc,
                 )
             }));
@@ -867,7 +867,7 @@ impl UndefMask {
         }
         let start = self.len;
         self.len += amount;
-        self.set_range_inbounds(start, Size::add(start, amount), new_state);
+        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
     }
 }
 
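The rewrites above only stay sound because the `Size` arithmetic operators themselves check for overflow, as the "This does overflow checking" comments note. Below is a minimal, self-contained sketch of that pattern, not the actual `rustc_target::abi::Size` definition; the `raw` field, the panic messages, and `main` are illustrative assumptions only.

// Hypothetical sketch: a `Size` newtype whose operator impls panic on
// overflow, so `offset + size` and `size * repeat` read like plain
// arithmetic but still abort instead of silently wrapping.
use std::ops::{Add, Mul, Sub};

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct Size {
    raw: u64, // size in bytes
}

impl Size {
    pub fn from_bytes(bytes: u64) -> Size {
        Size { raw: bytes }
    }
    pub fn bytes(self) -> u64 {
        self.raw
    }
}

impl Add for Size {
    type Output = Size;
    fn add(self, other: Size) -> Size {
        // Overflow is a caller bug, so fail loudly rather than wrap.
        Size::from_bytes(self.raw.checked_add(other.raw).expect("Size addition overflowed"))
    }
}

impl Sub for Size {
    type Output = Size;
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.raw.checked_sub(other.raw).expect("Size subtraction underflowed"))
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    fn mul(self, scale: u64) -> Size {
        Size::from_bytes(self.raw.checked_mul(scale).expect("Size multiplication overflowed"))
    }
}

fn main() {
    let offset = Size::from_bytes(8);
    let size = Size::from_bytes(24);
    // Same shape as the diff's `offset + size` and `size * repeat`.
    assert_eq!((offset + size).bytes(), 32);
    assert_eq!((size * 2 - offset).bytes(), 40);
}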