@@ -73,9 +73,9 @@ use rustc_middle::{mir, ty::layout::TyAndLayout};
 use rustc_target::abi::Size;
 
 use crate::{
-    ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MemoryKind, MiriEvalContext,
-    MiriEvalContextExt, MiriMemoryKind, OpTy, Pointer, RangeMap, Scalar, ScalarMaybeUninit, Tag,
-    ThreadId, VClock, VTimestamp, VectorIdx,
+    AllocId, AllocRange, ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MemoryKind,
+    MiriEvalContext, MiriEvalContextExt, MiriMemoryKind, OpTy, Pointer, RangeMap, Scalar,
+    ScalarMaybeUninit, Tag, ThreadId, VClock, VTimestamp, VectorIdx,
 };
 
 pub type AllocExtra = VClockAlloc;
@@ -561,7 +561,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
             if lt { &rhs } else { &old }
         };
 
-        this.allow_data_races_mut(|this| this.write_immediate_to_mplace(**new_val, place))?;
+        this.allow_data_races_mut(|this| this.write_immediate(**new_val, &(*place).into()))?;
 
         this.validate_atomic_rmw(&place, atomic)?;
 
@@ -713,18 +713,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
             Ok(())
         }
     }
-
-    fn reset_vector_clocks(&mut self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
-        let this = self.eval_context_mut();
-        if let Some(data_race) = &mut this.memory.extra.data_race {
-            if data_race.multi_threaded.get() {
-                let alloc_meta =
-                    this.memory.get_alloc_extra_mut(ptr.alloc_id)?.0.data_race.as_mut().unwrap();
-                alloc_meta.reset_clocks(ptr.offset, size);
-            }
-        }
-        Ok(())
-    }
 }
 
 /// Vector clock metadata for a logical memory allocation.
@@ -769,14 +757,6 @@ impl VClockAlloc {
         }
     }
 
-    fn reset_clocks(&mut self, offset: Size, len: Size) {
-        let alloc_ranges = self.alloc_ranges.get_mut();
-        for (_, range) in alloc_ranges.iter_mut(offset, len) {
-            // Reset the portion of the range
-            *range = MemoryCellClocks::new(0, VectorIdx::MAX_INDEX);
-        }
-    }
-
     // Find an index, if one exists where the value
     // in `l` is greater than the value in `r`.
     fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
@@ -820,8 +800,7 @@ impl VClockAlloc {
         range: &MemoryCellClocks,
         action: &str,
         is_atomic: bool,
-        pointer: Pointer<Tag>,
-        len: Size,
+        ptr_dbg: Pointer<AllocId>,
     ) -> InterpResult<'tcx> {
         let (current_index, current_clocks) = global.current_thread_state();
         let write_clock;
@@ -863,15 +842,12 @@ impl VClockAlloc {
 
         // Throw the data-race detection.
         throw_ub_format!(
-            "Data race detected between {} on {} and {} on {}, memory({:?},offset={},size={})\
-            \n(current vector clock = {:?}, conflicting timestamp = {:?})",
+            "Data race detected between {} on {} and {} on {} at {:?} (current vector clock = {:?}, conflicting timestamp = {:?})",
             action,
             current_thread_info,
             other_action,
             other_thread_info,
-            pointer.alloc_id,
-            pointer.offset.bytes(),
-            len.bytes(),
+            ptr_dbg,
             current_clocks.clock,
             other_clock
         )
@@ -884,17 +860,23 @@ impl VClockAlloc {
     /// atomic read operations.
     pub fn read<'tcx>(
         &self,
-        pointer: Pointer<Tag>,
-        len: Size,
+        alloc_id: AllocId,
+        range: AllocRange,
         global: &GlobalState,
     ) -> InterpResult<'tcx> {
         if global.multi_threaded.get() {
             let (index, clocks) = global.current_thread_state();
             let mut alloc_ranges = self.alloc_ranges.borrow_mut();
-            for (_, range) in alloc_ranges.iter_mut(pointer.offset, len) {
+            for (offset, range) in alloc_ranges.iter_mut(range.start, range.size) {
                 if let Err(DataRace) = range.read_race_detect(&*clocks, index) {
                     // Report data-race.
-                    return Self::report_data_race(global, range, "Read", false, pointer, len);
+                    return Self::report_data_race(
+                        global,
+                        range,
+                        "Read",
+                        false,
+                        Pointer::new(alloc_id, offset),
+                    );
                 }
             }
             Ok(())
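The hunk above switches `read` from a `(Pointer<Tag>, Size)` pair to `(AllocId, AllocRange)` and reports the first conflicting offset rather than the whole access span. Below is a minimal, self-contained sketch of that "iterate per-byte metadata, report the precise offset" pattern; `CellClocks`, `Range`, and `first_conflicting_offset` are invented stand-ins for illustration, not Miri's types.

```rust
use std::collections::BTreeMap;

// Hypothetical stand-ins for Miri's per-byte clock metadata and access range.
#[derive(Debug, Clone)]
struct CellClocks {
    last_writer: u32, // toy model: thread index of the last unsynchronized write
}

#[derive(Debug, Clone, Copy)]
struct Range {
    start: u64,
    size: u64,
}

/// Walk the per-byte metadata overlapping `range` and return the first offset
/// whose check fails, so a diagnostic can name an exact (allocation, offset)
/// pair instead of the whole accessed span.
fn first_conflicting_offset(
    cells: &BTreeMap<u64, CellClocks>,
    range: Range,
    current_thread: u32,
) -> Option<u64> {
    cells
        .range(range.start..range.start + range.size)
        .find(|(_, clocks)| clocks.last_writer != current_thread)
        .map(|(offset, _)| *offset)
}

fn main() {
    let mut cells = BTreeMap::new();
    cells.insert(0, CellClocks { last_writer: 1 });
    cells.insert(4, CellClocks { last_writer: 2 }); // last touched by another thread
    let racy = first_conflicting_offset(&cells, Range { start: 0, size: 8 }, 1);
    println!("conflicting offset: {:?}", racy); // prints: conflicting offset: Some(4)
}
```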
@@ -906,23 +888,22 @@ impl VClockAlloc {
     // Shared code for detecting data-races on unique access to a section of memory
     fn unique_access<'tcx>(
         &mut self,
-        pointer: Pointer<Tag>,
-        len: Size,
+        alloc_id: AllocId,
+        range: AllocRange,
         write_type: WriteType,
         global: &mut GlobalState,
     ) -> InterpResult<'tcx> {
         if global.multi_threaded.get() {
             let (index, clocks) = global.current_thread_state();
-            for (_, range) in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
+            for (offset, range) in self.alloc_ranges.get_mut().iter_mut(range.start, range.size) {
                 if let Err(DataRace) = range.write_race_detect(&*clocks, index, write_type) {
                     // Report data-race
                     return Self::report_data_race(
                         global,
                         range,
                         write_type.get_descriptor(),
                         false,
-                        pointer,
-                        len,
+                        Pointer::new(alloc_id, offset),
                     );
                 }
             }
@@ -938,11 +919,11 @@ impl VClockAlloc {
     /// operation
     pub fn write<'tcx>(
         &mut self,
-        pointer: Pointer<Tag>,
-        len: Size,
+        alloc_id: AllocId,
+        range: AllocRange,
         global: &mut GlobalState,
     ) -> InterpResult<'tcx> {
-        self.unique_access(pointer, len, WriteType::Write, global)
+        self.unique_access(alloc_id, range, WriteType::Write, global)
     }
 
     /// Detect data-races for an unsynchronized deallocate operation, will not perform
@@ -951,11 +932,11 @@ impl VClockAlloc {
     /// operation
     pub fn deallocate<'tcx>(
         &mut self,
-        pointer: Pointer<Tag>,
-        len: Size,
+        alloc_id: AllocId,
+        range: AllocRange,
         global: &mut GlobalState,
     ) -> InterpResult<'tcx> {
-        self.unique_access(pointer, len, WriteType::Deallocate, global)
+        self.unique_access(alloc_id, range, WriteType::Deallocate, global)
     }
 }
 
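Both `write` and `deallocate` remain thin wrappers that forward to `unique_access` and differ only in the `WriteType` used for the diagnostic label. A rough, self-contained sketch of that delegation pattern, using made-up `WriteKind`/`Tracker` types rather than Miri's, might look like this:

```rust
// Made-up types illustrating the shared "unique access" delegation pattern.
#[derive(Debug, Clone, Copy)]
enum WriteKind {
    Write,
    Deallocate,
}

impl WriteKind {
    fn descriptor(self) -> &'static str {
        match self {
            WriteKind::Write => "Write",
            WriteKind::Deallocate => "Deallocate",
        }
    }
}

#[derive(Debug, Default)]
struct Tracker {
    events: Vec<String>,
}

impl Tracker {
    // A real checker would consult per-byte vector clocks here; this toy
    // version only records which kind of exclusive access happened where.
    fn unique_access(&mut self, offset: u64, size: u64, kind: WriteKind) {
        self.events.push(format!("{} at offset {} (size {})", kind.descriptor(), offset, size));
    }

    fn write(&mut self, offset: u64, size: u64) {
        self.unique_access(offset, size, WriteKind::Write)
    }

    fn deallocate(&mut self, offset: u64, size: u64) {
        self.unique_access(offset, size, WriteKind::Deallocate)
    }
}

fn main() {
    let mut tracker = Tracker::default();
    tracker.write(0, 4);
    tracker.deallocate(0, 16);
    println!("{:#?}", tracker.events);
}
```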
@@ -1002,12 +983,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         result
     }
 
-    /// Generic atomic operation implementation,
-    /// this accesses memory via get_raw instead of
-    /// get_raw_mut, due to issues calling get_raw_mut
-    /// for atomic loads from read-only memory.
-    /// FIXME: is this valid, or should get_raw_mut be used for
-    /// atomic-stores/atomic-rmw?
+    /// Generic atomic operation implementation
     fn validate_atomic_op<A: Debug + Copy>(
         &self,
         place: &MPlaceTy<'tcx, Tag>,
@@ -1023,25 +999,24 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         let this = self.eval_context_ref();
         if let Some(data_race) = &this.memory.extra.data_race {
             if data_race.multi_threaded.get() {
+                let size = place.layout.size;
+                let (alloc_id, base_offset, ptr) = this.memory.ptr_get_alloc(place.ptr)?;
                 // Load and log the atomic operation.
                 // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
-                let place_ptr = place.ptr.assert_ptr();
-                let size = place.layout.size;
                 let alloc_meta =
-                    &this.memory.get_alloc_extra(place_ptr.alloc_id)?.data_race.as_ref().unwrap();
+                    &this.memory.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
                 log::trace!(
-                    "Atomic op({}) with ordering {:?} on memory({:?}, offset={}, size={})",
+                    "Atomic op({}) with ordering {:?} on {:?} (size={})",
                     description,
                     &atomic,
-                    place_ptr.alloc_id,
-                    place_ptr.offset.bytes(),
+                    ptr,
                     size.bytes()
                 );
 
                 // Perform the atomic operation.
                 data_race.maybe_perform_sync_operation(|index, mut clocks| {
-                    for (_, range) in
-                        alloc_meta.alloc_ranges.borrow_mut().iter_mut(place_ptr.offset, size)
+                    for (offset, range) in
+                        alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
                     {
                         if let Err(DataRace) = op(range, &mut *clocks, index, atomic) {
                             mem::drop(clocks);
@@ -1050,8 +1025,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
                                 range,
                                 description,
                                 true,
-                                place_ptr,
-                                size,
+                                Pointer::new(alloc_id, offset),
                             )
                             .map(|_| true);
                         }
@@ -1063,12 +1037,11 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
 
                 // Log changes to atomic memory.
                 if log::log_enabled!(log::Level::Trace) {
-                    for (_, range) in alloc_meta.alloc_ranges.borrow().iter(place_ptr.offset, size)
+                    for (_offset, range) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size)
                     {
                         log::trace!(
-                            "Updated atomic memory({:?}, offset={}, size={}) to {:#?}",
-                            place.ptr.assert_ptr().alloc_id,
-                            place_ptr.offset.bytes(),
+                            "Updated atomic memory({:?}, size={}) to {:#?}",
+                            ptr,
                             size.bytes(),
                             range.atomic_ops
                         );
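In `validate_atomic_op`, the place's pointer is now resolved once into an allocation id plus allocation-local base offset, and all subsequent range iteration and logging works relative to that base. A self-contained analogue of this "resolve once, then index per-allocation state" shape is sketched below; `AllocId`, `TaggedPtr`, and `AllocRaceState` here are invented illustration types, not Miri's.

```rust
use std::collections::HashMap;

// Invented stand-ins for an allocation id, a resolved pointer, and the
// per-allocation metadata keyed by that id.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct AllocId(u64);

#[derive(Debug, Clone, Copy)]
struct TaggedPtr {
    alloc: AllocId,
    offset: u64, // allocation-local base offset of the access
}

#[derive(Debug, Default)]
struct AllocRaceState {
    atomic_ops: u64, // toy counter standing in for per-range atomic clocks
}

/// Resolve the pointer once, then do all bookkeeping relative to the
/// allocation-local offset instead of re-deriving it for every range entry.
fn log_atomic_op(
    metadata: &mut HashMap<AllocId, AllocRaceState>,
    ptr: TaggedPtr,
    size: u64,
    description: &str,
) {
    let state = metadata.entry(ptr.alloc).or_default();
    state.atomic_ops += 1;
    println!(
        "Atomic op({}) on {:?} (offset={}, size={}), ops so far: {}",
        description, ptr.alloc, ptr.offset, size, state.atomic_ops
    );
}

fn main() {
    let mut metadata = HashMap::new();
    let ptr = TaggedPtr { alloc: AllocId(7), offset: 16 };
    log_atomic_op(&mut metadata, ptr, 4, "load");
    log_atomic_op(&mut metadata, ptr, 4, "store");
}
```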