@@ -75,7 +75,7 @@ use rustc_target::abi::Size;
use crate::{
    ImmTy, Immediate, InterpResult, MPlaceTy, MemPlaceMeta, MemoryKind, MiriEvalContext,
    MiriEvalContextExt, MiriMemoryKind, OpTy, Pointer, RangeMap, Scalar, ScalarMaybeUninit, Tag,
-    ThreadId, VClock, VTimestamp, VectorIdx,
+    ThreadId, VClock, VTimestamp, VectorIdx, AllocId, AllocRange,
};

pub type AllocExtra = VClockAlloc;
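Note (not part of the diff): the two new imports, `AllocId` and `AllocRange`, come from rustc's interpreter interface; an `AllocRange` is just an allocation-relative `start` offset plus a `size`. A minimal sketch of building such a range for the entry points changed below, assuming the `alloc_range` helper re-exported from `rustc_middle::mir::interpret`:

```rust
use rustc_middle::mir::interpret::{alloc_range, AllocRange};
use rustc_target::abi::Size;

// Build the allocation-relative range covering `len` bytes starting at `offset`.
// Illustrative only; the `start`/`size` field reads mirror how the changed
// `read` and `unique_access` loops below consume the range.
fn access_range(offset: Size, len: Size) -> AllocRange {
    let range: AllocRange = alloc_range(offset, len);
    debug_assert_eq!((range.start, range.size), (offset, len));
    range
}
```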
@@ -561,7 +561,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
            if lt { &rhs } else { &old }
        };

-        this.allow_data_races_mut(|this| this.write_immediate_to_mplace(**new_val, place))?;
+        this.allow_data_races_mut(|this| this.write_immediate(**new_val, &(*place).into()))?;

        this.validate_atomic_rmw(&place, atomic)?;
@@ -713,18 +713,6 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
            Ok(())
        }
    }
-
-    fn reset_vector_clocks(&mut self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
-        let this = self.eval_context_mut();
-        if let Some(data_race) = &mut this.memory.extra.data_race {
-            if data_race.multi_threaded.get() {
-                let alloc_meta =
-                    this.memory.get_alloc_extra_mut(ptr.alloc_id)?.0.data_race.as_mut().unwrap();
-                alloc_meta.reset_clocks(ptr.offset, size);
-            }
-        }
-        Ok(())
-    }
}

/// Vector clock metadata for a logical memory allocation.
@@ -769,14 +757,6 @@ impl VClockAlloc {
        }
    }

-    fn reset_clocks(&mut self, offset: Size, len: Size) {
-        let alloc_ranges = self.alloc_ranges.get_mut();
-        for (_, range) in alloc_ranges.iter_mut(offset, len) {
-            // Reset the portion of the range
-            *range = MemoryCellClocks::new(0, VectorIdx::MAX_INDEX);
-        }
-    }
-
    // Find an index, if one exists where the value
    // in `l` is greater than the value in `r`.
    fn find_gt_index(l: &VClock, r: &VClock) -> Option<VectorIdx> {
@@ -820,8 +800,7 @@ impl VClockAlloc {
        range: &MemoryCellClocks,
        action: &str,
        is_atomic: bool,
-        pointer: Pointer<Tag>,
-        len: Size,
+        ptr_dbg: Pointer<AllocId>,
    ) -> InterpResult<'tcx> {
        let (current_index, current_clocks) = global.current_thread_state();
        let write_clock;
@@ -863,15 +842,12 @@ impl VClockAlloc {

        // Throw the data-race detection.
        throw_ub_format!(
-            "Data race detected between {} on {} and {} on {}, memory({:?},offset={},size={})\
-            \n (current vector clock = {:?}, conflicting timestamp = {:?})",
+            "Data race detected between {} on {} and {} on {} at {:?} (current vector clock = {:?}, conflicting timestamp = {:?})",
            action,
            current_thread_info,
            other_action,
            other_thread_info,
-            pointer.alloc_id,
-            pointer.offset.bytes(),
-            len.bytes(),
+            ptr_dbg,
            current_clocks.clock,
            other_clock
        )
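Note (not part of the diff): `ptr_dbg` replaces the old `pointer`/`len` pair purely for diagnostics. It is an allocation-relative pointer built at the racing cell, and its `Debug` formatting stands in for the explicit alloc-id/offset/size fields of the old message. A minimal sketch of how the call sites below construct it, using only the `Pointer::new(provenance, offset)` constructor already visible in this diff:

```rust
use rustc_middle::mir::interpret::{AllocId, Pointer};
use rustc_target::abi::Size;

// Allocation-relative pointer used only for the data-race error message.
fn debug_ptr(alloc_id: AllocId, offset: Size) -> Pointer<AllocId> {
    Pointer::new(alloc_id, offset)
}
```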
@@ -884,17 +860,17 @@ impl VClockAlloc {
    /// atomic read operations.
    pub fn read<'tcx>(
        &self,
-        pointer: Pointer<Tag>,
-        len: Size,
+        alloc_id: AllocId,
+        range: AllocRange,
        global: &GlobalState,
    ) -> InterpResult<'tcx> {
        if global.multi_threaded.get() {
            let (index, clocks) = global.current_thread_state();
            let mut alloc_ranges = self.alloc_ranges.borrow_mut();
-            for (_, range) in alloc_ranges.iter_mut(pointer.offset, len) {
+            for (offset, range) in alloc_ranges.iter_mut(range.start, range.size) {
                if let Err(DataRace) = range.read_race_detect(&*clocks, index) {
                    // Report data-race.
-                    return Self::report_data_race(global, range, "Read", false, pointer, len);
+                    return Self::report_data_race(global, range, "Read", false, Pointer::new(alloc_id, offset));
                }
            }
            Ok(())
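Note (not part of the diff): a sketch of how a memory-read hook might drive the new `read` signature; the hook name and the use of `alloc_range` are illustrative assumptions, not part of this change:

```rust
// Hypothetical read hook, assumed to live in this file's scope so that
// VClockAlloc, AllocId, GlobalState, InterpResult and alloc_range are in reach.
fn on_memory_read<'tcx>(
    alloc_extra: &VClockAlloc,
    alloc_id: AllocId,
    offset: Size,
    len: Size,
    global: &GlobalState,
) -> InterpResult<'tcx> {
    // Pair the allocation-relative offset with the access size and forward it.
    alloc_extra.read(alloc_id, alloc_range(offset, len), global)
}
```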
@@ -906,23 +882,22 @@ impl VClockAlloc {
    // Shared code for detecting data-races on unique access to a section of memory
    fn unique_access<'tcx>(
        &mut self,
-        pointer: Pointer<Tag>,
-        len: Size,
+        alloc_id: AllocId,
+        range: AllocRange,
        write_type: WriteType,
        global: &mut GlobalState,
    ) -> InterpResult<'tcx> {
        if global.multi_threaded.get() {
            let (index, clocks) = global.current_thread_state();
-            for (_, range) in self.alloc_ranges.get_mut().iter_mut(pointer.offset, len) {
+            for (offset, range) in self.alloc_ranges.get_mut().iter_mut(range.start, range.size) {
                if let Err(DataRace) = range.write_race_detect(&*clocks, index, write_type) {
                    // Report data-race
                    return Self::report_data_race(
                        global,
                        range,
                        write_type.get_descriptor(),
                        false,
-                        pointer,
-                        len,
+                        Pointer::new(alloc_id, offset),
                    );
                }
            }
@@ -938,11 +913,11 @@ impl VClockAlloc {
    /// operation
    pub fn write<'tcx>(
        &mut self,
-        pointer: Pointer<Tag>,
-        len: Size,
+        alloc_id: AllocId,
+        range: AllocRange,
        global: &mut GlobalState,
    ) -> InterpResult<'tcx> {
-        self.unique_access(pointer, len, WriteType::Write, global)
+        self.unique_access(alloc_id, range, WriteType::Write, global)
    }

    /// Detect data-races for an unsynchronized deallocate operation, will not perform
@@ -951,11 +926,11 @@ impl VClockAlloc {
    /// operation
    pub fn deallocate<'tcx>(
        &mut self,
-        pointer: Pointer<Tag>,
-        len: Size,
+        alloc_id: AllocId,
+        range: AllocRange,
        global: &mut GlobalState,
    ) -> InterpResult<'tcx> {
-        self.unique_access(pointer, len, WriteType::Deallocate, global)
+        self.unique_access(alloc_id, range, WriteType::Deallocate, global)
    }
}

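Note (not part of the diff): `write` and `deallocate` now take the same `AllocId` plus `AllocRange` pair and differ only in the `WriteType` they forward to `unique_access`. A sketch of hypothetical call sites (the hook names are assumptions):

```rust
// Hypothetical write/deallocation hooks in this file's scope.
fn on_memory_write<'tcx>(
    alloc_extra: &mut VClockAlloc,
    alloc_id: AllocId,
    range: AllocRange,
    global: &mut GlobalState,
) -> InterpResult<'tcx> {
    alloc_extra.write(alloc_id, range, global)
}

fn on_memory_deallocated<'tcx>(
    alloc_extra: &mut VClockAlloc,
    alloc_id: AllocId,
    range: AllocRange,
    global: &mut GlobalState,
) -> InterpResult<'tcx> {
    alloc_extra.deallocate(alloc_id, range, global)
}
```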
@@ -1002,12 +977,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
        result
    }

-    /// Generic atomic operation implementation,
-    /// this accesses memory via get_raw instead of
-    /// get_raw_mut, due to issues calling get_raw_mut
-    /// for atomic loads from read-only memory.
-    /// FIXME: is this valid, or should get_raw_mut be used for
-    /// atomic-stores/atomic-rmw?
+    /// Generic atomic operation implementation
    fn validate_atomic_op<A: Debug + Copy>(
        &self,
        place: &MPlaceTy<'tcx, Tag>,
@@ -1023,25 +993,24 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
        let this = self.eval_context_ref();
        if let Some(data_race) = &this.memory.extra.data_race {
            if data_race.multi_threaded.get() {
+                let size = place.layout.size;
+                let (alloc_id, base_offset, ptr) = this.memory.ptr_get_alloc(place.ptr)?;
                // Load and log the atomic operation.
                // Note that atomic loads are possible even from read-only allocations, so `get_alloc_extra_mut` is not an option.
-                let place_ptr = place.ptr.assert_ptr();
-                let size = place.layout.size;
                let alloc_meta =
-                    &this.memory.get_alloc_extra(place_ptr.alloc_id)?.data_race.as_ref().unwrap();
+                    &this.memory.get_alloc_extra(alloc_id)?.data_race.as_ref().unwrap();
                log::trace!(
-                    "Atomic op({}) with ordering {:?} on memory({:?}, offset={}, size={})",
+                    "Atomic op({}) with ordering {:?} on {:?} (size={})",
                    description,
                    &atomic,
-                    place_ptr.alloc_id,
-                    place_ptr.offset.bytes(),
+                    ptr,
                    size.bytes()
                );

                // Perform the atomic operation.
                data_race.maybe_perform_sync_operation(|index, mut clocks| {
-                    for (_, range) in
-                        alloc_meta.alloc_ranges.borrow_mut().iter_mut(place_ptr.offset, size)
+                    for (offset, range) in
+                        alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
                    {
                        if let Err(DataRace) = op(range, &mut *clocks, index, atomic) {
                            mem::drop(clocks);
@@ -1050,8 +1019,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
                                range,
                                description,
                                true,
-                                place_ptr,
-                                size,
+                                Pointer::new(alloc_id, offset),
                            )
                            .map(|_| true);
                        }
@@ -1063,12 +1031,11 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {

                // Log changes to atomic memory.
                if log::log_enabled!(log::Level::Trace) {
-                    for (_, range) in alloc_meta.alloc_ranges.borrow().iter(place_ptr.offset, size)
+                    for (_offset, range) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size)
                    {
                        log::trace!(
-                            "Updated atomic memory({:?}, offset={}, size={}) to {:#?}",
-                            place.ptr.assert_ptr().alloc_id,
-                            place_ptr.offset.bytes(),
+                            "Updated atomic memory({:?}, size={}) to {:#?}",
+                            ptr,
                            size.bytes(),
                            range.atomic_ops
                        );