@@ -9,7 +9,8 @@
 use std::assert_matches::assert_matches;
 use std::borrow::{Borrow, Cow};
+use std::cell::Cell;
 use std::collections::VecDeque;
-use std::{fmt, mem, ptr};
+use std::{fmt, ptr};
 
 use rustc_abi::{Align, HasDataLayout, Size};
 use rustc_ast::Mutability;
@@ -131,7 +132,7 @@ pub struct Memory<'tcx, M: Machine<'tcx>> {
     /// This stores whether we are currently doing reads purely for the purpose of validation.
     /// Those reads do not trigger the machine's hooks for memory reads.
     /// Needless to say, this must only be set with great care!
-    validation_in_progress: bool,
+    validation_in_progress: Cell<bool>,
 }
 
 /// A reference to some allocation that was already bounds-checked for the given region
@@ -158,7 +159,7 @@ impl<'tcx, M: Machine<'tcx>> Memory<'tcx, M> {
             alloc_map: M::MemoryMap::default(),
             extra_fn_ptr_map: FxIndexMap::default(),
             dead_alloc_map: FxIndexMap::default(),
-            validation_in_progress: false,
+            validation_in_progress: Cell::new(false),
         }
     }
 
@@ -715,15 +716,15 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // We want to call the hook on *all* accesses that involve an AllocId, including zero-sized
         // accesses. That means we cannot rely on the closure above or the `Some` branch below. We
         // do this after `check_and_deref_ptr` to ensure some basic sanity has already been checked.
-        if !self.memory.validation_in_progress {
+        if !self.memory.validation_in_progress.get() {
             if let Ok((alloc_id, ..)) = self.ptr_try_get_alloc_id(ptr, size_i64) {
                 M::before_alloc_read(self, alloc_id)?;
             }
         }
 
         if let Some((alloc_id, offset, prov, alloc)) = ptr_and_alloc {
             let range = alloc_range(offset, size);
-            if !self.memory.validation_in_progress {
+            if !self.memory.validation_in_progress.get() {
                 M::before_memory_read(
                     self.tcx,
                     &self.machine,
@@ -801,7 +802,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
     {
         let tcx = self.tcx;
-        let validation_in_progress = self.memory.validation_in_progress;
+        let validation_in_progress = self.memory.validation_in_progress.get();
 
         let size_i64 = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
         let ptr_and_alloc = Self::check_and_deref_ptr(
@@ -1087,23 +1088,43 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     ///
     /// We do this so Miri's allocation access tracking does not show the validation
     /// reads as spurious accesses.
-    pub fn run_for_validation<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> R {
+    pub fn run_for_validation_mut<R>(&mut self, f: impl FnOnce(&mut Self) -> R) -> R {
         // This deliberately uses `==` on `bool` to follow the pattern
         // `assert!(val.replace(new) == old)`.
         assert!(
-            mem::replace(&mut self.memory.validation_in_progress, true) == false,
+            self.memory.validation_in_progress.replace(true) == false,
             "`validation_in_progress` was already set"
         );
         let res = f(self);
         assert!(
-            mem::replace(&mut self.memory.validation_in_progress, false) == true,
+            self.memory.validation_in_progress.replace(false) == true,
+            "`validation_in_progress` was unset by someone else"
+        );
+        res
+    }
+
+    /// Runs the closure in "validation" mode, which means the machine's memory read hooks will be
+    /// suppressed. Needless to say, this must only be set with great care! Cannot be nested.
+    ///
+    /// We do this so Miri's allocation access tracking does not show the validation
+    /// reads as spurious accesses.
+    pub fn run_for_validation_ref<R>(&self, f: impl FnOnce(&Self) -> R) -> R {
+        // This deliberately uses `==` on `bool` to follow the pattern
+        // `assert!(val.replace(new) == old)`.
+        assert!(
+            self.memory.validation_in_progress.replace(true) == false,
+            "`validation_in_progress` was already set"
+        );
+        let res = f(self);
+        assert!(
+            self.memory.validation_in_progress.replace(false) == true,
             "`validation_in_progress` was unset by someone else"
         );
         res
     }
 
     pub(super) fn validation_in_progress(&self) -> bool {
-        self.memory.validation_in_progress
+        self.memory.validation_in_progress.get()
     }
 }
 
@@ -1375,7 +1396,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         };
         let src_alloc = self.get_alloc_raw(src_alloc_id)?;
         let src_range = alloc_range(src_offset, size);
-        assert!(!self.memory.validation_in_progress, "we can't be copying during validation");
+        assert!(!self.memory.validation_in_progress.get(), "we can't be copying during validation");
         // For the overlapping case, it is crucial that we trigger the read hook
         // before the write hook -- the aliasing model cares about the order.
         M::before_memory_read(
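All of these hunks follow from one change: `Memory::validation_in_progress` becomes a `Cell<bool>`, so the flag can be flipped through a shared reference. That is what makes the new `&self`-based `run_for_validation_ref` possible alongside the renamed `run_for_validation_mut`. Here is a minimal, self-contained sketch of the pattern, using illustrative names rather than the actual rustc types:

```rust
// Sketch of the interior-mutability pattern this commit adopts: a
// `Cell<bool>` flag toggled through `&self`, with `Cell::replace`
// doubling as a re-entrancy check.
use std::cell::Cell;

struct Memory {
    validation_in_progress: Cell<bool>,
}

impl Memory {
    fn new() -> Self {
        Memory { validation_in_progress: Cell::new(false) }
    }

    /// Only needs `&self` -- the point of switching to `Cell<bool>`.
    fn run_for_validation<R>(&self, f: impl FnOnce(&Self) -> R) -> R {
        // `replace` returns the previous value, so each assert both
        // toggles the flag and enforces the "cannot be nested" contract.
        assert!(
            self.validation_in_progress.replace(true) == false,
            "`validation_in_progress` was already set"
        );
        let res = f(self);
        assert!(
            self.validation_in_progress.replace(false) == true,
            "`validation_in_progress` was unset by someone else"
        );
        res
    }
}

fn main() {
    let mem = Memory::new();
    let seen = mem.run_for_validation(|m| m.validation_in_progress.get());
    assert!(seen); // the flag was set inside the closure...
    assert!(!mem.validation_in_progress.get()); // ...and cleared afterwards
}
```

A `Cell` (rather than a `RefCell` or an atomic) suffices here because the flag is a `Copy` bool and `Cell` is not `Sync`, which matches the interpreter's single-threaded use of this state.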