@@ -44,11 +44,14 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::unhash::UnhashMap;
+use rustc_data_structures::{jobserver, outline};
use rustc_index::{Idx, IndexVec};
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder};
use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
-use std::iter;
+use rustc_session::Session;
use std::marker::PhantomData;
+use std::sync::{Arc, OnceLock};
+use std::{iter, thread};

// The maximum value of `SerializedDepNodeIndex` leaves the upper two bits
// unused so that we can store multiple index types in `CompressedHybridIndex`,
@@ -70,23 +73,47 @@ const DEP_NODE_PAD: usize = DEP_NODE_SIZE - 1;
const DEP_NODE_WIDTH_BITS: usize = DEP_NODE_SIZE / 2;

/// Data for use when recompiling the **current crate**.
-#[derive(Debug, Default)]
pub struct SerializedDepGraph {
    /// The set of all DepNodes in the graph
    nodes: IndexVec<SerializedDepNodeIndex, DepNode>,
+
    /// The set of all Fingerprints in the graph. Each Fingerprint corresponds to
    /// the DepNode at the same index in the nodes vector.
    fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint>,
+
    /// For each DepNode, stores the list of edges originating from that
    /// DepNode. Encoded as a [start, end) pair indexing into edge_list_data,
    /// which holds the actual DepNodeIndices of the target nodes.
    edge_list_indices: IndexVec<SerializedDepNodeIndex, EdgeHeader>,
+
    /// A flattened list of all edge targets in the graph, stored in the same
    /// varint encoding that we use on disk. Edge sources are implicit in edge_list_indices.
    edge_list_data: Vec<u8>,
+
    /// Stores a map from fingerprints to nodes per dep node kind.
-    /// This is the reciprocal of `nodes`.
-    index: Vec<UnhashMap<PackedFingerprint, SerializedDepNodeIndex>>,
+    /// This is the reciprocal of `nodes`: it is computed on demand for each dep kind,
+    /// and the entire index is also prefetched by a background thread.
+    index: Vec<OnceLock<UnhashMap<PackedFingerprint, SerializedDepNodeIndex>>>,
+
+    /// Stores the number of nodes for each dep node kind.
+    index_sizes: Vec<usize>,
+
+    /// A profiler reference used by the index prefetching thread.
+    prof: SelfProfilerRef,
+}
+
+impl Default for SerializedDepGraph {
+    fn default() -> Self {
+        SerializedDepGraph {
+            nodes: Default::default(),
+            fingerprints: Default::default(),
+            edge_list_indices: Default::default(),
+            edge_list_data: Default::default(),
+            index: Default::default(),
+            index_sizes: Default::default(),
+            prof: SelfProfilerRef::new(None, None),
+        }
+    }
}

impl SerializedDepGraph {
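For orientation, here is a minimal, self-contained sketch of the lookup pattern this change introduces: one `OnceLock` per dep kind, filled either lazily on first lookup or by a prefetch thread, with a benign `set` race. The `LazyIndex` type, the plain `HashMap`, and the `usize`/`u64` stand-ins for `DepKind`, `PackedFingerprint`, and `SerializedDepNodeIndex` are illustration-only simplifications, not the rustc types.

```rust
use std::collections::HashMap;
use std::sync::OnceLock;

// Illustration-only stand-ins for DepKind, PackedFingerprint and SerializedDepNodeIndex.
type Kind = usize;
type Hash = u64;
type NodeIndex = usize;

struct LazyIndex {
    /// (kind, fingerprint) of every node, in serialized order.
    nodes: Vec<(Kind, Hash)>,
    /// One lazily built reverse map per kind.
    per_kind: Vec<OnceLock<HashMap<Hash, NodeIndex>>>,
}

impl LazyIndex {
    fn lookup(&self, kind: Kind, hash: Hash) -> Option<NodeIndex> {
        let slot = self.per_kind.get(kind)?;
        let map = slot.get().unwrap_or_else(|| {
            // Build the map for this kind on first use.
            let mut built = HashMap::new();
            for (i, &(k, h)) in self.nodes.iter().enumerate() {
                if k == kind {
                    built.insert(h, i);
                }
            }
            // A prefetch thread may have won the race; both sides compute the
            // same contents, so losing just discards our copy.
            slot.set(built).ok();
            slot.get().unwrap()
        });
        map.get(&hash).copied()
    }
}

fn main() {
    let index = LazyIndex {
        nodes: vec![(0, 10), (1, 20), (0, 30)],
        per_kind: (0..2).map(|_| OnceLock::new()).collect(),
    };
    assert_eq!(index.lookup(0, 30), Some(2));
    assert_eq!(index.lookup(1, 99), None);
}
```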
@@ -127,9 +154,35 @@ impl SerializedDepGraph {
        self.nodes[dep_node_index]
    }

+    /// This computes and sets up the index for just the specified `DepKind`.
+    fn setup_index(&self, dep_kind: DepKind) {
+        let _timer = self.prof.generic_activity("incr_comp_dep_graph_setup_index");
+
+        let mut index = UnhashMap::with_capacity_and_hasher(
+            self.index_sizes[dep_kind.as_usize()],
+            Default::default(),
+        );
+
+        for (idx, node) in self.nodes.iter_enumerated() {
+            if node.kind == dep_kind {
+                index.insert(node.hash, idx);
+            }
+        }
+
+        // This may race with the prefetching thread, but that will set the same value.
+        self.index[dep_kind.as_usize()].set(index).ok();
+    }
+
    #[inline]
    pub fn node_to_index_opt(&self, dep_node: &DepNode) -> Option<SerializedDepNodeIndex> {
-        self.index.get(dep_node.kind.as_usize())?.get(&dep_node.hash).cloned()
+        let index = self.index.get(dep_node.kind.as_usize())?;
+        let index = index.get().unwrap_or_else(|| {
+            outline(|| {
+                self.setup_index(dep_node.kind);
+                self.index[dep_node.kind.as_usize()].get().unwrap()
+            })
+        });
+        index.get(&dep_node.hash).cloned()
    }

#[ inline]
@@ -141,6 +194,44 @@ impl SerializedDepGraph {
    pub fn node_count(&self) -> usize {
        self.nodes.len()
    }
+
+    fn prefetch(&self) {
+        let _timer = self.prof.generic_activity("incr_comp_prefetch_dep_graph_index");
+
+        let mut index: Vec<_> = self
+            .index_sizes
+            .iter()
+            .map(|&n| UnhashMap::with_capacity_and_hasher(n, Default::default()))
+            .collect();
+
+        for (idx, node) in self.nodes.iter_enumerated() {
+            index[node.kind.as_usize()].insert(node.hash, idx);
+        }
+
+        for (i, index) in index.into_iter().enumerate() {
+            // This may race with `setup_index`, but that will set the same value.
+            self.index[i].set(index).ok();
+        }
+    }
+
+    /// This spawns a thread that prefetches the index.
+    fn spawn_prefetch_thread(self: &Arc<Self>) {
+        if !self.index.is_empty() {
+            let client = jobserver::client();
+            if let Ok(true) = client.available().map(|tokens| tokens > 0) {
+                if let Ok(token) = client.acquire() {
+                    let this = self.clone();
+                    thread::spawn(move || {
+                        this.prefetch();
+                        drop(token); // Hold the jobserver token until the prefetch is done.
+                    });
+                    return;
+                }
+            }
+
+            // Prefetch the index on the current thread if we don't have a token available.
+            self.prefetch();
+        }
+    }
}

/// A packed representation of an edge's start index and byte width.
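For readers less familiar with this pattern, the following rough sketch shows the spawn-or-inline decision that `spawn_prefetch_thread` makes, with `std::thread::available_parallelism` standing in for the jobserver token check and a toy `Graph` type in place of `SerializedDepGraph`; the real code acquires and holds an actual jobserver token while the background thread runs.

```rust
use std::sync::{Arc, OnceLock};
use std::thread;

// Toy graph: `index` is derived from `data`, like the dep-node index is derived from `nodes`.
struct Graph {
    data: Vec<u64>,
    index: OnceLock<Vec<u64>>,
}

impl Graph {
    fn prefetch(&self) {
        // Eagerly compute the whole index; racing a lazy builder is harmless
        // because both sides compute identical contents.
        let built: Vec<u64> = self.data.iter().map(|&x| x * 2).collect();
        self.index.set(built).ok();
    }

    fn spawn_prefetch_thread(self: &Arc<Self>) {
        // Stand-in for the jobserver check: only use a background thread when the
        // machine reports spare parallelism, otherwise prefetch inline.
        let spare = thread::available_parallelism().map_or(false, |n| n.get() > 1);
        if spare {
            let this = Arc::clone(self);
            thread::spawn(move || this.prefetch());
        } else {
            self.prefetch();
        }
    }
}

fn main() {
    let graph = Arc::new(Graph { data: vec![1, 2, 3], index: OnceLock::new() });
    graph.spawn_prefetch_thread();
    // Lookups can still build the index themselves if the prefetch hasn't landed yet.
    let index = graph.index.get_or_init(|| graph.data.iter().map(|&x| x * 2).collect());
    assert_eq!(index, &[2, 4, 6]);
}
```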
@@ -175,8 +266,8 @@ fn mask(bits: usize) -> usize {
}

impl SerializedDepGraph {
-    #[instrument(level = "debug", skip(d))]
-    pub fn decode<D: Deps>(d: &mut MemDecoder<'_>) -> SerializedDepGraph {
+    #[instrument(level = "debug", skip(d, sess))]
+    pub fn decode<D: Deps>(d: &mut MemDecoder<'_>, sess: &Session) -> Arc<SerializedDepGraph> {
        // The last 16 bytes are the node count and edge count.
        debug!("position: {:?}", d.position());
        let (node_count, edge_count) =
@@ -243,16 +334,21 @@ impl SerializedDepGraph {
        // end of the array. This padding ensure it doesn't.
        edge_list_data.extend(&[0u8; DEP_NODE_PAD]);

-        // Read the number of each dep kind and use it to create an hash map with a suitable size.
-        let mut index: Vec<_> = (0..(D::DEP_KIND_MAX + 1))
-            .map(|_| UnhashMap::with_capacity_and_hasher(d.read_u32() as usize, Default::default()))
-            .collect();
-
-        for (idx, node) in nodes.iter_enumerated() {
-            index[node.kind.as_usize()].insert(node.hash, idx);
-        }
-
-        SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data, index }
+        // Read the number of nodes for each dep kind.
+        let index_sizes: Vec<_> =
+            (0..(D::DEP_KIND_MAX + 1)).map(|_| d.read_u32() as usize).collect();
+
+        let result = Arc::new(SerializedDepGraph {
+            nodes,
+            fingerprints,
+            edge_list_indices,
+            edge_list_data,
+            index: (0..index_sizes.len()).map(|_| OnceLock::new()).collect(),
+            index_sizes,
+            prof: sess.prof.clone(),
+        });
+        result.spawn_prefetch_thread();
+        result
    }
}