@@ -8,6 +8,7 @@ use rustc_data_structures::sharded::Sharded;
8
8
#[ cfg( not( parallel_compiler) ) ]
9
9
use rustc_data_structures:: sync:: Lock ;
10
10
use rustc_data_structures:: sync:: WorkerLocal ;
11
+ use rustc_index:: vec:: { Idx , IndexVec } ;
11
12
use std:: default:: Default ;
12
13
use std:: fmt:: Debug ;
13
14
use std:: hash:: Hash ;
@@ -207,3 +208,174 @@ where
207
208
}
208
209
}
209
210
}
211
+
212
/// Query cache for keys that implement `Idx`: each key indexes directly into
/// an `IndexVec` slot holding the cached value plus the `DepNodeIndex` it was
/// computed at, avoiding hashing entirely.
pub struct VecCache<K: Idx, V> {
    // Sharded by key index in the parallel compiler to reduce lock contention;
    // a single `Lock` otherwise.
    #[cfg(parallel_compiler)]
    cache: Sharded<IndexVec<K, Option<(V, DepNodeIndex)>>>,
    #[cfg(not(parallel_compiler))]
    cache: Lock<IndexVec<K, Option<(V, DepNodeIndex)>>>,
}
218
+
219
+ impl < K : Idx , V > Default for VecCache < K , V > {
220
+ fn default ( ) -> Self {
221
+ VecCache { cache : Default :: default ( ) }
222
+ }
223
+ }
224
+
225
impl<K: Eq + Idx, V: Clone + Debug> QueryStorage for VecCache<K, V> {
    // Values are stored and handed back by value, so `Value` and `Stored`
    // coincide.
    type Value = V;
    type Stored = V;

    #[inline]
    fn store_nocache(&self, value: Self::Value) -> Self::Stored {
        // We have no dedicated storage
        value
    }
}
235
+
236
impl<K, V> QueryCache for VecCache<K, V>
where
    K: Eq + Idx + Clone + Debug,
    V: Clone + Debug,
{
    type Key = K;

    /// Looks up `key`; on a hit, invokes `on_hit` with the cached value and
    /// its dep-node index while the cache lock is still held, and returns the
    /// closure's result. Returns `Err(())` on a miss.
    #[inline(always)]
    fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
    where
        OnHit: FnOnce(&V, DepNodeIndex) -> R,
    {
        // Pick the shard by the key's raw index (parallel), or take the single
        // lock (non-parallel).
        #[cfg(parallel_compiler)]
        let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
        #[cfg(not(parallel_compiler))]
        let lock = self.cache.lock();
        // Outer `Some`: index is within the vec's bounds; inner `Some`: the
        // slot has actually been filled.
        if let Some(Some(value)) = lock.get(*key) {
            let hit_result = on_hit(&value.0, value.1);
            Ok(hit_result)
        } else {
            Err(())
        }
    }

    /// Records a freshly computed `value` for `key` and returns it. The value
    /// is cloned into the cache; the original is handed back to the caller.
    #[inline]
    fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
        #[cfg(parallel_compiler)]
        let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
        #[cfg(not(parallel_compiler))]
        let mut lock = self.cache.lock();
        // `IndexVec::insert` grows the vec as needed to make room for `key`.
        lock.insert(key, (value.clone(), index));
        value
    }

    /// Calls `f` for every populated entry. Note that in the parallel case all
    /// shards are locked for the duration of the iteration.
    fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
        #[cfg(parallel_compiler)]
        {
            let shards = self.cache.lock_shards();
            for shard in shards.iter() {
                for (k, v) in shard.iter_enumerated() {
                    // Skip slots that exist only as padding below a larger index.
                    if let Some(v) = v {
                        f(&k, &v.0, v.1);
                    }
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            let map = self.cache.lock();
            for (k, v) in map.iter_enumerated() {
                if let Some(v) = v {
                    f(&k, &v.0, v.1);
                }
            }
        }
    }
}
293
+
294
/// Like `VecCache`, but each `(value, dep-node-index)` pair is allocated in a
/// per-worker arena and the per-key slot stores an `&'tcx` reference into it,
/// so callers receive a stable `&'tcx V` instead of a clone.
pub struct VecArenaCache<'tcx, K: Idx, V> {
    // Backing storage for the cached pairs; referenced by the `cache` slots.
    arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
    #[cfg(parallel_compiler)]
    cache: Sharded<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
    #[cfg(not(parallel_compiler))]
    cache: Lock<IndexVec<K, Option<&'tcx (V, DepNodeIndex)>>>,
}
301
+
302
+ impl < ' tcx , K : Idx , V > Default for VecArenaCache < ' tcx , K , V > {
303
+ fn default ( ) -> Self {
304
+ VecArenaCache {
305
+ arena : WorkerLocal :: new ( |_| TypedArena :: default ( ) ) ,
306
+ cache : Default :: default ( ) ,
307
+ }
308
+ }
309
+ }
310
+
311
impl<'tcx, K: Eq + Idx, V: Debug + 'tcx> QueryStorage for VecArenaCache<'tcx, K, V> {
    type Value = V;
    // Stored values are arena references, so lookups hand out `&'tcx V`.
    type Stored = &'tcx V;

    #[inline]
    fn store_nocache(&self, value: Self::Value) -> Self::Stored {
        // Allocated with INVALID since this value is not tracked in the cache.
        let value = self.arena.alloc((value, DepNodeIndex::INVALID));
        // SAFETY(review): extends the borrow's lifetime to 'tcx via a raw
        // pointer round-trip; sound only if the arena outlives 'tcx and never
        // moves its allocations — confirm against the sibling arena caches.
        let value = unsafe { &*(&value.0 as *const _) };
        &value
    }
}
322
+
323
impl<'tcx, K, V: 'tcx> QueryCache for VecArenaCache<'tcx, K, V>
where
    K: Eq + Idx + Clone + Debug,
    V: Debug,
{
    type Key = K;

    /// Looks up `key`; on a hit, invokes `on_hit` with the cached `&'tcx V`
    /// and its dep-node index while the lock is held, returning the closure's
    /// result. Returns `Err(())` on a miss.
    #[inline(always)]
    fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
    where
        OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
    {
        // Shard by raw key index (parallel) or take the single lock.
        #[cfg(parallel_compiler)]
        let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
        #[cfg(not(parallel_compiler))]
        let lock = self.cache.lock();
        // Outer `Some`: index in bounds; inner `Some`: slot populated.
        if let Some(Some(value)) = lock.get(*key) {
            let hit_result = on_hit(&&value.0, value.1);
            Ok(hit_result)
        } else {
            Err(())
        }
    }

    /// Moves `value` into the arena, records a reference to it for `key`, and
    /// returns the arena-backed `&'tcx V`. No clone is made, unlike `VecCache`.
    #[inline]
    fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
        let value = self.arena.alloc((value, index));
        // SAFETY(review): lifetime-extends the arena allocation to 'tcx;
        // relies on the arena living for 'tcx and not moving allocations —
        // confirm this matches the other arena-backed caches in this file.
        let value = unsafe { &*(value as *const _) };
        #[cfg(parallel_compiler)]
        let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
        #[cfg(not(parallel_compiler))]
        let mut lock = self.cache.lock();
        // `IndexVec::insert` grows the vec as needed to make room for `key`.
        lock.insert(key, value);
        &value.0
    }

    /// Calls `f` for every populated entry. In the parallel case all shards
    /// stay locked for the duration of the iteration.
    fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
        #[cfg(parallel_compiler)]
        {
            let shards = self.cache.lock_shards();
            for shard in shards.iter() {
                for (k, v) in shard.iter_enumerated() {
                    // Skip never-filled slots below a larger inserted index.
                    if let Some(v) = v {
                        f(&k, &v.0, v.1);
                    }
                }
            }
        }
        #[cfg(not(parallel_compiler))]
        {
            let map = self.cache.lock();
            for (k, v) in map.iter_enumerated() {
                if let Some(v) = v {
                    f(&k, &v.0, v.1);
                }
            }
        }
    }
}
0 commit comments