@@ -229,6 +229,27 @@ pub trait Analysis<'tcx> {
         unreachable!();
     }

+    #[inline]
+    fn iterate_to_fixpoint<'mir>(
+        self,
+        tcx: TyCtxt<'tcx>,
+        body: &'mir mir::Body<'tcx>,
+        pass_name: Option<&'static str>,
+    ) -> AnalysisAndResults<'tcx, Self>
+    where
+        Self: Sized,
+        Self::Domain: DebugWithContext<Self>,
+    {
+        // Computing dataflow over the SCCs is only supported in forward analyses. It's also
+        // unnecessary to use it on acyclic graphs, as the condensation graph is of course the same
+        // as the CFG itself.
+        if Self::Direction::IS_BACKWARD || !body.basic_blocks.is_cfg_cyclic() {
+            self.iterate_to_fixpoint_per_block(tcx, body, pass_name)
+        } else {
+            self.iterate_to_fixpoint_per_scc(tcx, body, pass_name)
+        }
+    }
+
     /* Extension methods */

     /// Finds the fixpoint for this dataflow problem.
@@ -244,7 +265,7 @@ pub trait Analysis<'tcx> {
     /// dataflow analysis. Some analyses are run multiple times in the compilation pipeline.
     /// Without a `pass_name` to differentiate them, only the results for the latest run will be
     /// saved.
-    fn iterate_to_fixpoint<'mir>(
+    fn iterate_to_fixpoint_per_block<'mir>(
         mut self,
         tcx: TyCtxt<'tcx>,
         body: &'mir mir::Body<'tcx>,
@@ -308,6 +329,92 @@ pub trait Analysis<'tcx> {

         AnalysisAndResults { analysis: self, results }
     }
+
+    fn iterate_to_fixpoint_per_scc<'mir>(
+        mut self,
+        _tcx: TyCtxt<'tcx>,
+        body: &'mir mir::Body<'tcx>,
+        _pass_name: Option<&'static str>,
+    ) -> AnalysisAndResults<'tcx, Self>
+    where
+        Self: Sized,
+        Self::Domain: DebugWithContext<Self>,
+    {
+        assert!(Self::Direction::IS_FORWARD);
+
+        let sccs = body.basic_blocks.sccs();
+
+        struct VecQueue<T: Idx> {
+            queue: Vec<T>,
+            set: DenseBitSet<T>,
+        }
+
+        impl<T: Idx> VecQueue<T> {
+            #[inline]
+            fn with_none(len: usize) -> Self {
+                VecQueue { queue: Vec::with_capacity(len), set: DenseBitSet::new_empty(len) }
+            }
+
+            #[inline]
+            fn insert(&mut self, element: T) {
+                if self.set.insert(element) {
+                    self.queue.push(element);
+                }
+            }
+        }
+
+        let mut scc_queue = VecQueue::with_none(sccs.component_count);
+        for &bb in body.basic_blocks.reverse_postorder().iter() {
+            // let scc = sccs.components[bb.as_usize()];
+            let scc = sccs.components[bb];
+            scc_queue.insert(scc);
+        }
+        // assert_eq!(scc_queue.queue, sccs.queue);
+
+        let mut results = IndexVec::from_fn_n(|_| self.bottom_value(body), body.basic_blocks.len());
+        self.initialize_start_block(body, &mut results[mir::START_BLOCK]);
+
+        // Worklist for per-SCC iterations
+        let mut dirty_queue: WorkQueue<BasicBlock> = WorkQueue::with_none(body.basic_blocks.len());
+
+        let mut state = self.bottom_value(body);
+
+        for &scc in &scc_queue.queue {
+            // Blocks must be added in RPO.
+            // for block in sccs.blocks_in_rpo(scc as usize) {
+            for block in sccs.sccs[scc as usize].iter().copied() {
+                dirty_queue.insert(block);
+            }
+
+            while let Some(bb) = dirty_queue.pop() {
+                // Set the state to the entry state of the block. This is equivalent to `state =
+                // results[bb].clone()`, but it saves an allocation, thus improving compile times.
+                state.clone_from(&results[bb]);
+
+                Self::Direction::apply_effects_in_block(
+                    &mut self,
+                    body,
+                    &mut state,
+                    bb,
+                    &body[bb],
+                    |target: BasicBlock, state: &Self::Domain| {
+                        let set_changed = results[target].join(state);
+                        // let target_scc = sccs.components[target.as_usize()];
+                        let target_scc = sccs.components[target];
+                        if set_changed && target_scc == scc {
+                            // The target block is in the SCC we're currently processing, and we
+                            // want to process this block until fixpoint. Otherwise, the target
+                            // block is in a successor SCC and it will be processed when that SCC is
+                            // encountered later.
+                            dirty_queue.insert(target);
+                        }
+                    },
+                );
+            }
+        }
+
+        AnalysisAndResults { analysis: self, results }
+    }
 }

 /// The legal operations for a transfer function in a gen/kill problem.
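For readers unfamiliar with the approach, the following is a standalone sketch (not rustc code) of the per-SCC strategy that `iterate_to_fixpoint_per_scc` implements: SCCs are visited once in condensation order, a worklist is run to a fixpoint only over the blocks of the current SCC, and edges leaving the SCC merely join state into their target, which is picked up when that target's SCC comes up. The toy CFG, the hard-coded SCC assignment (`scc_of`, `sccs`), and the "blocks seen so far" bit-set domain are all invented for illustration.

```rust
use std::collections::VecDeque;

type Block = usize;

fn main() {
    // Toy CFG with one loop: 0 -> 1, 1 -> 2, 2 -> 1 (back edge), 2 -> 3.
    let succs: Vec<Vec<Block>> = vec![vec![1], vec![2], vec![1, 3], vec![]];
    // SCC assignment and condensation order are hard-coded here; rustc derives
    // them from `body.basic_blocks.sccs()` and a reverse-postorder walk.
    let scc_of: Vec<usize> = vec![0, 1, 1, 2];
    let sccs: Vec<Vec<Block>> = vec![vec![0], vec![1, 2], vec![3]];

    // Toy forward analysis: `entry[b]` is the set (as a bit mask) of blocks
    // that may run before control reaches `b`. The join is set union, which
    // has finite height, so the intra-SCC worklist terminates.
    let mut entry: Vec<u32> = vec![0; succs.len()];

    for scc in &sccs {
        // Seed the worklist with the SCC's own blocks (rustc seeds them in RPO).
        let mut on_queue = vec![false; succs.len()];
        let mut dirty: VecDeque<Block> = VecDeque::new();
        for &b in scc {
            on_queue[b] = true;
            dirty.push_back(b);
        }

        while let Some(bb) = dirty.pop_front() {
            on_queue[bb] = false;
            // Transfer function for the block: mark it as seen.
            let exit = entry[bb] | (1u32 << bb);
            for &target in &succs[bb] {
                let joined = entry[target] | exit;
                if joined != entry[target] {
                    entry[target] = joined;
                    // Only re-queue targets in the *current* SCC; targets in a
                    // later SCC are handled once that SCC is reached.
                    if scc_of[target] == scc_of[bb] && !on_queue[target] {
                        on_queue[target] = true;
                        dirty.push_back(target);
                    }
                }
            }
        }
    }

    // The loop {1, 2} was iterated to a fixpoint; every other SCC was visited once.
    println!("{entry:?}"); // prints [0, 7, 7, 7]
}
```

On an acyclic CFG every SCC is a single block and the inner worklist never re-queues anything, which is why the dispatch added above only takes this path for cyclic CFGs in forward analyses.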