@@ -4663,120 +4663,86 @@ void Compiler::fgDoReversePostOrderLayout()
     }
 #endif // DEBUG
 
-    // If LSRA didn't create any new blocks, we can reuse its loop-aware RPO traversal,
-    // which is cached in Compiler::fgBBs.
-    // If the cache isn't available, we need to recompute the loop-aware RPO.
+    // If LSRA didn't create any new blocks, we can reuse its flowgraph annotations.
     //
-    BasicBlock** rpoSequence = fgBBs;
-
-    if (rpoSequence == nullptr)
+    if (m_dfsTree == nullptr)
     {
-        assert(m_dfsTree == nullptr);
-        m_dfsTree = fgComputeDfs</* useProfile */ true>();
-        FlowGraphNaturalLoops* const loops = FlowGraphNaturalLoops::Find(m_dfsTree);
-        rpoSequence = new (this, CMK_BasicBlock) BasicBlock*[m_dfsTree->GetPostOrderCount()];
-        unsigned index = 0;
-        auto addToSequence = [rpoSequence, &index](BasicBlock* block) {
-            rpoSequence[index++] = block;
-        };
-
-        fgVisitBlocksInLoopAwareRPO(m_dfsTree, loops, addToSequence);
+        m_dfsTree = fgComputeDfs</* useProfile */ true>();
+        m_loops   = FlowGraphNaturalLoops::Find(m_dfsTree);
     }
     else
     {
-        assert(m_dfsTree != nullptr);
+        assert(m_loops != nullptr);
     }
 
-    // Fast path: We don't have any EH regions, so just reorder the blocks
-    //
-    if (compHndBBtabCount == 0)
-    {
-        for (unsigned i = 1; i < m_dfsTree->GetPostOrderCount(); i++)
+    BasicBlock** const rpoSequence   = new (this, CMK_BasicBlock) BasicBlock*[m_dfsTree->GetPostOrderCount()];
+    unsigned           numBlocks     = 0;
+    auto               addToSequence = [rpoSequence, &numBlocks](BasicBlock* block) {
+        // Exclude handler regions from being reordered.
+        //
+        if (!block->hasHndIndex())
         {
-            BasicBlock* const block       = rpoSequence[i - 1];
-            BasicBlock* const blockToMove = rpoSequence[i];
-
-            if (!block->NextIs(blockToMove))
-            {
-                fgUnlinkBlock(blockToMove);
-                fgInsertBBafter(block, blockToMove);
-            }
+            rpoSequence[numBlocks++] = block;
         }
+    };
 
-        fgMoveHotJumps</* hasEH */ false>();
-
-        return;
-    }
+    fgVisitBlocksInLoopAwareRPO(m_dfsTree, m_loops, addToSequence);
 
-    // The RPO will break up call-finally pairs, so save them before re-ordering
+    // Reorder blocks.
     //
-    struct CallFinallyPair
+    for (unsigned i = 1; i < numBlocks; i++)
     {
-        BasicBlock* callFinally;
-        BasicBlock* callFinallyRet;
+        BasicBlock*       block       = rpoSequence[i - 1];
+        BasicBlock* const blockToMove = rpoSequence[i];
 
-        // Constructor provided so we can call ArrayStack::Emplace
-        //
-        CallFinallyPair(BasicBlock* first, BasicBlock* second)
-            : callFinally(first)
-            , callFinallyRet(second)
+        if (block->NextIs(blockToMove))
         {
+            continue;
         }
-    };
-
-    ArrayStack<CallFinallyPair> callFinallyPairs(getAllocator());
 
-    for (EHblkDsc* const HBtab : EHClauses(this))
-    {
-        if (HBtab->HasFinallyHandler())
+        // Only reorder blocks within the same try region. We don't want to make them non-contiguous.
+        //
+        if (!BasicBlock::sameTryRegion(block, blockToMove))
         {
-            for (BasicBlock* const pred : HBtab->ebdHndBeg->PredBlocks())
-            {
-                assert(pred->KindIs(BBJ_CALLFINALLY));
-                if (pred->isBBCallFinallyPair())
-                {
-                    callFinallyPairs.Emplace(pred, pred->Next());
-                }
-            }
+            continue;
         }
-    }
 
-    // Reorder blocks
-    //
-    for (unsigned i = 1; i < m_dfsTree->GetPostOrderCount(); i++)
-    {
-        BasicBlock* const block       = rpoSequence[i - 1];
-        BasicBlock* const blockToMove = rpoSequence[i];
+        // Don't move call-finally pair tails independently.
+        // When we encounter the head, we will move the entire pair.
+        //
+        if (blockToMove->isBBCallFinallyPairTail())
+        {
+            continue;
+        }
 
-        // Only reorder blocks within the same EH region -- we don't want to make them non-contiguous
+        // Don't break up call-finally pairs by inserting something in the middle.
         //
-        if (BasicBlock::sameEHRegion(block, blockToMove))
+        if (block->isBBCallFinallyPair())
         {
-            // Don't reorder EH regions with filter handlers -- we want the filter to come first
-            //
-            if (block->hasHndIndex() && ehGetDsc(block->getHndIndex())->HasFilter())
-            {
-                continue;
-            }
+            block = block->Next();
+        }
 
-            if (!block->NextIs(blockToMove))
-            {
-                fgUnlinkBlock(blockToMove);
-                fgInsertBBafter(block, blockToMove);
-            }
+        if (blockToMove->isBBCallFinallyPair())
+        {
+            BasicBlock* const callFinallyRet = blockToMove->Next();
+            fgUnlinkRange(blockToMove, callFinallyRet);
+            fgMoveBlocksAfter(blockToMove, callFinallyRet, block);
+        }
+        else
+        {
+            fgUnlinkBlock(blockToMove);
+            fgInsertBBafter(block, blockToMove);
         }
     }
 
-    // Fix up call-finally pairs
-    //
-    for (int i = 0; i < callFinallyPairs.Height(); i++)
+    if (compHndBBtabCount == 0)
     {
-        const CallFinallyPair& pair = callFinallyPairs.BottomRef(i);
-        fgUnlinkBlock(pair.callFinallyRet);
-        fgInsertBBafter(pair.callFinally, pair.callFinallyRet);
+        fgMoveHotJumps</* hasEH */ false>();
+    }
+    else
+    {
+        fgMoveHotJumps</* hasEH */ true>();
     }
-
-    fgMoveHotJumps</* hasEH */ true>();
 }
 
 // -----------------------------------------------------------------------------
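The core move in the new reordering loop above is a linked-list splice: walk a precomputed sequence and, for each consecutive pair, unlink the second block and relink it immediately after the first unless it is already there. Below is a minimal standalone sketch of that pattern; Block, unlink, insertAfter, and reorder are toy stand-ins invented for illustration, not the JIT's BasicBlock, fgUnlinkBlock, fgInsertBBafter, or the pass itself.

// Toy model: blocks form a doubly-linked list, and a precomputed sequence
// dictates the desired order of the blocks it contains.
#include <cstdio>
#include <vector>

struct Block
{
    int    id;
    Block* prev = nullptr;
    Block* next = nullptr;
};

// Detach 'b' from the list without destroying it.
static void unlink(Block* b)
{
    if (b->prev != nullptr)
        b->prev->next = b->next;
    if (b->next != nullptr)
        b->next->prev = b->prev;
    b->prev = b->next = nullptr;
}

// Relink 'b' immediately after 'after'.
static void insertAfter(Block* after, Block* b)
{
    b->prev = after;
    b->next = after->next;
    if (after->next != nullptr)
        after->next->prev = b;
    after->next = b;
}

// Splice each block in behind its predecessor in the desired sequence,
// skipping pairs that are already adjacent. sequence[0] is never moved,
// mirroring how the method's first block stays put in the pass above.
static void reorder(const std::vector<Block*>& sequence)
{
    for (size_t i = 1; i < sequence.size(); i++)
    {
        Block* const block       = sequence[i - 1];
        Block* const blockToMove = sequence[i];

        if (block->next == blockToMove)
        {
            continue;
        }

        unlink(blockToMove);
        insertAfter(block, blockToMove);
    }
}

int main()
{
    // Build the list 0 <-> 1 <-> 2 <-> 3, then request the order 0, 2, 1, 3.
    Block blocks[4];
    for (int i = 0; i < 4; i++)
    {
        blocks[i].id = i;
        if (i > 0)
        {
            blocks[i - 1].next = &blocks[i];
            blocks[i].prev     = &blocks[i - 1];
        }
    }

    reorder({&blocks[0], &blocks[2], &blocks[1], &blocks[3]});

    for (Block* b = &blocks[0]; b != nullptr; b = b->next)
    {
        printf("%d ", b->id); // prints: 0 2 1 3
    }
    printf("\n");
    return 0;
}

The real pass layers extra guards on top of this splice (same-try-region checks, and call-finally pairs moved as a two-block range via fgUnlinkRange/fgMoveBlocksAfter so the head and tail stay adjacent), as the hunk above shows.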
@@ -5133,14 +5099,6 @@ void Compiler::ThreeOptLayout::ConsiderEdge(FlowEdge* edge)
         return;
     }
 
-    // Don't waste time reordering within handler regions.
-    // Note that if a finally region is sufficiently hot,
-    // we should have cloned it into the main method body already.
-    if (srcBlk->hasHndIndex() || dstBlk->hasHndIndex())
-    {
-        return;
-    }
-
     // For backward jumps, we will consider partitioning before 'srcBlk'.
     // If 'srcBlk' is a BBJ_CALLFINALLYRET, this partition will split up a call-finally pair.
     // Thus, don't consider edges out of BBJ_CALLFINALLYRET blocks.
@@ -5256,7 +5214,8 @@ void Compiler::ThreeOptLayout::Run()
     // Initialize the current block order
     for (BasicBlock* const block : compiler->Blocks(compiler->fgFirstBB, finalBlock))
     {
-        if (!compiler->m_dfsTree->Contains(block))
+        // Exclude unreachable blocks and handler blocks from being reordered
+        if (!compiler->m_dfsTree->Contains(block) || block->hasHndIndex())
         {
             continue;
         }
@@ -5289,14 +5248,14 @@ void Compiler::ThreeOptLayout::Run()
             continue;
         }
 
-        // Only reorder within EH regions to maintain contiguity.
-        if (!BasicBlock::sameEHRegion(block, next))
+        // Only reorder within try regions to maintain contiguity.
+        if (!BasicBlock::sameTryRegion(block, next))
         {
             continue;
         }
 
-        // Don't move the entry of an EH region.
-        if (compiler->bbIsTryBeg(next) || compiler->bbIsHandlerBeg(next))
+        // Don't move the entry of a try region.
+        if (compiler->bbIsTryBeg(next))
         {
             continue;
         }
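Taken together, the Run() hunks reduce the 3-opt eligibility test for a consecutive pair to two questions: handler blocks never enter the layout order at all, and a block may be separated from its predecessor only if both sit in the same try region and the block is not that region's entry. A hedged restatement on a toy block type follows; tryIndex, inHandler, and isTryEntry are illustrative fields, not members of the runtime's BasicBlock.

#include <cassert>

struct ToyBlock
{
    int  tryIndex;   // 0 means "not inside any try region"
    bool inHandler;  // true for blocks inside a handler region
    bool isTryEntry; // true for the first block of a try region
};

// Mirrors the '|| block->hasHndIndex()' filter added to Run()'s
// initialization loop: handler blocks are never reordered.
static bool participatesInLayout(const ToyBlock& block)
{
    return !block.inHandler;
}

// Mirrors the sameTryRegion/bbIsTryBeg guards: 'next' may be detached from
// 'block' only if doing so cannot make a try region non-contiguous.
static bool mayReorder(const ToyBlock& block, const ToyBlock& next)
{
    return (block.tryIndex == next.tryIndex) && !next.isTryEntry;
}

int main()
{
    ToyBlock a{1, false, true};  // entry of try region 1
    ToyBlock b{1, false, false}; // inside try region 1
    ToyBlock c{0, false, false}; // outside any try region

    assert(participatesInLayout(b));
    assert(mayReorder(a, b));  // same try region, 'b' is not the entry
    assert(!mayReorder(b, c)); // different try regions
    assert(!mayReorder(c, a)); // 'a' is a try entry
    return 0;
}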