From 1d352cb36c06a16bc93e424809fb431d5795e741 Mon Sep 17 00:00:00 2001 From: Aman Khalid Date: Tue, 3 Oct 2023 21:30:06 -0400 Subject: [PATCH] JIT: Make BasicBlock::bbJumpKind private (#92908) This is the beginning of a larger effort to disallow the use of BBJ_NONE (reserved for basic blocks that fall through) before the current method's block layout is finalized. --- src/coreclr/jit/assertionprop.cpp | 6 +- src/coreclr/jit/block.cpp | 8 +- src/coreclr/jit/block.h | 20 ++- src/coreclr/jit/codegenarm.cpp | 6 +- src/coreclr/jit/codegenarm64.cpp | 6 +- src/coreclr/jit/codegenarmarch.cpp | 2 +- src/coreclr/jit/codegencommon.cpp | 6 +- src/coreclr/jit/codegenlinear.cpp | 12 +- src/coreclr/jit/codegenloongarch64.cpp | 6 +- src/coreclr/jit/codegenriscv64.cpp | 6 +- src/coreclr/jit/codegenxarch.cpp | 8 +- src/coreclr/jit/compiler.cpp | 4 +- src/coreclr/jit/compiler.hpp | 12 +- src/coreclr/jit/emitarm.cpp | 4 +- src/coreclr/jit/emitarm64.cpp | 4 +- src/coreclr/jit/emitloongarch64.cpp | 2 +- src/coreclr/jit/emitriscv64.cpp | 2 +- src/coreclr/jit/emitxarch.cpp | 4 +- src/coreclr/jit/fgbasic.cpp | 88 ++++++------ src/coreclr/jit/fgdiagnostic.cpp | 47 ++++--- src/coreclr/jit/fgehopt.cpp | 57 ++++---- src/coreclr/jit/fgflow.cpp | 12 +- src/coreclr/jit/fginline.cpp | 14 +- src/coreclr/jit/fgopt.cpp | 144 ++++++++++---------- src/coreclr/jit/fgprofile.cpp | 46 +++---- src/coreclr/jit/fgprofilesynthesis.cpp | 20 +-- src/coreclr/jit/flowgraph.cpp | 38 +++--- src/coreclr/jit/gschecks.cpp | 2 +- src/coreclr/jit/ifconversion.cpp | 8 +- src/coreclr/jit/importer.cpp | 96 ++++++------- src/coreclr/jit/importercalls.cpp | 6 +- src/coreclr/jit/indirectcalltransformer.cpp | 12 +- src/coreclr/jit/jiteh.cpp | 16 +-- src/coreclr/jit/lclvars.cpp | 2 +- src/coreclr/jit/lir.cpp | 2 +- src/coreclr/jit/liveness.cpp | 8 +- src/coreclr/jit/loopcloning.cpp | 32 ++--- src/coreclr/jit/lower.cpp | 29 ++-- src/coreclr/jit/lsra.cpp | 14 +- src/coreclr/jit/morph.cpp | 62 ++++----- src/coreclr/jit/objectalloc.cpp | 2 +- src/coreclr/jit/optimizebools.cpp | 20 +-- src/coreclr/jit/optimizer.cpp | 66 ++++----- src/coreclr/jit/patchpoint.cpp | 4 +- src/coreclr/jit/redundantbranchopts.cpp | 27 ++-- src/coreclr/jit/switchrecognition.cpp | 2 +- 46 files changed, 503 insertions(+), 491 deletions(-) diff --git a/src/coreclr/jit/assertionprop.cpp b/src/coreclr/jit/assertionprop.cpp index c9670fc316c3a..f38878e33fff8 100644 --- a/src/coreclr/jit/assertionprop.cpp +++ b/src/coreclr/jit/assertionprop.cpp @@ -5260,7 +5260,7 @@ class AssertionPropFlowCallback { ASSERT_TP pAssertionOut; - if (predBlock->bbJumpKind == BBJ_COND && (predBlock->bbJumpDest == block)) + if (predBlock->KindIs(BBJ_COND) && (predBlock->bbJumpDest == block)) { pAssertionOut = mJumpDestOut[predBlock->bbNum]; @@ -5460,7 +5460,7 @@ ASSERT_TP* Compiler::optComputeAssertionGen() printf(FMT_BB " valueGen = ", block->bbNum); optPrintAssertionIndices(block->bbAssertionGen); - if (block->bbJumpKind == BBJ_COND) + if (block->KindIs(BBJ_COND)) { printf(" => " FMT_BB " valueGen = ", block->bbJumpDest->bbNum); optPrintAssertionIndices(jumpDestGen[block->bbNum]); @@ -6020,7 +6020,7 @@ PhaseStatus Compiler::optAssertionPropMain() printf(FMT_BB ":\n", block->bbNum); optDumpAssertionIndices(" in = ", block->bbAssertionIn, "\n"); optDumpAssertionIndices(" out = ", block->bbAssertionOut, "\n"); - if (block->bbJumpKind == BBJ_COND) + if (block->KindIs(BBJ_COND)) { printf(" " FMT_BB " = ", block->bbJumpDest->bbNum); optDumpAssertionIndices(bbJtrueAssertionOut[block->bbNum], "\n"); diff --git 
a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp index 8b5cef28a71a8..c2aa5ff45e3f4 100644 --- a/src/coreclr/jit/block.cpp +++ b/src/coreclr/jit/block.cpp @@ -1419,7 +1419,7 @@ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind) /* Record the jump kind in the block */ - block->bbJumpKind = jumpKind; + block->SetBBJumpKind(jumpKind DEBUG_ARG(this)); if (jumpKind == BBJ_THROW) { @@ -1499,9 +1499,9 @@ BasicBlock* Compiler::bbNewBasicBlock(BBjumpKinds jumpKind) bool BasicBlock::isBBCallAlwaysPair() const { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) - if (this->bbJumpKind == BBJ_CALLFINALLY) + if (this->KindIs(BBJ_CALLFINALLY)) #else - if ((this->bbJumpKind == BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL)) + if (this->KindIs(BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL)) #endif { #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -1510,7 +1510,7 @@ bool BasicBlock::isBBCallAlwaysPair() const #endif // Some asserts that the next block is a BBJ_ALWAYS of the proper form. assert(this->bbNext != nullptr); - assert(this->bbNext->bbJumpKind == BBJ_ALWAYS); + assert(this->bbNext->KindIs(BBJ_ALWAYS)); assert(this->bbNext->bbFlags & BBF_KEEP_BBJ_ALWAYS); assert(this->bbNext->isEmpty()); diff --git a/src/coreclr/jit/block.h b/src/coreclr/jit/block.h index 9c7953a12b9e5..88312967936f2 100644 --- a/src/coreclr/jit/block.h +++ b/src/coreclr/jit/block.h @@ -702,8 +702,26 @@ struct BasicBlock : private LIR::Range // a block corresponding to an exit from the try of a try/finally. bool isBBCallAlwaysPairTail() const; +private: BBjumpKinds bbJumpKind; // jump (if any) at the end of this block +public: + BBjumpKinds GetBBJumpKind() const + { + return bbJumpKind; + } + + void SetBBJumpKind(BBjumpKinds kind DEBUG_ARG(Compiler* comp)) + { +#ifdef DEBUG + // BBJ_NONE should only be assigned when optimizing jumps in Compiler::optOptimizeLayout + // TODO: Change assert to check if comp is in appropriate optimization phase to use BBJ_NONE + // (right now, this assertion does the null check to avoid unused variable warnings) + assert((kind != BBJ_NONE) || (comp != nullptr)); +#endif // DEBUG + bbJumpKind = kind; + } + /* The following union describes the jump target(s) of this block */ union { unsigned bbJumpOffs; // PC offset (temporary only) @@ -1556,7 +1574,7 @@ inline BBArrayIterator BBSwitchTargetList::end() const inline BasicBlock::BBSuccList::BBSuccList(const BasicBlock* block) { assert(block != nullptr); - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: diff --git a/src/coreclr/jit/codegenarm.cpp b/src/coreclr/jit/codegenarm.cpp index e8ebf46272fc4..54c4b7e20dcd5 100644 --- a/src/coreclr/jit/codegenarm.cpp +++ b/src/coreclr/jit/codegenarm.cpp @@ -124,7 +124,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block) assert(block->isBBCallAlwaysPair()); assert(block->bbNext != NULL); - assert(block->bbNext->bbJumpKind == BBJ_ALWAYS); + assert(block->bbNext->KindIs(BBJ_ALWAYS)); assert(block->bbNext->bbJumpDest != NULL); assert(block->bbNext->bbJumpDest->bbFlags & BBF_FINALLY_TARGET); @@ -630,7 +630,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -1294,7 +1294,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void 
CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); diff --git a/src/coreclr/jit/codegenarm64.cpp b/src/coreclr/jit/codegenarm64.cpp index ed198e37a7328..6d22044c156b9 100644 --- a/src/coreclr/jit/codegenarm64.cpp +++ b/src/coreclr/jit/codegenarm64.cpp @@ -3749,7 +3749,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -4650,7 +4650,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); @@ -4841,7 +4841,7 @@ void CodeGen::genCodeForSelect(GenTreeOp* tree) // void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); GenTree* op1 = tree->gtGetOp1(); GenTree* op2 = tree->gtGetOp2(); diff --git a/src/coreclr/jit/codegenarmarch.cpp b/src/coreclr/jit/codegenarmarch.cpp index a0a9967b24e04..9a3698627fac5 100644 --- a/src/coreclr/jit/codegenarmarch.cpp +++ b/src/coreclr/jit/codegenarmarch.cpp @@ -5515,7 +5515,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->bbJumpKind == BBJ_RETURN); + noway_assert(block->KindIs(BBJ_RETURN)); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 022294c810ec3..8ccac405a37dd 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -376,7 +376,7 @@ void CodeGen::genMarkLabelsForCodegen() for (BasicBlock* const block : compiler->Blocks()) { - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_ALWAYS: // This will also handle the BBJ_ALWAYS of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. 
case BBJ_COND: @@ -2256,7 +2256,7 @@ void CodeGen::genReportEH() { for (BasicBlock* const block : compiler->Blocks()) { - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { ++clonedFinallyCount; } @@ -2582,7 +2582,7 @@ void CodeGen::genReportEH() unsigned reportedClonedFinallyCount = 0; for (BasicBlock* const block : compiler->Blocks()) { - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { UNATIVE_OFFSET hndBeg, hndEnd; diff --git a/src/coreclr/jit/codegenlinear.cpp b/src/coreclr/jit/codegenlinear.cpp index d36eeb32210f9..c1b93541c14c8 100644 --- a/src/coreclr/jit/codegenlinear.cpp +++ b/src/coreclr/jit/codegenlinear.cpp @@ -330,7 +330,7 @@ void CodeGen::genCodeForBBlist() // // Note: We need to have set compCurBB before calling emitAddLabel // - if ((block->bbPrev != nullptr) && (block->bbPrev->bbJumpKind == BBJ_COND) && + if ((block->bbPrev != nullptr) && block->bbPrev->KindIs(BBJ_COND) && (block->bbWeight != block->bbPrev->bbWeight)) { JITDUMP("Adding label due to BB weight difference: BBJ_COND " FMT_BB " with weight " FMT_WT @@ -619,7 +619,7 @@ void CodeGen::genCodeForBBlist() { // We only need the NOP if we're not going to generate any more code as part of the block end. - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_ALWAYS: case BBJ_THROW: @@ -662,7 +662,7 @@ void CodeGen::genCodeForBBlist() /* Do we need to generate a jump or return? */ - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_RETURN: genExitCode(block); @@ -812,10 +812,10 @@ void CodeGen::genCodeForBBlist() assert(ShouldAlignLoops()); assert(!block->isBBCallAlwaysPairTail()); #if FEATURE_EH_CALLFINALLY_THUNKS - assert(block->bbJumpKind != BBJ_CALLFINALLY); + assert(!block->KindIs(BBJ_CALLFINALLY)); #endif // FEATURE_EH_CALLFINALLY_THUNKS - GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->bbJumpKind == BBJ_ALWAYS)); + GetEmitter()->emitLoopAlignment(DEBUG_ARG1(block->KindIs(BBJ_ALWAYS))); } if ((block->bbNext != nullptr) && (block->bbNext->isLoopAlign())) @@ -2615,7 +2615,7 @@ void CodeGen::genStoreLongLclVar(GenTree* treeNode) // void CodeGen::genCodeForJcc(GenTreeCC* jcc) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); assert(jcc->OperIs(GT_JCC)); inst_JCC(jcc->gtCondition, compiler->compCurBB->bbJumpDest); diff --git a/src/coreclr/jit/codegenloongarch64.cpp b/src/coreclr/jit/codegenloongarch64.cpp index f5b9e20b7c3b1..075b1f1c847d1 100644 --- a/src/coreclr/jit/codegenloongarch64.cpp +++ b/src/coreclr/jit/codegenloongarch64.cpp @@ -1217,7 +1217,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->bbJumpKind == BBJ_RETURN); + noway_assert(block->KindIs(BBJ_RETURN)); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ @@ -2932,7 +2932,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -4140,7 +4140,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // A GT_JCMP node is created for an integer-comparison's conditional branch. 
void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); assert(tree->OperIs(GT_JCMP)); assert(!varTypeIsFloating(tree)); diff --git a/src/coreclr/jit/codegenriscv64.cpp b/src/coreclr/jit/codegenriscv64.cpp index 479cd7709c6ff..6b858312ae328 100644 --- a/src/coreclr/jit/codegenriscv64.cpp +++ b/src/coreclr/jit/codegenriscv64.cpp @@ -886,7 +886,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) { SetHasTailCalls(true); - noway_assert(block->bbJumpKind == BBJ_RETURN); + noway_assert(block->KindIs(BBJ_RETURN)); noway_assert(block->GetFirstLIRNode() != nullptr); /* figure out what jump we have */ @@ -2578,7 +2578,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -3784,7 +3784,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJumpCompare(GenTreeOpCC* tree) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); assert(tree->OperIs(GT_JCMP)); assert(!varTypeIsFloating(tree)); diff --git a/src/coreclr/jit/codegenxarch.cpp b/src/coreclr/jit/codegenxarch.cpp index 6ff22fcf66d81..f5eb3cbf80256 100644 --- a/src/coreclr/jit/codegenxarch.cpp +++ b/src/coreclr/jit/codegenxarch.cpp @@ -373,7 +373,7 @@ void CodeGen::genEHFinallyOrFilterRet(BasicBlock* block) } else { - assert(block->bbJumpKind == BBJ_EHFILTERRET); + assert(block->KindIs(BBJ_EHFILTERRET)); // The return value has already been computed. 
instGen_Return(0); @@ -1445,7 +1445,7 @@ void CodeGen::genCodeForCompare(GenTreeOp* tree) // void CodeGen::genCodeForJTrue(GenTreeOp* jtrue) { - assert(compiler->compCurBB->bbJumpKind == BBJ_COND); + assert(compiler->compCurBB->KindIs(BBJ_COND)); GenTree* op = jtrue->gtGetOp1(); regNumber reg = genConsumeReg(op); @@ -4267,7 +4267,7 @@ void CodeGen::genTableBasedSwitch(GenTree* treeNode) // emits the table and an instruction to get the address of the first element void CodeGen::genJumpTable(GenTree* treeNode) { - noway_assert(compiler->compCurBB->bbJumpKind == BBJ_SWITCH); + noway_assert(compiler->compCurBB->KindIs(BBJ_SWITCH)); assert(treeNode->OperGet() == GT_JMPTABLE); unsigned jumpCount = compiler->compCurBB->bbJumpSwt->bbsCount; @@ -10245,7 +10245,7 @@ void CodeGen::genFnEpilog(BasicBlock* block) if (jmpEpilog) { - noway_assert(block->bbJumpKind == BBJ_RETURN); + noway_assert(block->KindIs(BBJ_RETURN)); noway_assert(block->GetFirstLIRNode()); // figure out what jump we have diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp index e8c146a21707f..c3f63b48e4ab4 100644 --- a/src/coreclr/jit/compiler.cpp +++ b/src/coreclr/jit/compiler.cpp @@ -5275,7 +5275,7 @@ PhaseStatus Compiler::placeLoopAlignInstructions() } // If there is an unconditional jump (which is not part of callf/always pair) - if (opts.compJitHideAlignBehindJmp && (block->bbJumpKind == BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail()) + if (opts.compJitHideAlignBehindJmp && block->KindIs(BBJ_ALWAYS) && !block->isBBCallAlwaysPairTail()) { // Track the lower weight blocks if (block->bbWeight < minBlockSoFar) @@ -5300,7 +5300,7 @@ PhaseStatus Compiler::placeLoopAlignInstructions() bool unmarkedLoopAlign = false; #if FEATURE_EH_CALLFINALLY_THUNKS - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { // It must be a retless BBJ_CALLFINALLY if we get here. assert(!block->isBBCallAlwaysPair()); diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp index a786b56edc29d..43d8e927c65f7 100644 --- a/src/coreclr/jit/compiler.hpp +++ b/src/coreclr/jit/compiler.hpp @@ -635,7 +635,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->bbJumpKind != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -649,7 +649,7 @@ BasicBlockVisit BasicBlock::VisitAllSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->bbJumpKind != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -769,7 +769,7 @@ BasicBlockVisit BasicBlock::VisitRegularSuccs(Compiler* comp, TFunc func) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->bbJumpKind != BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) + if (!bcall->KindIs(BBJ_CALLFINALLY) || (bcall->bbJumpDest != finBeg)) { continue; } @@ -3125,7 +3125,7 @@ inline bool Compiler::fgIsThrowHlpBlk(BasicBlock* block) return false; } - if (!(block->bbFlags & BBF_INTERNAL) || block->bbJumpKind != BBJ_THROW) + if (!(block->bbFlags & BBF_INTERNAL) || !block->KindIs(BBJ_THROW)) { return false; } @@ -3224,7 +3224,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) fgRemoveBlockAsPred(block); // Update jump kind after the scrub. 
- block->bbJumpKind = BBJ_THROW; + block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); // Any block with a throw is rare block->bbSetRunRarely(); @@ -3236,7 +3236,7 @@ inline void Compiler::fgConvertBBToThrowBB(BasicBlock* block) if (isCallAlwaysPair) { BasicBlock* leaveBlk = block->bbNext; - noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS); + noway_assert(leaveBlk->KindIs(BBJ_ALWAYS)); // leaveBlk is now unreachable, so scrub the pred lists. leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; diff --git a/src/coreclr/jit/emitarm.cpp b/src/coreclr/jit/emitarm.cpp index c1dc431c93728..33ae40ee208ef 100644 --- a/src/coreclr/jit/emitarm.cpp +++ b/src/coreclr/jit/emitarm.cpp @@ -4379,7 +4379,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount /* = 0 #ifdef DEBUG // Mark the finally call - if (ins == INS_b && emitComp->compCurBB->bbJumpKind == BBJ_CALLFINALLY) + if (ins == INS_b && emitComp->compCurBB->KindIs(BBJ_CALLFINALLY)) { id->idDebugOnlyInfo()->idFinallyCall = true; } @@ -4523,7 +4523,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp index f0428d222fc6c..5e0b4f2e78a95 100644 --- a/src/coreclr/jit/emitarm64.cpp +++ b/src/coreclr/jit/emitarm64.cpp @@ -8495,7 +8495,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } @@ -8670,7 +8670,7 @@ void emitter::emitIns_J(instruction ins, BasicBlock* dst, int instrCount) #ifdef DEBUG // Mark the finally call - if (ins == INS_bl_local && emitComp->compCurBB->bbJumpKind == BBJ_CALLFINALLY) + if (ins == INS_bl_local && emitComp->compCurBB->KindIs(BBJ_CALLFINALLY)) { id->idDebugOnlyInfo()->idFinallyCall = true; } diff --git a/src/coreclr/jit/emitloongarch64.cpp b/src/coreclr/jit/emitloongarch64.cpp index 73f2dffebada8..40c4937fe3b6f 100644 --- a/src/coreclr/jit/emitloongarch64.cpp +++ b/src/coreclr/jit/emitloongarch64.cpp @@ -2046,7 +2046,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitriscv64.cpp b/src/coreclr/jit/emitriscv64.cpp index 51772e889a12e..de9df4abd5c0a 100644 --- a/src/coreclr/jit/emitriscv64.cpp +++ b/src/coreclr/jit/emitriscv64.cpp @@ -1058,7 +1058,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { id->idDebugOnlyInfo()->idCatchRet = true; } diff --git a/src/coreclr/jit/emitxarch.cpp b/src/coreclr/jit/emitxarch.cpp index 65789413500cd..d03bb82ea9cc5 100644 --- a/src/coreclr/jit/emitxarch.cpp +++ b/src/coreclr/jit/emitxarch.cpp @@ -7614,7 +7614,7 @@ void emitter::emitIns_R_L(instruction ins, emitAttr attr, BasicBlock* dst, regNu #ifdef DEBUG // Mark the catch return - if (emitComp->compCurBB->bbJumpKind == BBJ_EHCATCHRET) + if (emitComp->compCurBB->KindIs(BBJ_EHCATCHRET)) { 
id->idDebugOnlyInfo()->idCatchRet = true; } @@ -9221,7 +9221,7 @@ void emitter::emitIns_J(instruction ins, #ifdef DEBUG // Mark the finally call - if (ins == INS_call && emitComp->compCurBB->bbJumpKind == BBJ_CALLFINALLY) + if (ins == INS_call && emitComp->compCurBB->KindIs(BBJ_CALLFINALLY)) { id->idDebugOnlyInfo()->idFinallyCall = true; } diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp index 00925dcf12c2b..9853f3f47b26e 100644 --- a/src/coreclr/jit/fgbasic.cpp +++ b/src/coreclr/jit/fgbasic.cpp @@ -206,7 +206,7 @@ BasicBlock* Compiler::fgNewBasicBlock(BBjumpKinds jumpKind) /* Allocate the block descriptor */ block = bbNewBasicBlock(jumpKind); - noway_assert(block->bbJumpKind == jumpKind); + noway_assert(block->KindIs(jumpKind)); /* Append the block to the end of the global basic block list */ @@ -395,7 +395,7 @@ void Compiler::fgChangeSwitchBlock(BasicBlock* oldSwitchBlock, BasicBlock* newSw { noway_assert(oldSwitchBlock != nullptr); noway_assert(newSwitchBlock != nullptr); - noway_assert(oldSwitchBlock->bbJumpKind == BBJ_SWITCH); + noway_assert(oldSwitchBlock->KindIs(BBJ_SWITCH)); assert(fgPredsComputed); // Walk the switch's jump table, updating the predecessor for each branch. @@ -457,7 +457,7 @@ void Compiler::fgReplaceSwitchJumpTarget(BasicBlock* blockSwitch, BasicBlock* ne noway_assert(blockSwitch != nullptr); noway_assert(newTarget != nullptr); noway_assert(oldTarget != nullptr); - noway_assert(blockSwitch->bbJumpKind == BBJ_SWITCH); + noway_assert(blockSwitch->KindIs(BBJ_SWITCH)); assert(fgPredsComputed); // For the jump targets values that match oldTarget of our BBJ_SWITCH @@ -537,7 +537,7 @@ void Compiler::fgReplaceJumpTarget(BasicBlock* block, BasicBlock* newTarget, Bas assert(block != nullptr); assert(fgPredsComputed); - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_CALLFINALLY: case BBJ_COND: @@ -911,7 +911,7 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed } // Determine if the call site is in a no-return block - if (isInlining && (impInlineInfo->iciBlock->bbJumpKind == BBJ_THROW)) + if (isInlining && impInlineInfo->iciBlock->KindIs(BBJ_THROW)) { compInlineResult->Note(InlineObservation::CALLSITE_IN_NORETURN_REGION); } @@ -2721,7 +2721,7 @@ void Compiler::fgMarkBackwardJump(BasicBlock* targetBlock, BasicBlock* sourceBlo for (BasicBlock* const block : Blocks(targetBlock, sourceBlock)) { - if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && (block->bbJumpKind != BBJ_RETURN)) + if (((block->bbFlags & BBF_BACKWARD_JUMP) == 0) && !block->KindIs(BBJ_RETURN)) { block->bbFlags |= BBF_BACKWARD_JUMP; compHasBackwardJump = true; @@ -2771,7 +2771,7 @@ void Compiler::fgLinkBasicBlocks() for (BasicBlock* const curBBdesc : Blocks()) { - switch (curBBdesc->bbJumpKind) + switch (curBBdesc->GetBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: @@ -3675,7 +3675,7 @@ void Compiler::fgFindBasicBlocks() // Still inside the filter block->setHndIndex(XTnum); - if (block->bbJumpKind == BBJ_EHFILTERRET) + if (block->KindIs(BBJ_EHFILTERRET)) { // Mark catch handler as successor. block->bbJumpDest = hndBegBB; @@ -3808,7 +3808,7 @@ void Compiler::fgFindBasicBlocks() // BBJ_EHFINALLYRET that were imported to BBJ_EHFAULTRET. if ((hndBegBB->bbCatchTyp == BBCT_FAULT) && block->KindIs(BBJ_EHFINALLYRET)) { - block->bbJumpKind = BBJ_EHFAULTRET; + block->SetBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); } } @@ -4015,9 +4015,9 @@ void Compiler::fgFixEntryFlowForOSR() // Now branch from method start to the OSR entry. 
// fgEnsureFirstBBisScratch(); - assert(fgFirstBB->bbJumpKind == BBJ_NONE); + assert(fgFirstBB->KindIs(BBJ_NONE)); fgRemoveRefPred(fgFirstBB->bbNext, fgFirstBB); - fgFirstBB->bbJumpKind = BBJ_ALWAYS; + fgFirstBB->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgFirstBB->bbJumpDest = fgOSREntryBB; FlowEdge* const edge = fgAddRefPred(fgOSREntryBB, fgFirstBB); edge->setLikelihood(1.0); @@ -4057,7 +4057,7 @@ void Compiler::fgCheckBasicBlockControlFlow() continue; } - switch (blk->bbJumpKind) + switch (blk->GetBBJumpKind()) { case BBJ_NONE: // block flows into the next one (no jump) @@ -4099,14 +4099,14 @@ void Compiler::fgCheckBasicBlockControlFlow() HBtab = ehGetDsc(blk->getHndIndex()); // Endfilter allowed only in a filter block - if (blk->bbJumpKind == BBJ_EHFILTERRET) + if (blk->KindIs(BBJ_EHFILTERRET)) { if (!HBtab->HasFilter()) { BADCODE("Unexpected endfilter"); } } - else if (blk->bbJumpKind == BBJ_EHFILTERRET) + else if (blk->KindIs(BBJ_EHFILTERRET)) { // endfinally allowed only in a finally block if (!HBtab->HasFinallyHandler()) @@ -4114,7 +4114,7 @@ void Compiler::fgCheckBasicBlockControlFlow() BADCODE("Unexpected endfinally"); } } - else if (blk->bbJumpKind == BBJ_EHFAULTRET) + else if (blk->KindIs(BBJ_EHFAULTRET)) { // 'endfault' (alias of IL 'endfinally') allowed only in a fault block if (!HBtab->HasFaultHandler()) @@ -4560,7 +4560,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) { // We'd like to use fgNewBBafter(), but we need to update the preds list before linking in the new block. // (We need the successors of 'curr' to be correct when we do this.) - BasicBlock* newBlock = bbNewBasicBlock(curr->bbJumpKind); + BasicBlock* newBlock = bbNewBasicBlock(curr->GetBBJumpKind()); // Start the new block with no refs. When we set the preds below, this will get updated correctly. newBlock->bbRefs = 0; @@ -4568,7 +4568,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) // For each successor of the original block, set the new block as their predecessor. // Note we are using the "rational" version of the successor iterator that does not hide the finallyret arcs. // Without these arcs, a block 'b' may not be a member of succs(preds(b)) - if (curr->bbJumpKind != BBJ_SWITCH) + if (!curr->KindIs(BBJ_SWITCH)) { for (BasicBlock* const succ : curr->Succs(this)) { @@ -4628,7 +4628,7 @@ BasicBlock* Compiler::fgSplitBlockAtEnd(BasicBlock* curr) curr->bbFlags &= ~(BBF_HAS_JMP | BBF_RETLESS_CALL); // Default to fallthru, and add the arc for that. - curr->bbJumpKind = BBJ_NONE; + curr->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); fgAddRefPred(newBlock, curr); return newBlock; @@ -4874,7 +4874,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) JITDUMP("Splitting edge from " FMT_BB " to " FMT_BB "; adding " FMT_BB "\n", curr->bbNum, succ->bbNum, newBlock->bbNum); - if (curr->bbJumpKind == BBJ_COND) + if (curr->KindIs(BBJ_COND)) { fgReplacePred(succ, curr, newBlock); if (curr->bbJumpDest == succ) @@ -4884,7 +4884,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) } fgAddRefPred(newBlock, curr); } - else if (curr->bbJumpKind == BBJ_SWITCH) + else if (curr->KindIs(BBJ_SWITCH)) { // newBlock replaces 'succ' in the switch. 
fgReplaceSwitchJumpTarget(curr, newBlock, succ); @@ -4894,7 +4894,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) } else { - assert(curr->bbJumpKind == BBJ_ALWAYS); + assert(curr->KindIs(BBJ_ALWAYS)); fgReplacePred(succ, curr, newBlock); curr->bbJumpDest = newBlock; fgAddRefPred(newBlock, curr); @@ -4907,7 +4907,7 @@ BasicBlock* Compiler::fgSplitEdge(BasicBlock* curr, BasicBlock* succ) // This isn't accurate, but it is complex to compute a reasonable number so just assume that we take the // branch 50% of the time. // - if (curr->bbJumpKind != BBJ_ALWAYS) + if (!curr->KindIs(BBJ_ALWAYS)) { newBlock->inheritWeightPercentage(curr, 50); } @@ -5054,7 +5054,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } #endif // FEATURE_EH_FUNCLETS - if (bPrev->bbJumpKind == BBJ_CALLFINALLY) + if (bPrev->KindIs(BBJ_CALLFINALLY)) { // bPrev CALL becomes RETLESS as the BBJ_ALWAYS block is unreachable bPrev->bbFlags |= BBF_RETLESS_CALL; @@ -5063,7 +5063,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) NO_WAY("No retless call finally blocks; need unwind target instead"); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } - else if (bPrev->bbJumpKind == BBJ_ALWAYS && bPrev->bbJumpDest == block->bbNext && + else if (bPrev->KindIs(BBJ_ALWAYS) && bPrev->bbJumpDest == block->bbNext && !(bPrev->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (block != fgFirstColdBlock) && (block->bbNext != fgFirstColdBlock)) { @@ -5071,7 +5071,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) // Note that we don't do it if bPrev follows a BBJ_CALLFINALLY block (BBF_KEEP_BBJ_ALWAYS), // because that would violate our invariant that BBJ_CALLFINALLY blocks are followed by // BBJ_ALWAYS blocks. - bPrev->bbJumpKind = BBJ_NONE; + bPrev->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } // If this is the first Cold basic block update fgFirstColdBlock @@ -5092,7 +5092,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if (block->isBBCallAlwaysPair()) { BasicBlock* leaveBlk = block->bbNext; - noway_assert(leaveBlk->bbJumpKind == BBJ_ALWAYS); + noway_assert(leaveBlk->KindIs(BBJ_ALWAYS)); leaveBlk->bbFlags &= ~BBF_DONT_REMOVE; leaveBlk->bbRefs = 0; @@ -5104,7 +5104,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) fgClearFinallyTargetBit(leaveBlk->bbJumpDest); #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) } - else if (block->bbJumpKind == BBJ_RETURN) + else if (block->KindIs(BBJ_RETURN)) { fgRemoveReturnBlock(block); } @@ -5129,7 +5129,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) #ifdef DEBUG /* Some extra checks for the empty case */ - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_NONE: break; @@ -5139,7 +5139,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) noway_assert(block->bbJumpDest != block); /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ - noway_assert(bPrev && bPrev->bbJumpKind == BBJ_NONE); + noway_assert(bPrev && bPrev->KindIs(BBJ_NONE)); break; default: @@ -5154,7 +5154,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) BasicBlock* succBlock; - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { succBlock = block->bbJumpDest; } @@ -5207,7 +5207,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) /* Must be a fall through to next block */ - noway_assert(block->bbJumpKind == BBJ_NONE); + noway_assert(block->KindIs(BBJ_NONE)); /* old block no longer gets the 
extra ref count for being the first block */ block->bbRefs--; @@ -5235,7 +5235,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) /* If predBlock is a new predecessor, then add it to succBlock's predecessor's list. */ - if (predBlock->bbJumpKind != BBJ_SWITCH) + if (!predBlock->KindIs(BBJ_SWITCH)) { // Even if the pred is not a switch, we could have a conditional branch // to the fallthrough, so duplicate there could be preds @@ -5246,7 +5246,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) } /* change all jumps to the removed block */ - switch (predBlock->bbJumpKind) + switch (predBlock->GetBBJumpKind()) { default: noway_assert(!"Unexpected bbJumpKind in fgRemoveBlock()"); @@ -5257,10 +5257,10 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) PREFIX_ASSUME(bPrev != nullptr); /* In the case of BBJ_ALWAYS we have to change the type of its predecessor */ - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { /* bPrev now becomes a BBJ_ALWAYS */ - bPrev->bbJumpKind = BBJ_ALWAYS; + bPrev->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); bPrev->bbJumpDest = succBlock; } break; @@ -5313,7 +5313,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if (bPrev != nullptr) { - switch (bPrev->bbJumpKind) + switch (bPrev->GetBBJumpKind()) { case BBJ_CALLFINALLY: // If prev is a BBJ_CALLFINALLY it better be marked as RETLESS @@ -5333,7 +5333,7 @@ void Compiler::fgRemoveBlock(BasicBlock* block, bool unreachable) if ((bPrev == fgFirstBB) || !bPrev->isBBCallAlwaysPairTail()) { // It's safe to change the jump type - bPrev->bbJumpKind = BBJ_NONE; + bPrev->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } } break; @@ -5378,11 +5378,11 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) if (bSrc->bbFallsThrough() && (bSrc->bbNext != bDst)) { - switch (bSrc->bbJumpKind) + switch (bSrc->GetBBJumpKind()) { case BBJ_NONE: - bSrc->bbJumpKind = BBJ_ALWAYS; + bSrc->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); bSrc->bbJumpDest = bDst; JITDUMP("Block " FMT_BB " ended with a BBJ_NONE, Changed to an unconditional jump to " FMT_BB "\n", bSrc->bbNum, bSrc->bbJumpDest->bbNum); @@ -5459,10 +5459,10 @@ BasicBlock* Compiler::fgConnectFallThrough(BasicBlock* bSrc, BasicBlock* bDst) // If bSrc is an unconditional branch to the next block // then change it to a BBJ_NONE block // - if ((bSrc->bbJumpKind == BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && + if (bSrc->KindIs(BBJ_ALWAYS) && !(bSrc->bbFlags & BBF_KEEP_BBJ_ALWAYS) && (bSrc->bbJumpDest == bSrc->bbNext)) { - bSrc->bbJumpKind = BBJ_NONE; + bSrc->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); JITDUMP("Changed an unconditional jump from " FMT_BB " to the next block " FMT_BB " into a BBJ_NONE block\n", bSrc->bbNum, bSrc->bbNext->bbNum); @@ -6273,14 +6273,14 @@ bool Compiler::fgIsBetterFallThrough(BasicBlock* bCur, BasicBlock* bAlt) } else { - if (bAlt->bbJumpKind == BBJ_ALWAYS) + if (bAlt->KindIs(BBJ_ALWAYS)) { // Our result is true if bAlt's weight is more than bCur's weight result = (bAlt->bbWeight > bCur->bbWeight); } else { - noway_assert(bAlt->bbJumpKind == BBJ_COND); + noway_assert(bAlt->KindIs(BBJ_COND)); // Our result is true if bAlt's weight is more than twice bCur's weight result = (bAlt->bbWeight > (2 * bCur->bbWeight)); } @@ -6570,7 +6570,7 @@ BasicBlock* Compiler::fgFindInsertPoint(unsigned regionIndex, { goodBlk = blk; } - else if ((goodBlk->bbJumpKind == BBJ_COND) || (blk->bbJumpKind != BBJ_COND)) + else if (goodBlk->KindIs(BBJ_COND) || 
!blk->KindIs(BBJ_COND)) { if ((blk == nearBlk) || !reachedNear) { diff --git a/src/coreclr/jit/fgdiagnostic.cpp b/src/coreclr/jit/fgdiagnostic.cpp index afc1bbc1db73e..318e241d35ae0 100644 --- a/src/coreclr/jit/fgdiagnostic.cpp +++ b/src/coreclr/jit/fgdiagnostic.cpp @@ -101,7 +101,7 @@ void Compiler::fgDebugCheckUpdate() if (block->isEmpty() && !(block->bbFlags & BBF_DONT_REMOVE)) { - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_CALLFINALLY: case BBJ_EHFINALLYRET: @@ -143,13 +143,13 @@ void Compiler::fgDebugCheckUpdate() // Check for an unnecessary jumps to the next block bool doAssertOnJumpToNextBlock = false; // unless we have a BBJ_COND or BBJ_ALWAYS we can not assert - if (block->bbJumpKind == BBJ_COND) + if (block->KindIs(BBJ_COND)) { // A conditional branch should never jump to the next block // as it can be folded into a BBJ_NONE; doAssertOnJumpToNextBlock = true; } - else if (block->bbJumpKind == BBJ_ALWAYS) + else if (block->KindIs(BBJ_ALWAYS)) { // Generally we will want to assert if a BBJ_ALWAYS branches to the next block doAssertOnJumpToNextBlock = true; @@ -184,7 +184,7 @@ void Compiler::fgDebugCheckUpdate() /* Make sure BBF_KEEP_BBJ_ALWAYS is set correctly */ - if ((block->bbJumpKind == BBJ_ALWAYS) && prevIsCallAlwaysPair) + if (block->KindIs(BBJ_ALWAYS) && prevIsCallAlwaysPair) { noway_assert(block->bbFlags & BBF_KEEP_BBJ_ALWAYS); } @@ -192,7 +192,7 @@ void Compiler::fgDebugCheckUpdate() /* For a BBJ_CALLFINALLY block we make sure that we are followed by */ /* an BBJ_ALWAYS block with BBF_INTERNAL set */ /* or that it's a BBF_RETLESS_CALL */ - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { assert((block->bbFlags & BBF_RETLESS_CALL) || block->isBBCallAlwaysPair()); } @@ -984,7 +984,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) } } - if (block->bbJumpKind == BBJ_COND) + if (block->KindIs(BBJ_COND)) { fprintf(fgxFile, "\\n"); @@ -1015,11 +1015,11 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) { fprintf(fgxFile, ", shape = \"house\""); } - else if (block->bbJumpKind == BBJ_RETURN) + else if (block->KindIs(BBJ_RETURN)) { fprintf(fgxFile, ", shape = \"invhouse\""); } - else if (block->bbJumpKind == BBJ_THROW) + else if (block->KindIs(BBJ_THROW)) { fprintf(fgxFile, ", shape = \"trapezium\""); } @@ -1035,7 +1035,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) fprintf(fgxFile, "\n bbNum); fprintf(fgxFile, "\n ordinal=\"%d\"", blockOrdinal); - fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->bbJumpKind]); + fprintf(fgxFile, "\n jumpKind=\"%s\"", kindImage[block->GetBBJumpKind()]); if (block->hasTryIndex()) { fprintf(fgxFile, "\n inTry=\"%s\"", "true"); @@ -1152,7 +1152,7 @@ bool Compiler::fgDumpFlowGraph(Phases phase, PhasePosition pos) fprintf(fgxFile, "\n id=\"%d\"", edgeNum); fprintf(fgxFile, "\n source=\"%d\"", bSource->bbNum); fprintf(fgxFile, "\n target=\"%d\"", bTarget->bbNum); - if (bSource->bbJumpKind == BBJ_SWITCH) + if (bSource->KindIs(BBJ_SWITCH)) { if (edge->getDupCount() >= 2) { @@ -2004,7 +2004,7 @@ void Compiler::fgTableDispBasicBlock(BasicBlock* block, int ibcColWidth /* = 0 * } else { - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_COND: printf("-> " FMT_BB "%*s ( cond )", block->bbJumpDest->bbNum, @@ -2606,8 +2606,7 @@ bool BBPredsChecker::CheckEhTryDsc(BasicBlock* block, BasicBlock* blockPred, EHb // block that does a local call to the finally. 
This BBJ_ALWAYS is within // the try region protected by the finally (for x86, ARM), but that's ok. BasicBlock* prevBlock = block->bbPrev; - if (prevBlock->bbJumpKind == BBJ_CALLFINALLY && block->bbJumpKind == BBJ_ALWAYS && - blockPred->bbJumpKind == BBJ_EHFINALLYRET) + if (prevBlock->KindIs(BBJ_CALLFINALLY) && block->KindIs(BBJ_ALWAYS) && blockPred->KindIs(BBJ_EHFINALLYRET)) { return true; } @@ -2634,7 +2633,7 @@ bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHb } // Our try block can call our finally block - if ((block->bbCatchTyp == BBCT_FINALLY) && (blockPred->bbJumpKind == BBJ_CALLFINALLY) && + if ((block->bbCatchTyp == BBCT_FINALLY) && blockPred->KindIs(BBJ_CALLFINALLY) && comp->ehCallFinallyInCorrectRegion(blockPred, block->getHndIndex())) { return true; @@ -2660,7 +2659,7 @@ bool BBPredsChecker::CheckEhHndDsc(BasicBlock* block, BasicBlock* blockPred, EHb bool BBPredsChecker::CheckJump(BasicBlock* blockPred, BasicBlock* block) { - switch (blockPred->bbJumpKind) + switch (blockPred->GetBBJumpKind()) { case BBJ_COND: assert(blockPred->bbNext == block || blockPred->bbJumpDest == block); @@ -2734,7 +2733,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; } @@ -2756,7 +2755,7 @@ bool BBPredsChecker::CheckEHFinallyRet(BasicBlock* blockPred, BasicBlock* block) for (BasicBlock* const bcall : comp->Blocks(comp->fgFirstFuncletBB)) { - if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; } @@ -2878,12 +2877,12 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // if (compPostImportationCleanupDone || ((block->bbFlags & BBF_IMPORTED) != 0)) { - if (block->bbJumpKind == BBJ_COND) + if (block->KindIs(BBJ_COND)) { assert((!allNodesLinked || (block->lastNode()->gtNext == nullptr)) && block->lastNode()->OperIsConditionalJump()); } - else if (block->bbJumpKind == BBJ_SWITCH) + else if (block->KindIs(BBJ_SWITCH)) { assert((!allNodesLinked || (block->lastNode()->gtNext == nullptr)) && (block->lastNode()->gtOper == GT_SWITCH || block->lastNode()->gtOper == GT_SWITCH_TABLE)); @@ -2987,7 +2986,7 @@ void Compiler::fgDebugCheckBBlist(bool checkBBNum /* = false */, bool checkBBRef // Don't depend on predecessors list for the check. for (BasicBlock* const succBlock : block->Succs()) { - if (succBlock->bbJumpKind == BBJ_CALLFINALLY) + if (succBlock->KindIs(BBJ_CALLFINALLY)) { BasicBlock* finallyBlock = succBlock->bbJumpDest; assert(finallyBlock->hasHndIndex()); @@ -3729,7 +3728,7 @@ void Compiler::fgDebugCheckBlockLinks() // If this is a switch, check that the tables are consistent. // Note that we don't call GetSwitchDescMap(), because it has the side-effect // of allocating it if it is not present. - if (block->bbJumpKind == BBJ_SWITCH && m_switchDescMap != nullptr) + if (block->KindIs(BBJ_SWITCH) && m_switchDescMap != nullptr) { SwitchUniqueSuccSet uniqueSuccSet; if (m_switchDescMap->Lookup(block, &uniqueSuccSet)) @@ -4792,13 +4791,13 @@ void Compiler::fgDebugCheckLoopTable() // The pre-header can only be BBJ_ALWAYS or BBJ_NONE and must enter the loop. 
BasicBlock* e = loop.lpEntry; - if (h->bbJumpKind == BBJ_ALWAYS) + if (h->KindIs(BBJ_ALWAYS)) { assert(h->bbJumpDest == e); } else { - assert(h->bbJumpKind == BBJ_NONE); + assert(h->KindIs(BBJ_NONE)); assert(h->bbNext == e); assert(loop.lpTop == e); assert(loop.lpIsTopEntry()); @@ -4907,7 +4906,7 @@ void Compiler::fgDebugCheckLoopTable() // TODO: We might want the following assert, but there are cases where we don't move all // return blocks out of the loop. // Return blocks are not allowed inside a loop; they should have been moved elsewhere. - // assert(block->bbJumpKind != BBJ_RETURN); + // assert(!block->KindIs(BBJ_RETURN)); } else { diff --git a/src/coreclr/jit/fgehopt.cpp b/src/coreclr/jit/fgehopt.cpp index 0d6fedf24ce3e..e5fbe43e1590f 100644 --- a/src/coreclr/jit/fgehopt.cpp +++ b/src/coreclr/jit/fgehopt.cpp @@ -100,7 +100,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() } // If the finally's block jumps back to itself, then it is not empty. - if ((firstBlock->bbJumpKind == BBJ_ALWAYS) && firstBlock->bbJumpDest == firstBlock) + if (firstBlock->KindIs(BBJ_ALWAYS) && firstBlock->bbJumpDest == firstBlock) { JITDUMP("EH#%u finally has basic block that jumps to itself; skipping.\n", XTnum); XTnum++; @@ -142,7 +142,7 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() { BasicBlock* nextBlock = currentBlock->bbNext; - if ((currentBlock->bbJumpKind == BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) + if (currentBlock->KindIs(BBJ_CALLFINALLY) && (currentBlock->bbJumpDest == firstBlock)) { // Retarget the call finally to jump to the return // point. @@ -160,10 +160,10 @@ PhaseStatus Compiler::fgRemoveEmptyFinally() JITDUMP("so that " FMT_BB " jumps to " FMT_BB "; then remove " FMT_BB "\n", currentBlock->bbNum, postTryFinallyBlock->bbNum, leaveBlock->bbNum); - noway_assert(leaveBlock->bbJumpKind == BBJ_ALWAYS); + noway_assert(leaveBlock->KindIs(BBJ_ALWAYS)); currentBlock->bbJumpDest = postTryFinallyBlock; - currentBlock->bbJumpKind = BBJ_ALWAYS; + currentBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Ref count updates. fgAddRefPred(postTryFinallyBlock, currentBlock); @@ -373,7 +373,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() // Look for blocks that are always jumps to a call finally // pair that targets the finally - if (firstTryBlock->bbJumpKind != BBJ_ALWAYS) + if (!firstTryBlock->KindIs(BBJ_ALWAYS)) { JITDUMP("EH#%u first try block " FMT_BB " not jump to a callfinally; skipping.\n", XTnum, firstTryBlock->bbNum); @@ -437,7 +437,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() for (BasicBlock* block = firstCallFinallyRangeBlock; block != endCallFinallyRangeBlock; block = block->bbNext) { - if ((block->bbJumpKind == BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) + if (block->KindIs(BBJ_CALLFINALLY) && (block->bbJumpDest == firstHandlerBlock)) { assert(block->isBBCallAlwaysPair()); @@ -463,7 +463,7 @@ PhaseStatus Compiler::fgRemoveEmptyTry() // Time to optimize. 
// // (1) Convert the callfinally to a normal jump to the handler - callFinally->bbJumpKind = BBJ_ALWAYS; + callFinally->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Identify the leave block and the continuation BasicBlock* const leave = callFinally->bbNext; @@ -536,13 +536,13 @@ PhaseStatus Compiler::fgRemoveEmptyTry() block->clearHndIndex(); } - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { Statement* finallyRet = block->lastStmt(); GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(block, finallyRet); - block->bbJumpKind = BBJ_ALWAYS; + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = continuation; fgAddRefPred(continuation, block); fgRemoveRefPred(leave, block); @@ -738,7 +738,7 @@ PhaseStatus Compiler::fgCloneFinally() for (const BasicBlock* block = firstBlock; block != nextBlock; block = block->bbNext) { - if (block->bbJumpKind == BBJ_SWITCH) + if (block->KindIs(BBJ_SWITCH)) { hasSwitch = true; break; @@ -753,7 +753,7 @@ PhaseStatus Compiler::fgCloneFinally() regionStmtCount++; } - hasFinallyRet = hasFinallyRet || (block->bbJumpKind == BBJ_EHFINALLYRET); + hasFinallyRet = hasFinallyRet || (block->KindIs(BBJ_EHFINALLYRET)); isAllRare = isAllRare && block->isRunRarely(); } @@ -821,11 +821,11 @@ PhaseStatus Compiler::fgCloneFinally() // through to a callfinally. BasicBlock* jumpDest = nullptr; - if ((block->bbJumpKind == BBJ_NONE) && (block == lastTryBlock)) + if (block->KindIs(BBJ_NONE) && (block == lastTryBlock)) { jumpDest = block->bbNext; } - else if (block->bbJumpKind == BBJ_ALWAYS) + else if (block->KindIs(BBJ_ALWAYS)) { jumpDest = block->bbJumpDest; } @@ -989,8 +989,7 @@ PhaseStatus Compiler::fgCloneFinally() { BasicBlock* const placeToMoveAfter = firstCallFinallyBlock->bbPrev; - if ((placeToMoveAfter->bbJumpKind == BBJ_ALWAYS) && - (placeToMoveAfter->bbJumpDest == normalCallFinallyBlock)) + if (placeToMoveAfter->KindIs(BBJ_ALWAYS) && (placeToMoveAfter->bbJumpDest == normalCallFinallyBlock)) { JITDUMP("Moving callfinally " FMT_BB " to be first in line, before " FMT_BB "\n", normalCallFinallyBlock->bbNum, firstCallFinallyBlock->bbNum); @@ -1050,7 +1049,7 @@ PhaseStatus Compiler::fgCloneFinally() // Avoid asserts when `fgNewBBinRegion` verifies the handler table, by mapping any cloned finally // return blocks to BBJ_ALWAYS (which we would do below if we didn't do it here). - BBjumpKinds bbNewJumpKind = (block->bbJumpKind == BBJ_EHFINALLYRET) ? BBJ_ALWAYS : block->bbJumpKind; + BBjumpKinds bbNewJumpKind = (block->KindIs(BBJ_EHFINALLYRET)) ? BBJ_ALWAYS : block->GetBBJumpKind(); if (block == firstBlock) { @@ -1132,13 +1131,13 @@ PhaseStatus Compiler::fgCloneFinally() { BasicBlock* newBlock = blockMap[block]; - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { Statement* finallyRet = newBlock->lastStmt(); GenTree* finallyRetExpr = finallyRet->GetRootNode(); assert(finallyRetExpr->gtOper == GT_RETFILT); fgRemoveStmt(newBlock, finallyRet); - assert(newBlock->bbJumpKind == BBJ_ALWAYS); // we mapped this above already + assert(newBlock->KindIs(BBJ_ALWAYS)); // we mapped this above already newBlock->bbJumpDest = normalCallFinallyReturn; fgAddRefPred(normalCallFinallyReturn, newBlock); @@ -1181,7 +1180,7 @@ PhaseStatus Compiler::fgCloneFinally() // This call returns to the expected spot, so // retarget it to branch to the clone. 
currentBlock->bbJumpDest = firstCloneBlock; - currentBlock->bbJumpKind = BBJ_ALWAYS; + currentBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // Ref count updates. fgAddRefPred(firstCloneBlock, currentBlock); @@ -1195,7 +1194,7 @@ PhaseStatus Compiler::fgCloneFinally() // All preds should be BBJ_EHFINALLYRETs from the finally. for (BasicBlock* const leavePred : leaveBlock->PredBlocks()) { - assert(leavePred->bbJumpKind == BBJ_EHFINALLYRET); + assert(leavePred->KindIs(BBJ_EHFINALLYRET)); assert(leavePred->getHndIndex() == XTnum); } @@ -1241,9 +1240,9 @@ PhaseStatus Compiler::fgCloneFinally() BasicBlock* const hndEndIter = HBtab->ebdHndLast->bbNext; for (BasicBlock* block = hndBegIter; block != hndEndIter; block = block->bbNext) { - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { - block->bbJumpKind = BBJ_EHFAULTRET; + block->SetBBJumpKind(BBJ_EHFAULTRET DEBUG_ARG(this)); } } } @@ -1407,7 +1406,7 @@ void Compiler::fgDebugCheckTryFinallyExits() // logically "belong" to a child region and the exit // path validity will be checked when looking at the // try blocks in that region. - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { continue; } @@ -1433,13 +1432,13 @@ void Compiler::fgDebugCheckTryFinallyExits() bool isCallToFinally = false; #if FEATURE_EH_CALLFINALLY_THUNKS - if (succBlock->bbJumpKind == BBJ_CALLFINALLY) + if (succBlock->KindIs(BBJ_CALLFINALLY)) { // case (a1) isCallToFinally = isFinally && (succBlock->bbJumpDest == finallyBlock); } #else - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { // case (a2) isCallToFinally = isFinally && (block->bbJumpDest == finallyBlock); @@ -1453,7 +1452,7 @@ void Compiler::fgDebugCheckTryFinallyExits() // case (b) isJumpToClonedFinally = true; } - else if (succBlock->bbJumpKind == BBJ_ALWAYS) + else if (succBlock->KindIs(BBJ_ALWAYS)) { if (succBlock->isEmpty()) { @@ -1466,7 +1465,7 @@ void Compiler::fgDebugCheckTryFinallyExits() } } } - else if (succBlock->bbJumpKind == BBJ_NONE) + else if (succBlock->KindIs(BBJ_NONE)) { if (succBlock->isEmpty()) { @@ -1899,7 +1898,7 @@ bool Compiler::fgRetargetBranchesToCanonicalCallFinally(BasicBlock* block, { // We expect callfinallys to be invoked by a BBJ_ALWAYS at this // stage in compilation. - if (block->bbJumpKind != BBJ_ALWAYS) + if (!block->KindIs(BBJ_ALWAYS)) { // Possible paranoia assert here -- no flow successor of // this block should be a callfinally for this try. 
@@ -2195,7 +2194,7 @@ PhaseStatus Compiler::fgTailMergeThrows() BasicBlock* const predBlock = predEdge->getSourceBlock(); nextPredEdge = predEdge->getNextPredEdge(); - switch (predBlock->bbJumpKind) + switch (predBlock->GetBBJumpKind()) { case BBJ_NONE: { diff --git a/src/coreclr/jit/fgflow.cpp b/src/coreclr/jit/fgflow.cpp index 040cd378ac9c0..d2669ccaca382 100644 --- a/src/coreclr/jit/fgflow.cpp +++ b/src/coreclr/jit/fgflow.cpp @@ -343,7 +343,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) BasicBlock* bNext; - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_CALLFINALLY: if (!(block->bbFlags & BBF_RETLESS_CALL)) @@ -354,7 +354,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) bNext = block->bbNext; /* bNext is an unreachable BBJ_ALWAYS block */ - noway_assert(bNext->bbJumpKind == BBJ_ALWAYS); + noway_assert(bNext->KindIs(BBJ_ALWAYS)); while (bNext->countOfInEdges() > 0) { @@ -403,7 +403,7 @@ void Compiler::fgRemoveBlockAsPred(BasicBlock* block) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if ((bcall->bbFlags & BBF_REMOVED) || bcall->bbJumpKind != BBJ_CALLFINALLY || + if ((bcall->bbFlags & BBF_REMOVED) || !bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; @@ -470,7 +470,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock* for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; } @@ -491,7 +491,7 @@ void Compiler::fgSuccOfFinallyRetWork(BasicBlock* block, unsigned i, BasicBlock* Compiler::SwitchUniqueSuccSet Compiler::GetDescriptorForSwitch(BasicBlock* switchBlk) { - assert(switchBlk->bbJumpKind == BBJ_SWITCH); + assert(switchBlk->KindIs(BBJ_SWITCH)); BlockToSwitchDescMap* switchMap = GetSwitchDescMap(); SwitchUniqueSuccSet res; if (switchMap->Lookup(switchBlk, &res)) @@ -546,7 +546,7 @@ void Compiler::SwitchUniqueSuccSet::UpdateTarget(CompAllocator alloc, BasicBlock* from, BasicBlock* to) { - assert(switchBlk->bbJumpKind == BBJ_SWITCH); // Precondition. + assert(switchBlk->KindIs(BBJ_SWITCH)); // Precondition. // Is "from" still in the switch table (because it had more than one entry before?) bool fromStillPresent = false; diff --git a/src/coreclr/jit/fginline.cpp b/src/coreclr/jit/fginline.cpp index f29293c6b8c96..a844199697a60 100644 --- a/src/coreclr/jit/fginline.cpp +++ b/src/coreclr/jit/fginline.cpp @@ -675,12 +675,12 @@ class SubstitutePlaceholdersAndDevirtualizeWalker : public GenTreeVisitorIsIntegralConst(0)) { - block->bbJumpKind = BBJ_ALWAYS; + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_compiler)); m_compiler->fgRemoveRefPred(block->bbNext, block); } else { - block->bbJumpKind = BBJ_NONE; + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(m_compiler)); m_compiler->fgRemoveRefPred(block->bbJumpDest, block); } } @@ -1444,7 +1444,7 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) // DDB 91389: Don't throw away the (only) inlinee block // when its return type is not BBJ_RETURN. // In other words, we need its BBJ_ to perform the right thing. - if (InlineeCompiler->fgFirstBB->bbJumpKind == BBJ_RETURN) + if (InlineeCompiler->fgFirstBB->KindIs(BBJ_RETURN)) { // Inlinee contains just one BB. So just insert its statement list to topBlock. 
if (InlineeCompiler->fgFirstBB->bbStmtList != nullptr) @@ -1523,20 +1523,20 @@ void Compiler::fgInsertInlineeBlocks(InlineInfo* pInlineInfo) block->bbFlags |= BBF_INTERNAL; } - if (block->bbJumpKind == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { noway_assert((block->bbFlags & BBF_HAS_JMP) == 0); if (block->bbNext) { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_ALWAYS to bottomBlock " FMT_BB "\n", block->bbNum, bottomBlock->bbNum); - block->bbJumpKind = BBJ_ALWAYS; + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = bottomBlock; } else { JITDUMP("\nConvert bbJumpKind of " FMT_BB " to BBJ_NONE\n", block->bbNum); - block->bbJumpKind = BBJ_NONE; + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } fgAddRefPred(bottomBlock, block); @@ -1945,7 +1945,7 @@ Statement* Compiler::fgInlinePrependStatements(InlineInfo* inlineInfo) unsigned lclCnt = InlineeMethodInfo->locals.numArgs; bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; + bool bbIsReturn = block->KindIs(BBJ_RETURN); // If the callee contains zero-init locals, we need to explicitly initialize them if we are // in a loop or if the caller doesn't have compInitMem set. Otherwise we can rely on the diff --git a/src/coreclr/jit/fgopt.cpp b/src/coreclr/jit/fgopt.cpp index 7d4c0f9b11ac4..18637ac7b49ca 100644 --- a/src/coreclr/jit/fgopt.cpp +++ b/src/coreclr/jit/fgopt.cpp @@ -292,7 +292,7 @@ void Compiler::fgComputeReturnBlocks() { // If this is a BBJ_RETURN block, add it to our list of all BBJ_RETURN blocks. This list is only // used to find return blocks. - if (block->bbJumpKind == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { fgReturnBlocks = new (this, CMK_Reachability) BasicBlockList(block, fgReturnBlocks); } @@ -362,7 +362,7 @@ void Compiler::fgComputeEnterBlocksSet() // For ARM code, prevent creating retless calls by adding the BBJ_ALWAYS to the "fgAlwaysBlks" list. for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { assert(block->isBBCallAlwaysPair()); @@ -466,7 +466,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) block->bbFlags &= ~(BBF_REMOVED | BBF_INTERNAL); block->bbFlags |= BBF_IMPORTED; - block->bbJumpKind = BBJ_THROW; + block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); block->bbSetRunRarely(); #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -474,7 +474,7 @@ bool Compiler::fgRemoveUnreachableBlocks(CanRemoveBlockBody canRemoveBlock) // the target node (of BBJ_ALWAYS) since BBJ_CALLFINALLY node is getting converted to a BBJ_THROW. if (bIsBBCallAlwaysPair) { - noway_assert(block->bbNext->bbJumpKind == BBJ_ALWAYS); + noway_assert(block->bbNext->KindIs(BBJ_ALWAYS)); fgClearFinallyTargetBit(block->bbNext->bbJumpDest); } #endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) @@ -638,7 +638,7 @@ bool Compiler::fgRemoveDeadBlocks() // For ARM code, prevent creating retless calls by adding the BBJ_ALWAYS to the "fgAlwaysBlks" list. for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { assert(block->isBBCallAlwaysPair()); @@ -1650,7 +1650,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // plausible flow target. Simplest is to just mark it as a throw. 
if (bbIsHandlerBeg(newTryEntry->bbNext)) { - newTryEntry->bbJumpKind = BBJ_THROW; + newTryEntry->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); } else { @@ -1787,7 +1787,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() GenTree* const jumpIfEntryStateZero = gtNewOperNode(GT_JTRUE, TYP_VOID, compareEntryStateToZero); fgNewStmtAtBeg(fromBlock, jumpIfEntryStateZero); - fromBlock->bbJumpKind = BBJ_COND; + fromBlock->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); fromBlock->bbJumpDest = toBlock; fgAddRefPred(toBlock, fromBlock); newBlock->inheritWeight(fromBlock); @@ -1827,7 +1827,7 @@ PhaseStatus Compiler::fgPostImportationCleanup() // it can be reached directly from "outside". // assert(fgFirstBB->bbJumpDest == osrEntry); - assert(fgFirstBB->bbJumpKind == BBJ_ALWAYS); + assert(fgFirstBB->KindIs(BBJ_ALWAYS)); if (entryJumpTarget != osrEntry) { @@ -1918,7 +1918,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) noway_assert(block->bbNext == bNext); - if (block->bbJumpKind != BBJ_NONE) + if (!block->KindIs(BBJ_NONE)) { return false; } @@ -2002,7 +2002,7 @@ bool Compiler::fgCanCompactBlocks(BasicBlock* block, BasicBlock* bNext) // (if they are valid). for (BasicBlock* const predBlock : bNext->PredBlocks()) { - if (predBlock->bbJumpKind == BBJ_SWITCH) + if (predBlock->KindIs(BBJ_SWITCH)) { return false; } @@ -2027,7 +2027,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) { noway_assert(block != nullptr); noway_assert((block->bbFlags & BBF_REMOVED) == 0); - noway_assert(block->bbJumpKind == BBJ_NONE); + noway_assert(block->KindIs(BBJ_NONE)); noway_assert(bNext == block->bbNext); noway_assert(bNext != nullptr); @@ -2234,7 +2234,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) // or if both block and bNext have non-zero weights // then we will use the max weight for the block. // - if (bNext->bbJumpKind == BBJ_THROW) + if (bNext->KindIs(BBJ_THROW)) { block->bbSetRunRarely(); } @@ -2268,7 +2268,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* set the right links */ - block->bbJumpKind = bNext->bbJumpKind; + block->SetBBJumpKind(bNext->GetBBJumpKind() DEBUG_ARG(this)); VarSetOps::AssignAllowUninitRhs(this, block->bbLiveOut, bNext->bbLiveOut); // Update the beginning and ending IL offsets (bbCodeOffs and bbCodeOffsEnd). 
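// [Illustrative aside, not part of the patch] A minimal sketch of the shape of the
// fgCanCompactBlocks precondition touched above, using mock types. The real check has many
// more conditions (EH regions, BBJ_SWITCH predecessors, block flags, GC safety), which are
// deliberately not modeled here.
#include <vector>

namespace sketch_compact
{
enum BBjumpKind { BBJ_NONE, BBJ_ALWAYS, BBJ_COND, BBJ_SWITCH };

struct Block
{
    BBjumpKind          kind = BBJ_NONE;
    Block*              next = nullptr;   // lexically next block
    std::vector<Block*> preds;            // predecessor blocks

    bool KindIs(BBjumpKind k) const { return kind == k; }
};

// 'block' can absorb its lexically next block only if it falls through into it
// and nothing else reaches that block. (The real code additionally rejects
// switch predecessors and various EH/flag combinations.)
bool CanCompact(const Block* block)
{
    const Block* next = block->next;
    return (next != nullptr) && block->KindIs(BBJ_NONE) &&
           (next->preds.size() == 1) && (next->preds[0] == block);
}
} // namespace sketch_compact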
@@ -2328,7 +2328,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) /* Set the jump targets */ - switch (bNext->bbJumpKind) + switch (bNext->GetBBJumpKind()) { case BBJ_CALLFINALLY: // Propagate RETLESS property @@ -2345,7 +2345,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) fgReplacePred(bNext->bbJumpDest, bNext, block); /* Update the predecessor list for 'bNext->bbNext' if it is different than 'bNext->bbJumpDest' */ - if (bNext->bbJumpKind == BBJ_COND && bNext->bbJumpDest != bNext->bbNext) + if (bNext->KindIs(BBJ_COND) && bNext->bbJumpDest != bNext->bbNext) { fgReplacePred(bNext->bbNext, bNext, block); } @@ -2375,7 +2375,7 @@ void Compiler::fgCompactBlocks(BasicBlock* block, BasicBlock* bNext) for (BasicBlock* bcall = begBlk; bcall != endBlk; bcall = bcall->bbNext) { - if (bcall->bbJumpKind != BBJ_CALLFINALLY || bcall->bbJumpDest != finBeg) + if (!bcall->KindIs(BBJ_CALLFINALLY) || bcall->bbJumpDest != finBeg) { continue; } @@ -2627,14 +2627,14 @@ void Compiler::fgUnreachableBlock(BasicBlock* block) // void Compiler::fgRemoveConditionalJump(BasicBlock* block) { - noway_assert(block->bbJumpKind == BBJ_COND && block->bbJumpDest == block->bbNext); + noway_assert(block->KindIs(BBJ_COND) && block->bbJumpDest == block->bbNext); assert(compRationalIRForm == block->IsLIR()); FlowEdge* flow = fgGetPredForBlock(block->bbNext, block); noway_assert(flow->getDupCount() == 2); // Change the BBJ_COND to BBJ_NONE, and adjust the refCount and dupCount. - block->bbJumpKind = BBJ_NONE; + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); --block->bbNext->bbRefs; flow->decrementDupCount(); @@ -2735,7 +2735,7 @@ bool Compiler::fgOptimizeBranchToEmptyUnconditional(BasicBlock* block, BasicBloc bool optimizeJump = true; assert(bDest->isEmpty()); - assert(bDest->bbJumpKind == BBJ_ALWAYS); + assert(bDest->KindIs(BBJ_ALWAYS)); // We do not optimize jumps between two different try regions. 
// However jumping to a block that is not in any try region is OK @@ -2886,7 +2886,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) bool madeChanges = false; BasicBlock* bPrev = block->bbPrev; - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_SWITCH: @@ -2930,7 +2930,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) } /* Empty GOTO can be removed iff bPrev is BBJ_NONE */ - if (bPrev->bbJumpKind != BBJ_NONE) + if (!bPrev->KindIs(BBJ_NONE)) { break; } @@ -2957,7 +2957,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) { /* If this block follows a BBJ_CALLFINALLY do not remove it * (because we don't know who may jump to it) */ - if (bPrev->bbJumpKind == BBJ_CALLFINALLY) + if (bPrev->KindIs(BBJ_CALLFINALLY)) { break; } @@ -2980,7 +2980,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) { BasicBlock* succBlock; - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { succBlock = block->bbJumpDest; } @@ -2997,7 +2997,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) bool okToMerge = true; // assume it's ok for (BasicBlock* const predBlock : block->PredBlocks()) { - if (predBlock->bbJumpKind == BBJ_EHCATCHRET) + if (predBlock->KindIs(BBJ_EHCATCHRET)) { assert(predBlock->bbJumpDest == block); okToMerge = false; // we can't get rid of the empty block @@ -3119,7 +3119,7 @@ bool Compiler::fgOptimizeEmptyBlock(BasicBlock* block) // bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) { - assert(block->bbJumpKind == BBJ_SWITCH); + assert(block->KindIs(BBJ_SWITCH)); unsigned jmpCnt = block->bbJumpSwt->bbsCount; BasicBlock** jmpTab = block->bbJumpSwt->bbsDstTab; @@ -3134,7 +3134,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) bNewDest = bDest; // Do we have a JUMP to an empty unconditional JUMP block? - if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) && + if (bDest->isEmpty() && bDest->KindIs(BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { bool optimizeJump = true; @@ -3312,7 +3312,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) // Change the switch jump into a BBJ_ALWAYS block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; - block->bbJumpKind = BBJ_ALWAYS; + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); if (jmpCnt > 1) { for (unsigned i = 1; i < jmpCnt; ++i) @@ -3377,7 +3377,7 @@ bool Compiler::fgOptimizeSwitchBranches(BasicBlock* block) } block->bbJumpDest = block->bbJumpSwt->bbsDstTab[0]; - block->bbJumpKind = BBJ_COND; + block->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); JITDUMP("After:\n"); DISPNODE(switchTree); @@ -3502,7 +3502,7 @@ bool Compiler::fgBlockIsGoodTailDuplicationCandidate(BasicBlock* target, unsigne // // This is by no means the only kind of tail that it is beneficial to duplicate, // just the only one we recognize for now. 
- if (target->bbJumpKind != BBJ_COND) + if (!target->KindIs(BBJ_COND)) { return false; } @@ -3741,7 +3741,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // if (opts.IsOSR()) { - assert(target->bbJumpKind == BBJ_COND); + assert(target->KindIs(BBJ_COND)); if ((target->bbNext->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) { @@ -3788,7 +3788,7 @@ bool Compiler::fgOptimizeUncondBranchToSimpleCond(BasicBlock* block, BasicBlock* // Fix up block's flow // - block->bbJumpKind = BBJ_COND; + block->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); block->bbJumpDest = target->bbJumpDest; fgAddRefPred(block->bbJumpDest, block); fgRemoveRefPred(target, block); @@ -3829,7 +3829,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi assert(block->bbNext == bNext); assert(block->bbPrev == bPrev); - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { // We can't remove it if it is a branch from hot => cold if (!fgInDifferentRegions(block, bNext)) @@ -3841,7 +3841,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi if (!block->isBBCallAlwaysPairTail()) { /* the unconditional jump is to the next BB */ - block->bbJumpKind = BBJ_NONE; + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); #ifdef DEBUG if (verbose) { @@ -3859,7 +3859,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi else { /* remove the conditional statement at the end of block */ - noway_assert(block->bbJumpKind == BBJ_COND); + noway_assert(block->KindIs(BBJ_COND)); noway_assert(block->isValid()); #ifdef DEBUG @@ -3967,7 +3967,7 @@ bool Compiler::fgOptimizeBranchToNext(BasicBlock* block, BasicBlock* bNext, Basi /* Conditional is gone - simply fall into the next block */ - block->bbJumpKind = BBJ_NONE; + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); /* Update bbRefs and bbNum - Conditional predecessors to the same * block are counted twice so we have to remove one of them */ @@ -4002,7 +4002,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) return false; } - if (bJump->bbJumpKind != BBJ_ALWAYS) + if (!bJump->KindIs(BBJ_ALWAYS)) { return false; } @@ -4021,7 +4021,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) BasicBlock* bDest = bJump->bbJumpDest; - if (bDest->bbJumpKind != BBJ_COND) + if (!bDest->KindIs(BBJ_COND)) { return false; } @@ -4232,7 +4232,7 @@ bool Compiler::fgOptimizeBranch(BasicBlock* bJump) // We need to update the following flags of the bJump block if they were set in the bDest block bJump->bbFlags |= bDest->bbFlags & BBF_COPY_PROPAGATE; - bJump->bbJumpKind = BBJ_COND; + bJump->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); bJump->bbJumpDest = bDest->bbNext; /* Update bbRefs and bbPreds */ @@ -4324,7 +4324,7 @@ bool Compiler::fgOptimizeSwitchJumps() // assert(!block->IsLIR()); - if (block->bbJumpKind != BBJ_SWITCH) + if (!block->KindIs(BBJ_SWITCH)) { continue; } @@ -4393,7 +4393,7 @@ bool Compiler::fgOptimizeSwitchJumps() // Wire up the new control flow. 
// - block->bbJumpKind = BBJ_COND; + block->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); block->bbJumpDest = dominantTarget; FlowEdge* const blockToTargetEdge = fgAddRefPred(dominantTarget, block); FlowEdge* const blockToNewBlockEdge = newBlock->bbPreds; @@ -4516,7 +4516,7 @@ bool Compiler::fgExpandRarelyRunBlocks() noway_assert(tmpbb->isBBCallAlwaysPair()); bPrevPrev = tmpbb; #else - if (tmpbb->bbJumpKind == BBJ_CALLFINALLY) + if (tmpbb->KindIs(BBJ_CALLFINALLY)) { bPrevPrev = tmpbb; } @@ -4610,7 +4610,7 @@ bool Compiler::fgExpandRarelyRunBlocks() const char* reason = nullptr; - switch (bPrev->bbJumpKind) + switch (bPrev->GetBBJumpKind()) { case BBJ_ALWAYS: @@ -4742,7 +4742,7 @@ bool Compiler::fgExpandRarelyRunBlocks() } /* COMPACT blocks if possible */ - if (bPrev->bbJumpKind == BBJ_NONE) + if (bPrev->KindIs(BBJ_NONE)) { if (fgCanCompactBlocks(bPrev, block)) { @@ -4934,7 +4934,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // if (forwardBranch) { - if (bPrev->bbJumpKind == BBJ_ALWAYS) + if (bPrev->KindIs(BBJ_ALWAYS)) { // We can pull up the blocks that the unconditional jump branches to // if the weight of bDest is greater or equal to the weight of block @@ -5017,9 +5017,9 @@ bool Compiler::fgReorderBlocks(bool useProfile) } } } - else // (bPrev->bbJumpKind == BBJ_COND) + else // (bPrev->KindIs(BBJ_COND)) { - noway_assert(bPrev->bbJumpKind == BBJ_COND); + noway_assert(bPrev->KindIs(BBJ_COND)); // // We will reverse branch if the taken-jump to bDest ratio (i.e. 'takenRatio') // is more than 51% @@ -5211,7 +5211,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) /* (bPrev is known to be a normal block at this point) */ if (!isRare) { - if ((bDest == block->bbNext) && (block->bbJumpKind == BBJ_RETURN) && (bPrev->bbJumpKind == BBJ_ALWAYS)) + if ((bDest == block->bbNext) && block->KindIs(BBJ_RETURN) && bPrev->KindIs(BBJ_ALWAYS)) { // This is a common case with expressions like "return Expr1 && Expr2" -- move the return // to establish fall-through. 
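// [Illustrative aside, not part of the patch] The "reverse the branch if the taken-jump
// ratio is more than 51%" rule quoted in the fgReorderBlocks hunk above, as a tiny
// standalone calculation with hypothetical weights. The real code works on FlowEdge
// weights and applies extra profitability checks.
#include <cstdio>

static bool ShouldReverseCondition(double blockWeight, double takenEdgeWeight)
{
    if (blockWeight <= 0.0)
    {
        return false;
    }
    const double takenRatio = takenEdgeWeight / blockWeight;
    return takenRatio > 0.51; // mostly-taken branch: reverse it so the hot path falls through
}

int main()
{
    // Hypothetical numbers: a BBJ_COND block executed 100 times whose jump is taken 60 times.
    std::printf("reverse? %d\n", ShouldReverseCondition(100.0, 60.0)); // prints 1
    return 0;
}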
@@ -5245,7 +5245,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) const bool optimizedBranch = fgOptimizeBranch(bPrev); if (optimizedBranch) { - noway_assert(bPrev->bbJumpKind == BBJ_COND); + noway_assert(bPrev->KindIs(BBJ_COND)); optimizedBranches = true; } continue; @@ -5422,7 +5422,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) // if (bEnd2->isBBCallAlwaysPair()) { - noway_assert(bNext->bbJumpKind == BBJ_ALWAYS); + noway_assert(bNext->KindIs(BBJ_ALWAYS)); // Move bEnd2 and bNext forward bEnd2 = bNext; bNext = bNext->bbNext; @@ -5501,12 +5501,12 @@ bool Compiler::fgReorderBlocks(bool useProfile) { if (bDest != nullptr) { - if (bPrev->bbJumpKind == BBJ_COND) + if (bPrev->KindIs(BBJ_COND)) { printf("Decided to reverse conditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); } - else if (bPrev->bbJumpKind == BBJ_ALWAYS) + else if (bPrev->KindIs(BBJ_ALWAYS)) { printf("Decided to straighten unconditional branch at block " FMT_BB " branch to " FMT_BB " ", bPrev->bbNum, bDest->bbNum); @@ -5576,7 +5576,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) noway_assert(bEnd != nullptr); // bEnd can't be a BBJ_CALLFINALLY unless it is a RETLESS call - noway_assert((bEnd->bbJumpKind != BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); + noway_assert(!bEnd->KindIs(BBJ_CALLFINALLY) || (bEnd->bbFlags & BBF_RETLESS_CALL)); // bStartPrev must be set to the block that precedes bStart noway_assert(bStartPrev->bbNext == bStart); @@ -5715,7 +5715,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) BasicBlock* nearBlk = nullptr; BasicBlock* jumpBlk = nullptr; - if ((bEnd->bbJumpKind == BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && + if (bEnd->KindIs(BBJ_ALWAYS) && (!isRare || bEnd->bbJumpDest->isRunRarely()) && fgIsForwardBranch(bEnd, bPrev)) { // Set nearBlk to be the block in [startBlk..endBlk] @@ -5843,7 +5843,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) printf("block " FMT_BB, bStart->bbNum); } - if (bPrev->bbJumpKind == BBJ_COND) + if (bPrev->KindIs(BBJ_COND)) { printf(" by reversing conditional jump at " FMT_BB "\n", bPrev->bbNum); } @@ -5854,7 +5854,7 @@ bool Compiler::fgReorderBlocks(bool useProfile) } #endif // DEBUG - if (bPrev->bbJumpKind == BBJ_COND) + if (bPrev->KindIs(BBJ_COND)) { /* Reverse the bPrev jump condition */ Statement* const condTestStmt = bPrev->lastStmt(); @@ -6102,7 +6102,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) bNext = block->bbNext; bDest = nullptr; - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { bDest = block->bbJumpDest; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, bDest)) @@ -6114,7 +6114,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) } } - if (block->bbJumpKind == BBJ_NONE) + if (block->KindIs(BBJ_NONE)) { bDest = nullptr; if (doTailDuplication && fgOptimizeUncondBranchToSimpleCond(block, block->bbNext)) @@ -6146,7 +6146,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) if (bDest != nullptr) { // Do we have a JUMP to an empty unconditional JUMP block? 
- if (bDest->isEmpty() && (bDest->bbJumpKind == BBJ_ALWAYS) && + if (bDest->isEmpty() && bDest->KindIs(BBJ_ALWAYS) && (bDest != bDest->bbJumpDest)) // special case for self jumps { if (fgOptimizeBranchToEmptyUnconditional(block, bDest)) @@ -6165,12 +6165,12 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // (b) block jump target is elsewhere but join free, and // bNext's jump target has a join. // - if ((block->bbJumpKind == BBJ_COND) && // block is a BBJ_COND block - (bNext != nullptr) && // block is not the last block - (bNext->bbRefs == 1) && // No other block jumps to bNext - (bNext->bbJumpKind == BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block - bNext->isEmpty() && // and it is an empty block - (bNext != bNext->bbJumpDest) && // special case for self jumps + if (block->KindIs(BBJ_COND) && // block is a BBJ_COND block + (bNext != nullptr) && // block is not the last block + (bNext->bbRefs == 1) && // No other block jumps to bNext + bNext->KindIs(BBJ_ALWAYS) && // The next block is a BBJ_ALWAYS block + bNext->isEmpty() && // and it is an empty block + (bNext != bNext->bbJumpDest) && // special case for self jumps (bDest != fgFirstColdBlock) && (!fgInDifferentRegions(block, bDest))) // do not cross hot/cold sections { @@ -6383,7 +6383,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) // // Update the switch jump table such that it follows jumps to jumps: // - if (block->bbJumpKind == BBJ_SWITCH) + if (block->KindIs(BBJ_SWITCH)) { if (fgOptimizeSwitchBranches(block)) { @@ -6418,11 +6418,11 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) #if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM) // Don't remove the BBJ_ALWAYS block of a BBJ_CALLFINALLY/BBJ_ALWAYS pair. - if (block->countOfInEdges() == 0 && bPrev->bbJumpKind == BBJ_CALLFINALLY) + if (block->countOfInEdges() == 0 && bPrev->KindIs(BBJ_CALLFINALLY)) { assert(bPrev->isBBCallAlwaysPair()); noway_assert(!(bPrev->bbFlags & BBF_RETLESS_CALL)); - noway_assert(block->bbJumpKind == BBJ_ALWAYS); + noway_assert(block->KindIs(BBJ_ALWAYS)); bPrev = block; continue; } @@ -6454,7 +6454,7 @@ bool Compiler::fgUpdateFlowGraph(bool doTailDuplication, bool isPhase) } else if (block->countOfInEdges() == 1) { - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: @@ -6551,7 +6551,7 @@ unsigned Compiler::fgGetCodeEstimate(BasicBlock* block) { unsigned costSz = 0; // estimate of block's code size cost - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_NONE: costSz = 0; @@ -6899,7 +6899,7 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) } bool const isNoSplit = stmt == predBlock->firstStmt(); - bool const isFallThrough = (predBlock->bbJumpKind == BBJ_NONE); + bool const isFallThrough = (predBlock->KindIs(BBJ_NONE)); // Is this block possibly better than what we have? // @@ -6976,7 +6976,7 @@ PhaseStatus Compiler::fgHeadTailMerge(bool early) // Fix up the flow. // - predBlock->bbJumpKind = BBJ_ALWAYS; + predBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); predBlock->bbJumpDest = crossJumpTarget; fgRemoveRefPred(block, predBlock); @@ -7067,7 +7067,7 @@ bool Compiler::fgTryOneHeadMerge(BasicBlock* block, bool early) // ternaries in C#). // The logic below could be generalized to BBJ_SWITCH, but this currently // has almost no CQ benefit but does have a TP impact. 
- if ((block->bbJumpKind != BBJ_COND) || (block->bbNext == block->bbJumpDest)) + if (!block->KindIs(BBJ_COND) || (block->bbNext == block->bbJumpDest)) { return false; } diff --git a/src/coreclr/jit/fgprofile.cpp b/src/coreclr/jit/fgprofile.cpp index 16d0b0e307010..6444e45085db7 100644 --- a/src/coreclr/jit/fgprofile.cpp +++ b/src/coreclr/jit/fgprofile.cpp @@ -473,7 +473,7 @@ void BlockCountInstrumentor::RelocateProbes() } JITDUMP("Return " FMT_BB " is successor of possible tail call\n", block->bbNum); - assert(block->bbJumpKind == BBJ_RETURN); + assert(block->KindIs(BBJ_RETURN)); // Scan for critical preds, and add relocated probes to non-critical preds. // @@ -499,12 +499,12 @@ void BlockCountInstrumentor::RelocateProbes() { // Ensure this pred is not a fall through. // - if (pred->bbJumpKind == BBJ_NONE) + if (pred->KindIs(BBJ_NONE)) { - pred->bbJumpKind = BBJ_ALWAYS; + pred->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } - assert(pred->bbJumpKind == BBJ_ALWAYS); + assert(pred->KindIs(BBJ_ALWAYS)); } } @@ -945,7 +945,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) visitor->VisitBlock(block); nBlocks++; - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_CALLFINALLY: { @@ -1028,7 +1028,7 @@ void Compiler::WalkSpanningTree(SpanningTreeVisitor* visitor) JITDUMP("No jump dest for " FMT_BB ", suspect bad code\n", block->bbNum); visitor->Badcode(); } - else if (block->bbJumpKind != BBJ_LEAVE) + else if (!block->KindIs(BBJ_LEAVE)) { JITDUMP("EH RET in " FMT_BB " most-nested in try, suspect bad code\n", block->bbNum); visitor->Badcode(); @@ -1552,9 +1552,9 @@ void EfficientEdgeCountInstrumentor::SplitCriticalEdges() // Importer folding may have changed the block jump kind // to BBJ_NONE. If so, warp it back to BBJ_ALWAYS. // - if (block->bbJumpKind == BBJ_NONE) + if (block->KindIs(BBJ_NONE)) { - block->bbJumpKind = BBJ_ALWAYS; + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); block->bbJumpDest = target; } @@ -1657,7 +1657,7 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() } JITDUMP("Return " FMT_BB " is successor of possible tail call\n", block->bbNum); - assert(block->bbJumpKind == BBJ_RETURN); + assert(block->KindIs(BBJ_RETURN)); // This block should have just one probe, which we no longer need. // @@ -1695,12 +1695,12 @@ void EfficientEdgeCountInstrumentor::RelocateProbes() // Ensure this pred is not a fall through. // - if (pred->bbJumpKind == BBJ_NONE) + if (pred->KindIs(BBJ_NONE)) { - pred->bbJumpKind = BBJ_ALWAYS; + pred->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); pred->bbJumpDest = block; } - assert(pred->bbJumpKind == BBJ_ALWAYS); + assert(pred->KindIs(BBJ_ALWAYS)); } } @@ -3166,7 +3166,7 @@ void EfficientEdgeCountReconstructor::Prepare() m_unknownBlocks++; #ifdef DEBUG - if (block->bbJumpKind == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { nReturns++; } @@ -3233,7 +3233,7 @@ void EfficientEdgeCountReconstructor::Prepare() CLRRandom* const random = m_comp->impInlineRoot()->m_inlineStrategy->GetRandom(JitConfig.JitRandomEdgeCounts()); - const bool isReturn = sourceBlock->bbJumpKind == BBJ_RETURN; + const bool isReturn = sourceBlock->KindIs(BBJ_RETURN); // We simulate the distribution of counts seen in StdOptimizationData.Mibc. 
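// [Illustrative aside, not part of the patch] The recurring pattern in the instrumentation
// hunks above (RelocateProbes, SplitCriticalEdges): before a probe is moved into a
// predecessor, a fall-through (BBJ_NONE) pred is first turned into an explicit BBJ_ALWAYS
// branch to the same block, so the instrumented edge is an explicit one. Mock types; the
// real code also maintains pred lists, ref counts, and block flags.
namespace sketch_probe
{
enum BBjumpKind { BBJ_NONE, BBJ_ALWAYS, BBJ_RETURN };

struct Block
{
    BBjumpKind kind     = BBJ_NONE;
    Block*     jumpDest = nullptr;
};

void MakeFallThroughExplicit(Block* pred, Block* succ)
{
    if (pred->kind == BBJ_NONE)
    {
        pred->kind     = BBJ_ALWAYS; // no more implicit fall-through
        pred->jumpDest = succ;
    }
    // After this call the caller may assume pred->kind == BBJ_ALWAYS.
}
} // namespace sketch_probe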
// @@ -3922,7 +3922,7 @@ void EfficientEdgeCountReconstructor::PropagateEdges(BasicBlock* block, BlockInf // void EfficientEdgeCountReconstructor::MarkInterestingBlocks(BasicBlock* block, BlockInfo* info) { - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_SWITCH: MarkInterestingSwitches(block, info); @@ -3949,7 +3949,7 @@ void EfficientEdgeCountReconstructor::MarkInterestingBlocks(BasicBlock* block, B // void EfficientEdgeCountReconstructor::MarkInterestingSwitches(BasicBlock* block, BlockInfo* info) { - assert(block->bbJumpKind == BBJ_SWITCH); + assert(block->KindIs(BBJ_SWITCH)); // Thresholds for detecting a dominant switch case. // @@ -4429,11 +4429,11 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) bSrc = bDst->bbPreds->getSourceBlock(); // Does this block flow into only one other block - if (bSrc->bbJumpKind == BBJ_NONE) + if (bSrc->KindIs(BBJ_NONE)) { bOnlyNext = bSrc->bbNext; } - else if (bSrc->bbJumpKind == BBJ_ALWAYS) + else if (bSrc->KindIs(BBJ_ALWAYS)) { bOnlyNext = bSrc->bbJumpDest; } @@ -4450,11 +4450,11 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) } // Does this block flow into only one other block - if (bDst->bbJumpKind == BBJ_NONE) + if (bDst->KindIs(BBJ_NONE)) { bOnlyNext = bDst->bbNext; } - else if (bDst->bbJumpKind == BBJ_ALWAYS) + else if (bDst->KindIs(BBJ_ALWAYS)) { bOnlyNext = bDst->bbJumpDest; } @@ -4485,7 +4485,7 @@ bool Compiler::fgComputeMissingBlockWeights(weight_t* returnWeight) // To minimize asmdiffs for now, modify weights only if splitting. if (fgFirstColdBlock != nullptr) { - if (bSrc->bbJumpKind == BBJ_CALLFINALLY) + if (bSrc->KindIs(BBJ_CALLFINALLY)) { newWeight = bSrc->bbWeight; } @@ -4687,7 +4687,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() } slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; - switch (bSrc->bbJumpKind) + switch (bSrc->GetBBJumpKind()) { case BBJ_ALWAYS: case BBJ_EHCATCHRET: @@ -4756,7 +4756,7 @@ PhaseStatus Compiler::fgComputeEdgeWeights() bSrc = edge->getSourceBlock(); slop = BasicBlock::GetSlopFraction(bSrc, bDst) + 1; - if (bSrc->bbJumpKind == BBJ_COND) + if (bSrc->KindIs(BBJ_COND)) { weight_t diff; FlowEdge* otherEdge; diff --git a/src/coreclr/jit/fgprofilesynthesis.cpp b/src/coreclr/jit/fgprofilesynthesis.cpp index 286510cf71d60..90d56a835ff10 100644 --- a/src/coreclr/jit/fgprofilesynthesis.cpp +++ b/src/coreclr/jit/fgprofilesynthesis.cpp @@ -132,7 +132,7 @@ void ProfileSynthesis::AssignLikelihoods() for (BasicBlock* const block : m_comp->Blocks()) { - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -332,8 +332,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // THROW heuristic // - bool const isJumpThrow = (jump->bbJumpKind == BBJ_THROW); - bool const isNextThrow = (next->bbJumpKind == BBJ_THROW); + bool const isJumpThrow = jump->KindIs(BBJ_THROW); + bool const isNextThrow = next->KindIs(BBJ_THROW); if (isJumpThrow != isNextThrow) { @@ -402,8 +402,8 @@ void ProfileSynthesis::AssignLikelihoodCond(BasicBlock* block) // RETURN heuristic // - bool const isJumpReturn = (jump->bbJumpKind == BBJ_RETURN); - bool const isNextReturn = (next->bbJumpKind == BBJ_RETURN); + bool const isJumpReturn = jump->KindIs(BBJ_RETURN); + bool const isNextReturn = next->KindIs(BBJ_RETURN); if (isJumpReturn != isNextReturn) { @@ -499,7 +499,7 @@ void ProfileSynthesis::RepairLikelihoods() for (BasicBlock* const block : m_comp->Blocks()) { - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case 
BBJ_THROW: case BBJ_RETURN: @@ -551,7 +551,7 @@ void ProfileSynthesis::RepairLikelihoods() } JITDUMP("\n"); - if (block->bbJumpKind == BBJ_COND) + if (block->KindIs(BBJ_COND)) { AssignLikelihoodCond(block); } @@ -591,7 +591,7 @@ void ProfileSynthesis::BlendLikelihoods() { weight_t sum = SumOutgoingLikelihoods(block, &likelihoods); - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_THROW: case BBJ_RETURN: @@ -627,7 +627,7 @@ void ProfileSynthesis::BlendLikelihoods() bool const consistent = Compiler::fgProfileWeightsEqual(sum, 1.0, epsilon); bool const zero = Compiler::fgProfileWeightsEqual(block->bbWeight, 0.0, epsilon); - if (block->bbJumpKind == BBJ_COND) + if (block->KindIs(BBJ_COND)) { AssignLikelihoodCond(block); } @@ -1214,7 +1214,7 @@ void ProfileSynthesis::ComputeCyclicProbabilities(SimpleLoop* loop) // // Currently we don't know which edges do this. // - if ((exitBlock->bbJumpKind == BBJ_COND) && (exitBlockWeight > (missingExitWeight + currentExitWeight))) + if (exitBlock->KindIs(BBJ_COND) && (exitBlockWeight > (missingExitWeight + currentExitWeight))) { JITDUMP("Will adjust likelihood of the exit edge from loop exit block " FMT_BB " to reflect capping; current likelihood is " FMT_WT "\n", diff --git a/src/coreclr/jit/flowgraph.cpp b/src/coreclr/jit/flowgraph.cpp index 3b157483cd75f..78dc4571352aa 100644 --- a/src/coreclr/jit/flowgraph.cpp +++ b/src/coreclr/jit/flowgraph.cpp @@ -120,7 +120,7 @@ PhaseStatus Compiler::fgInsertGCPolls() JITDUMP("Selecting CALL poll in block " FMT_BB " because it is the single return block\n", block->bbNum); pollType = GCPOLL_CALL; } - else if (BBJ_SWITCH == block->bbJumpKind) + else if (BBJ_SWITCH == block->GetBBJumpKind()) { // We don't want to deal with all the outgoing edges of a switch block. // @@ -254,15 +254,15 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) BasicBlock* topFallThrough = nullptr; unsigned char lpIndexFallThrough = BasicBlock::NOT_IN_LOOP; - if (top->bbJumpKind == BBJ_COND) + if (top->KindIs(BBJ_COND)) { topFallThrough = top->bbNext; lpIndexFallThrough = topFallThrough->bbNatLoopNum; } BasicBlock* poll = fgNewBBafter(BBJ_NONE, top, true); - bottom = fgNewBBafter(top->bbJumpKind, poll, true); - BBjumpKinds oldJumpKind = top->bbJumpKind; + bottom = fgNewBBafter(top->GetBBJumpKind(), poll, true); + BBjumpKinds oldJumpKind = top->GetBBJumpKind(); unsigned char lpIndex = top->bbNatLoopNum; // Update block flags @@ -372,7 +372,7 @@ BasicBlock* Compiler::fgCreateGCPoll(GCPollType pollType, BasicBlock* block) #endif top->bbJumpDest = bottom; - top->bbJumpKind = BBJ_COND; + top->SetBBJumpKind(BBJ_COND DEBUG_ARG(this)); // Bottom has Top and Poll as its predecessors. Poll has just Top as a predecessor. 
fgAddRefPred(bottom, poll); @@ -1287,7 +1287,7 @@ void Compiler::fgLoopCallMark() for (BasicBlock* const block : Blocks()) { - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_CALLFINALLY: @@ -1728,7 +1728,7 @@ void Compiler::fgAddSyncMethodEnterExit() // non-exceptional cases for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { fgCreateMonitorTree(lvaMonAcquired, info.compThisArg, block, false /*exit*/); } @@ -1772,7 +1772,7 @@ GenTree* Compiler::fgCreateMonitorTree(unsigned lvaMonAcquired, unsigned lvaThis } #endif - if (block->bbJumpKind == BBJ_RETURN && block->lastStmt()->GetRootNode()->gtOper == GT_RETURN) + if (block->KindIs(BBJ_RETURN) && block->lastStmt()->GetRootNode()->gtOper == GT_RETURN) { GenTreeUnOp* retNode = block->lastStmt()->GetRootNode()->AsUnOp(); GenTree* retExpr = retNode->gtOp1; @@ -1821,7 +1821,7 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) assert(genReturnBB != nullptr); assert(genReturnBB != block); assert(fgReturnCount <= 1); // We have a single return for synchronized methods - assert(block->bbJumpKind == BBJ_RETURN); + assert(block->KindIs(BBJ_RETURN)); assert((block->bbFlags & BBF_HAS_JMP) == 0); assert(block->hasTryIndex()); assert(!block->hasHndIndex()); @@ -1837,7 +1837,7 @@ void Compiler::fgConvertSyncReturnToLeave(BasicBlock* block) assert(ehDsc->ebdEnclosingHndIndex == EHblkDsc::NO_ENCLOSING_INDEX); // Convert the BBJ_RETURN to BBJ_ALWAYS, jumping to genReturnBB. - block->bbJumpKind = BBJ_ALWAYS; + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); @@ -1949,7 +1949,7 @@ bool Compiler::fgMoreThanOneReturnBlock() for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { retCnt++; if (retCnt > 1) @@ -2309,7 +2309,7 @@ class MergedReturns // Change BBJ_RETURN to BBJ_ALWAYS targeting const return block. assert((comp->info.compFlags & CORINFO_FLG_SYNCH) == 0); - returnBlock->bbJumpKind = BBJ_ALWAYS; + returnBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); returnBlock->bbJumpDest = constReturnBlock; comp->fgAddRefPred(constReturnBlock, returnBlock); @@ -2596,7 +2596,7 @@ PhaseStatus Compiler::fgAddInternal() for (BasicBlock* block = fgFirstBB; block != lastBlockBeforeGenReturns->bbNext; block = block->bbNext) { - if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if (block->KindIs(BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { merger.Record(block); } @@ -3125,7 +3125,7 @@ void Compiler::fgInsertFuncletPrologBlock(BasicBlock* block) // It's a jump from outside the handler; add it to the newHead preds list and remove // it from the block preds list. 
- switch (predBlock->bbJumpKind) + switch (predBlock->GetBBJumpKind()) { case BBJ_CALLFINALLY: noway_assert(predBlock->bbJumpDest == block); @@ -3451,7 +3451,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // so the code size for block needs be large // enough to make it worth our while // - if ((lblk == nullptr) || (lblk->bbJumpKind != BBJ_COND) || (fgGetCodeEstimate(block) >= 8)) + if ((lblk == nullptr) || !lblk->KindIs(BBJ_COND) || (fgGetCodeEstimate(block) >= 8)) { // This block is now a candidate for first cold block // Also remember the predecessor to this block @@ -3503,7 +3503,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // if (prevToFirstColdBlock->bbFallsThrough()) { - switch (prevToFirstColdBlock->bbJumpKind) + switch (prevToFirstColdBlock->GetBBJumpKind()) { default: noway_assert(!"Unhandled jumpkind in fgDetermineFirstColdBlock()"); @@ -3523,7 +3523,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // This is a slightly more complicated case, because we will // probably need to insert a block to jump to the cold section. // - if (firstColdBlock->isEmpty() && (firstColdBlock->bbJumpKind == BBJ_ALWAYS)) + if (firstColdBlock->isEmpty() && firstColdBlock->KindIs(BBJ_ALWAYS)) { // We can just use this block as the transitionBlock firstColdBlock = firstColdBlock->bbNext; @@ -3548,7 +3548,7 @@ PhaseStatus Compiler::fgDetermineFirstColdBlock() // convert it to BBJ_ALWAYS to force an explicit jump. prevToFirstColdBlock->bbJumpDest = firstColdBlock; - prevToFirstColdBlock->bbJumpKind = BBJ_ALWAYS; + prevToFirstColdBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); break; } } @@ -3981,7 +3981,7 @@ PhaseStatus Compiler::fgSetBlockOrder() (((src)->bbNum < (dst)->bbNum) || (((src)->bbFlags | (dst)->bbFlags) & BBF_GC_SAFE_POINT)) bool partiallyInterruptible = true; - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_ALWAYS: diff --git a/src/coreclr/jit/gschecks.cpp b/src/coreclr/jit/gschecks.cpp index e0937b8975fbb..0953920d6192e 100644 --- a/src/coreclr/jit/gschecks.cpp +++ b/src/coreclr/jit/gschecks.cpp @@ -529,7 +529,7 @@ void Compiler::gsParamsToShadows() // We would have to insert assignments in all such blocks, just before GT_JMP stmnt. for (BasicBlock* const block : Blocks()) { - if (block->bbJumpKind != BBJ_RETURN) + if (!block->KindIs(BBJ_RETURN)) { continue; } diff --git a/src/coreclr/jit/ifconversion.cpp b/src/coreclr/jit/ifconversion.cpp index b4b80f311873f..8489917bb7b54 100644 --- a/src/coreclr/jit/ifconversion.cpp +++ b/src/coreclr/jit/ifconversion.cpp @@ -83,7 +83,7 @@ class OptIfConversionDsc bool OptIfConversionDsc::IfConvertCheckInnerBlockFlow(BasicBlock* block) { // Block should have a single successor or be a return. - if (!(block->GetUniqueSucc() != nullptr || (m_doElseConversion && (block->bbJumpKind == BBJ_RETURN)))) + if (!(block->GetUniqueSucc() != nullptr || (m_doElseConversion && (block->KindIs(BBJ_RETURN))))) { return false; } @@ -137,7 +137,7 @@ bool OptIfConversionDsc::IfConvertCheckThenFlow() { // All the Then blocks up to m_finalBlock are in a valid flow. m_flowFound = true; - if (thenBlock->bbJumpKind == BBJ_RETURN) + if (thenBlock->KindIs(BBJ_RETURN)) { assert(m_finalBlock == nullptr); m_mainOper = GT_RETURN; @@ -553,7 +553,7 @@ void OptIfConversionDsc::IfConvertDump() bool OptIfConversionDsc::optIfConvert() { // Does the block end by branching via a JTRUE after a compare? 
- if (m_startBlock->bbJumpKind != BBJ_COND || m_startBlock->NumSucc() != 2) + if (!m_startBlock->KindIs(BBJ_COND) || m_startBlock->NumSucc() != 2) { return false; } @@ -743,7 +743,7 @@ bool OptIfConversionDsc::optIfConvert() // Update the flow from the original block. m_comp->fgRemoveAllRefPreds(m_startBlock->bbNext, m_startBlock); - m_startBlock->bbJumpKind = BBJ_ALWAYS; + m_startBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(m_comp)); #ifdef DEBUG if (m_comp->verbose) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index baf481d773fa6..91dbf2e5e7687 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -2455,7 +2455,7 @@ GenTree* Compiler::impTypeIsAssignable(GenTree* typeTo, GenTree* typeFrom) void Compiler::verConvertBBToThrowVerificationException(BasicBlock* block DEBUGARG(bool logMsg)) { - block->bbJumpKind = BBJ_THROW; + block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); block->bbFlags |= BBF_FAILED_VERIFICATION; block->bbFlags &= ~BBF_IMPORTED; @@ -4101,7 +4101,7 @@ bool Compiler::impIsImplicitTailCallCandidate( // the block containing call is marked as BBJ_RETURN // We allow shared ret tail call optimization on recursive calls even under // !FEATURE_TAILCALL_OPT_SHARED_RETURN. - if (!isRecursive && (compCurBB->bbJumpKind != BBJ_RETURN)) + if (!isRecursive && !compCurBB->KindIs(BBJ_RETURN)) return false; #endif // !FEATURE_TAILCALL_OPT_SHARED_RETURN @@ -4250,7 +4250,7 @@ void Compiler::impImportLeave(BasicBlock* block) impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; - assert(block->bbJumpKind == BBJ_LEAVE); + assert(block->KindIs(BBJ_LEAVE)); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != NULL); // should be a BB boundary BasicBlock* step = DUMMY_INIT(NULL); @@ -4321,8 +4321,8 @@ void Compiler::impImportLeave(BasicBlock* block) if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); - callBlock = block; - callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY + callBlock = block; + callBlock->SetBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY if (endCatches) { @@ -4344,7 +4344,7 @@ void Compiler::impImportLeave(BasicBlock* block) /* Calling the finally block */ callBlock = fgNewBBinRegion(BBJ_CALLFINALLY, XTnum + 1, 0, step); - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); if (step->bbJumpDest != nullptr) { fgRemoveRefPred(step->bbJumpDest, step); @@ -4419,7 +4419,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (encFinallies == 0) { assert(step == DUMMY_INIT(NULL)); - block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS if (endCatches) { @@ -4523,7 +4523,7 @@ void Compiler::impImportLeave(BasicBlock* block) impSpillSideEffects(true, CHECK_SPILL_ALL DEBUGARG("impImportLeave")); verCurrentState.esStackDepth = 0; - assert(block->bbJumpKind == BBJ_LEAVE); + assert(block->KindIs(BBJ_LEAVE)); assert(fgBBs == (BasicBlock**)0xCDCD || fgLookupBB(jmpAddr) != nullptr); // should be a BB boundary BasicBlock* step = nullptr; @@ -4572,9 +4572,9 @@ void Compiler::impImportLeave(BasicBlock* block) if (step == nullptr) { - step = block; - step->bbJumpKind = BBJ_EHCATCHRET; // convert the BBJ_LEAVE to BBJ_EHCATCHRET - stepType = ST_Catch; + step = block; + step->SetBBJumpKind(BBJ_EHCATCHRET DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_EHCATCHRET + stepType = 
ST_Catch; #ifdef DEBUG if (verbose) @@ -4606,7 +4606,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4651,7 +4651,7 @@ void Compiler::impImportLeave(BasicBlock* block) // the new BBJ_CALLFINALLY is in a different EH region, thus it can't just replace the BBJ_LEAVE, // which might be in the middle of the "try". In most cases, the BBJ_ALWAYS will jump to the // next block, and flow optimizations will remove it. - block->bbJumpKind = BBJ_ALWAYS; + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgRemoveRefPred(block->bbJumpDest, block); block->bbJumpDest = callBlock; fgAddRefPred(callBlock, block); @@ -4672,8 +4672,8 @@ void Compiler::impImportLeave(BasicBlock* block) #else // !FEATURE_EH_CALLFINALLY_THUNKS - callBlock = block; - callBlock->bbJumpKind = BBJ_CALLFINALLY; // convert the BBJ_LEAVE to BBJ_CALLFINALLY + callBlock = block; + callBlock->SetBBJumpKind(BBJ_CALLFINALLY DEBUG_ARG(this)); // convert the BBJ_LEAVE to BBJ_CALLFINALLY #ifdef DEBUG if (verbose) @@ -4708,7 +4708,7 @@ void Compiler::impImportLeave(BasicBlock* block) assert(step->KindIs(BBJ_ALWAYS, BBJ_EHCATCHRET)); #if FEATURE_EH_CALLFINALLY_THUNKS - if (step->bbJumpKind == BBJ_EHCATCHRET) + if (step->KindIs(BBJ_EHCATCHRET)) { // Need to create another step block in the 'try' region that will actually branch to the // call-to-finally thunk. @@ -4758,7 +4758,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4850,12 +4850,12 @@ void Compiler::impImportLeave(BasicBlock* block) if (stepType == ST_FinallyReturn) { - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); } else { assert(stepType == ST_Catch); - assert(step->bbJumpKind == BBJ_EHCATCHRET); + assert(step->KindIs(BBJ_EHCATCHRET)); } /* Create a new exit block in the try region for the existing step block to jump to in this scope */ @@ -4908,7 +4908,7 @@ void Compiler::impImportLeave(BasicBlock* block) if (step == nullptr) { - block->bbJumpKind = BBJ_ALWAYS; // convert the BBJ_LEAVE to a BBJ_ALWAYS + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); // convert the BBJ_LEAVE to a BBJ_ALWAYS #ifdef DEBUG if (verbose) @@ -4931,7 +4931,7 @@ void Compiler::impImportLeave(BasicBlock* block) #if defined(TARGET_ARM) if (stepType == ST_FinallyReturn) { - assert(step->bbJumpKind == BBJ_ALWAYS); + assert(step->KindIs(BBJ_ALWAYS)); // Mark the target of a finally return step->bbJumpDest->bbFlags |= BBF_FINALLY_TARGET; } @@ -4992,9 +4992,9 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) // work around this we will duplicate B0 (call it B0Dup) before resetting. B0Dup is marked as BBJ_CALLFINALLY and // only serves to pair up with B1 (BBJ_ALWAYS) that got orphaned. Now during orphan block deletion B0Dup and B1 // will be treated as pair and handled correctly. 
- if (block->bbJumpKind == BBJ_CALLFINALLY) + if (block->KindIs(BBJ_CALLFINALLY)) { - BasicBlock* dupBlock = bbNewBasicBlock(block->bbJumpKind); + BasicBlock* dupBlock = bbNewBasicBlock(block->GetBBJumpKind()); dupBlock->bbFlags = block->bbFlags; dupBlock->bbJumpDest = block->bbJumpDest; fgAddRefPred(dupBlock->bbJumpDest, dupBlock); @@ -5024,7 +5024,7 @@ void Compiler::impResetLeaveBlock(BasicBlock* block, unsigned jmpAddr) } #endif // FEATURE_EH_FUNCLETS - block->bbJumpKind = BBJ_LEAVE; + block->SetBBJumpKind(BBJ_LEAVE DEBUG_ARG(this)); fgInitBBLookup(); fgRemoveRefPred(block->bbJumpDest, block); @@ -6002,7 +6002,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) // Change block to BBJ_THROW so we won't trigger importation of successors. // - block->bbJumpKind = BBJ_THROW; + block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); // If this method has a explicit generic context, the only uses of it may be in // the IL for this block. So assume it's used. @@ -6715,7 +6715,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) /* Mark current bb as end of filter */ assert(compCurBB->bbFlags & BBF_DONT_REMOVE); - assert(compCurBB->bbJumpKind == BBJ_EHFILTERRET); + assert(compCurBB->KindIs(BBJ_EHFILTERRET)); /* Mark catch handler as successor */ @@ -7256,7 +7256,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) } JITDUMP(" %04X", jmpAddr); - if (block->bbJumpKind != BBJ_LEAVE) + if (!block->KindIs(BBJ_LEAVE)) { impResetLeaveBlock(block, jmpAddr); } @@ -7302,16 +7302,16 @@ void Compiler::impImportBlockCode(BasicBlock* block) { // We may have already modified `block`'s jump kind, if this is a re-importation. // - if (block->bbJumpKind == BBJ_COND) + if (block->KindIs(BBJ_COND)) { JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); fgRemoveRefPred(block->bbJumpDest, block); - block->bbJumpKind = BBJ_NONE; + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { - assert(block->bbJumpKind == BBJ_NONE); + assert(block->KindIs(BBJ_NONE)); } if (op1->gtFlags & GTF_GLOB_EFFECT) @@ -7363,11 +7363,11 @@ void Compiler::impImportBlockCode(BasicBlock* block) assert(!opts.compDbgCode); BBjumpKinds foldedJumpKind = (BBjumpKinds)(op1->AsIntCon()->gtIconVal ? BBJ_ALWAYS : BBJ_NONE); - assertImp((block->bbJumpKind == BBJ_COND) // normal case - || (block->bbJumpKind == foldedJumpKind)); // this can happen if we are reimporting the - // block for the second time + // BBJ_COND: normal case + // foldedJumpKind: this can happen if we are reimporting the block for the second time + assertImp(block->KindIs(BBJ_COND, foldedJumpKind)); // normal case - if (block->bbJumpKind == BBJ_COND) + if (block->KindIs(BBJ_COND)) { if (foldedJumpKind == BBJ_NONE) { @@ -7380,7 +7380,7 @@ void Compiler::impImportBlockCode(BasicBlock* block) block->bbJumpDest->bbNum); fgRemoveRefPred(block->bbNext, block); } - block->bbJumpKind = foldedJumpKind; + block->SetBBJumpKind(foldedJumpKind DEBUG_ARG(this)); } break; @@ -7548,16 +7548,16 @@ void Compiler::impImportBlockCode(BasicBlock* block) { // We may have already modified `block`'s jump kind, if this is a re-importation. 
// - if (block->bbJumpKind == BBJ_COND) + if (block->KindIs(BBJ_COND)) { JITDUMP(FMT_BB " both branches and falls through to " FMT_BB ", changing to BBJ_NONE\n", block->bbNum, block->bbNext->bbNum); fgRemoveRefPred(block->bbJumpDest, block); - block->bbJumpKind = BBJ_NONE; + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { - assert(block->bbJumpKind == BBJ_NONE); + assert(block->KindIs(BBJ_NONE)); } if (op1->gtFlags & GTF_GLOB_EFFECT) @@ -7633,13 +7633,13 @@ void Compiler::impImportBlockCode(BasicBlock* block) if (curJump != block->bbNext) { // transform the basic block into a BBJ_ALWAYS - block->bbJumpKind = BBJ_ALWAYS; + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = curJump; } else { // transform the basic block into a BBJ_NONE - block->bbJumpKind = BBJ_NONE; + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } foundVal = true; } @@ -7657,8 +7657,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) { printf("\nSwitch folded at " FMT_BB "\n", block->bbNum); printf(FMT_BB " becomes a %s", block->bbNum, - block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->bbJumpKind == BBJ_ALWAYS) + block->KindIs(BBJ_ALWAYS) ? "BBJ_ALWAYS" : "BBJ_NONE"); + if (block->KindIs(BBJ_ALWAYS)) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -8532,8 +8532,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) } bool bbInALoop = impBlockIsInALoop(block); - bool bbIsReturn = (block->bbJumpKind == BBJ_RETURN) && - (!compIsForInlining() || (impInlineInfo->iciBlock->bbJumpKind == BBJ_RETURN)); + bool bbIsReturn = block->KindIs(BBJ_RETURN) && + (!compIsForInlining() || (impInlineInfo->iciBlock->KindIs(BBJ_RETURN))); LclVarDsc* const lclDsc = lvaGetDesc(lclNum); if (fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { @@ -11279,7 +11279,7 @@ void Compiler::impImportBlock(BasicBlock* block) unsigned multRef = impCanReimport ? unsigned(~0) : 0; - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_COND: @@ -12117,11 +12117,11 @@ void Compiler::impImport() JITDUMP("Marking leading BBF_INTERNAL block " FMT_BB " as BBF_IMPORTED\n", entryBlock->bbNum); entryBlock->bbFlags |= BBF_IMPORTED; - if (entryBlock->bbJumpKind == BBJ_NONE) + if (entryBlock->KindIs(BBJ_NONE)) { entryBlock = entryBlock->bbNext; } - else if (opts.IsOSR() && (entryBlock->bbJumpKind == BBJ_ALWAYS)) + else if (opts.IsOSR() && entryBlock->KindIs(BBJ_ALWAYS)) { entryBlock = entryBlock->bbJumpDest; } @@ -12239,7 +12239,7 @@ void Compiler::impFixPredLists() continue; } - if (finallyBlock->bbJumpKind != BBJ_EHFINALLYRET) + if (!finallyBlock->KindIs(BBJ_EHFINALLYRET)) { continue; } diff --git a/src/coreclr/jit/importercalls.cpp b/src/coreclr/jit/importercalls.cpp index e8efd2a316980..02774119c82e1 100644 --- a/src/coreclr/jit/importercalls.cpp +++ b/src/coreclr/jit/importercalls.cpp @@ -1095,7 +1095,7 @@ var_types Compiler::impImportCall(OPCODE opcode, // assert(compCurBB is not a catch, finally or filter block); // assert(compCurBB is not a try block protected by a finally block); - assert(!isExplicitTailCall || compCurBB->bbJumpKind == BBJ_RETURN); + assert(!isExplicitTailCall || compCurBB->KindIs(BBJ_RETURN)); // Ask VM for permission to tailcall if (canTailCall) @@ -1271,10 +1271,10 @@ var_types Compiler::impImportCall(OPCODE opcode, // BBJ_RETURN successor. Mark that successor so we can handle it specially during profile // instrumentation. 
// - if (compCurBB->bbJumpKind != BBJ_RETURN) + if (!compCurBB->KindIs(BBJ_RETURN)) { BasicBlock* const successor = compCurBB->GetUniqueSucc(); - assert(successor->bbJumpKind == BBJ_RETURN); + assert(successor->KindIs(BBJ_RETURN)); successor->bbFlags |= BBF_TAILCALL_SUCCESSOR; optMethodFlags |= OMF_HAS_TAILCALL_SUCCESSOR; } diff --git a/src/coreclr/jit/indirectcalltransformer.cpp b/src/coreclr/jit/indirectcalltransformer.cpp index 37f0d626cbbc3..da1fb1933b239 100644 --- a/src/coreclr/jit/indirectcalltransformer.cpp +++ b/src/coreclr/jit/indirectcalltransformer.cpp @@ -572,8 +572,8 @@ class IndirectCallTransformer { // There's no need for a new block here. We can just append to currBlock. // - checkBlock = currBlock; - checkBlock->bbJumpKind = BBJ_COND; + checkBlock = currBlock; + checkBlock->SetBBJumpKind(BBJ_COND DEBUG_ARG(compiler)); } else { @@ -652,7 +652,7 @@ class IndirectCallTransformer if (isLastCheck && ((origCall->gtCallMoreFlags & GTF_CALL_M_GUARDED_DEVIRT_EXACT) != 0)) { checkBlock->bbJumpDest = nullptr; - checkBlock->bbJumpKind = BBJ_NONE; + checkBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(compiler)); return; } @@ -1073,7 +1073,7 @@ class IndirectCallTransformer // BasicBlock* const coldBlock = checkBlock->bbPrev; - if (coldBlock->bbJumpKind != BBJ_NONE) + if (!coldBlock->KindIs(BBJ_NONE)) { JITDUMP("Unexpected flow from cold path " FMT_BB "\n", coldBlock->bbNum); return; @@ -1081,7 +1081,7 @@ class IndirectCallTransformer BasicBlock* const hotBlock = coldBlock->bbPrev; - if ((hotBlock->bbJumpKind != BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock)) + if (!hotBlock->KindIs(BBJ_ALWAYS) || (hotBlock->bbJumpDest != checkBlock)) { JITDUMP("Unexpected flow from hot path " FMT_BB "\n", hotBlock->bbNum); return; @@ -1126,7 +1126,7 @@ class IndirectCallTransformer // not fall through to the check block. // compiler->fgRemoveRefPred(checkBlock, coldBlock); - coldBlock->bbJumpKind = BBJ_ALWAYS; + coldBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(compiler)); coldBlock->bbJumpDest = elseBlock; compiler->fgAddRefPred(elseBlock, coldBlock); } diff --git a/src/coreclr/jit/jiteh.cpp b/src/coreclr/jit/jiteh.cpp index e9128fc26d13f..9de15947cfdd6 100644 --- a/src/coreclr/jit/jiteh.cpp +++ b/src/coreclr/jit/jiteh.cpp @@ -960,7 +960,7 @@ void Compiler::ehGetCallFinallyBlockRange(unsigned finallyIndex, BasicBlock** be bool Compiler::ehCallFinallyInCorrectRegion(BasicBlock* blockCallFinally, unsigned finallyIndex) { - assert(blockCallFinally->bbJumpKind == BBJ_CALLFINALLY); + assert(blockCallFinally->KindIs(BBJ_CALLFINALLY)); assert(finallyIndex != EHblkDsc::NO_ENCLOSING_INDEX); assert(finallyIndex < compHndBBtabCount); assert(ehGetDsc(finallyIndex)->HasFinallyHandler()); @@ -2276,7 +2276,7 @@ bool Compiler::fgNormalizeEHCase2() // Change pred branches. 
// - if (predBlock->bbJumpKind != BBJ_NONE) + if (!predBlock->KindIs(BBJ_NONE)) { fgReplaceJumpTarget(predBlock, newTryStart, insertBeforeBlk); } @@ -3506,7 +3506,7 @@ void Compiler::fgVerifyHandlerTab() } // Check for legal block types - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_EHFINALLYRET: { @@ -4056,12 +4056,12 @@ void Compiler::fgClearFinallyTargetBit(BasicBlock* block) for (BasicBlock* const predBlock : block->PredBlocks()) { - if (predBlock->bbJumpKind == BBJ_ALWAYS && predBlock->bbJumpDest == block) + if (predBlock->KindIs(BBJ_ALWAYS) && predBlock->bbJumpDest == block) { BasicBlock* pPrev = predBlock->bbPrev; if (pPrev != nullptr) { - if (pPrev->bbJumpKind == BBJ_CALLFINALLY) + if (pPrev->KindIs(BBJ_CALLFINALLY)) { // We found a BBJ_CALLFINALLY / BBJ_ALWAYS that still points to this finally target return; @@ -4113,7 +4113,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) ((xtab->ebdHndBeg->bbNext == block) && (xtab->ebdHndBeg->bbFlags & BBF_INTERNAL))); // After we've already inserted a header block, and we're // trying to decide how to split up the predecessor edges. - if (predBlock->bbJumpKind == BBJ_CALLFINALLY) + if (predBlock->KindIs(BBJ_CALLFINALLY)) { assert(predBlock->bbJumpDest == block); @@ -4184,7 +4184,7 @@ bool Compiler::fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block) // The block is a handler. Check if the pred block is from its filter. We only need to // check the end filter flag, as there is only a single filter for any handler, and we // already know predBlock is a predecessor of block. - if (predBlock->bbJumpKind == BBJ_EHFILTERRET) + if (predBlock->KindIs(BBJ_EHFILTERRET)) { assert(!xtab->InHndRegionBBRange(predBlock)); return false; @@ -4413,7 +4413,7 @@ void Compiler::fgExtendEHRegionBefore(BasicBlock* block) { BasicBlock* bFilterLast = HBtab->BBFilterLast(); assert(bFilterLast != nullptr); - assert(bFilterLast->bbJumpKind == BBJ_EHFILTERRET); + assert(bFilterLast->KindIs(BBJ_EHFILTERRET)); assert(bFilterLast->bbJumpDest == block); #ifdef DEBUG if (verbose) diff --git a/src/coreclr/jit/lclvars.cpp b/src/coreclr/jit/lclvars.cpp index 57b4f164fd444..8af56fa167317 100644 --- a/src/coreclr/jit/lclvars.cpp +++ b/src/coreclr/jit/lclvars.cpp @@ -4098,7 +4098,7 @@ void Compiler::lvaMarkLclRefs(GenTree* tree, BasicBlock* block, Statement* stmt, if (!varDsc->lvDisqualifySingleDefRegCandidate) // If this var is already disqualified, we can skip this { bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; + bool bbIsReturn = block->KindIs(BBJ_RETURN); // TODO: Zero-inits in LSRA are created with below condition. But if filter out based on that condition // we filter a lot of interesting variables that would benefit otherwise with EH var enregistration. 
// bool needsExplicitZeroInit = !varDsc->lvIsParam && (info.compInitMem || diff --git a/src/coreclr/jit/lir.cpp b/src/coreclr/jit/lir.cpp index 4389b6d6c4d8e..44e810592a006 100644 --- a/src/coreclr/jit/lir.cpp +++ b/src/coreclr/jit/lir.cpp @@ -1770,7 +1770,7 @@ void LIR::InsertBeforeTerminator(BasicBlock* block, LIR::Range&& range) assert(insertionPoint != nullptr); #if DEBUG - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_COND: assert(insertionPoint->OperIsConditionalJump()); diff --git a/src/coreclr/jit/liveness.cpp b/src/coreclr/jit/liveness.cpp index 62f0e1784a1d3..d32854e4224c7 100644 --- a/src/coreclr/jit/liveness.cpp +++ b/src/coreclr/jit/liveness.cpp @@ -378,7 +378,7 @@ void Compiler::fgPerBlockLocalVarLiveness() block->bbMemoryLiveIn = fullMemoryKindSet; block->bbMemoryLiveOut = fullMemoryKindSet; - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_EHFINALLYRET: case BBJ_EHFAULTRET: @@ -491,7 +491,7 @@ void Compiler::fgPerBlockLocalVarLiveness() // Mark the FrameListRoot as used, if applicable. - if (block->bbJumpKind == BBJ_RETURN && compMethodRequiresPInvokeFrame()) + if (block->KindIs(BBJ_RETURN) && compMethodRequiresPInvokeFrame()) { assert((!opts.ShouldUsePInvokeHelpers()) || (info.compLvFrameListRoot == BAD_VAR_NUM)); if (!opts.ShouldUsePInvokeHelpers()) @@ -886,7 +886,7 @@ void Compiler::fgExtendDbgLifetimes() { VarSetOps::ClearD(this, initVars); - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_NONE: PREFIX_ASSUME(block->bbNext != nullptr); @@ -2451,7 +2451,7 @@ void Compiler::fgInterBlockLocalVarLiveness() { // Get the set of live variables on exit from an exception region. VarSetOps::UnionD(this, exceptVars, block->bbLiveOut); - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { // Live on exit from finally. // We track these separately because, in addition to having EH live-out semantics, diff --git a/src/coreclr/jit/loopcloning.cpp b/src/coreclr/jit/loopcloning.cpp index 8805610d56a1a..f976f1d46adf6 100644 --- a/src/coreclr/jit/loopcloning.cpp +++ b/src/coreclr/jit/loopcloning.cpp @@ -1767,7 +1767,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd) unsigned loopRetCount = 0; for (BasicBlock* const blk : loop.LoopBlocks()) { - if (blk->bbJumpKind == BBJ_RETURN) + if (blk->KindIs(BBJ_RETURN)) { loopRetCount++; } @@ -1856,7 +1856,7 @@ bool Compiler::optIsLoopClonable(unsigned loopInd) BasicBlock* top = loop.lpTop; BasicBlock* bottom = loop.lpBottom; - if (bottom->bbJumpKind != BBJ_COND) + if (!bottom->KindIs(BBJ_COND)) { JITDUMP("Loop cloning: rejecting loop " FMT_LP ". 
Couldn't find termination test.\n", loopInd); return false; @@ -1946,7 +1946,7 @@ BasicBlock* Compiler::optInsertLoopChoiceConditions(LoopCloneContext* context, JITDUMP("Inserting loop " FMT_LP " loop choice conditions\n", loopNum); assert(context->HasBlockConditions(loopNum)); assert(slowHead != nullptr); - assert(insertAfter->bbJumpKind == BBJ_NONE); + assert(insertAfter->KindIs(BBJ_NONE)); if (context->HasBlockConditions(loopNum)) { @@ -2044,11 +2044,11 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) h2->bbNatLoopNum = ambientLoop; h2->bbFlags |= BBF_LOOP_PREHEADER; - if (h->bbJumpKind != BBJ_NONE) + if (!h->KindIs(BBJ_NONE)) { - assert(h->bbJumpKind == BBJ_ALWAYS); + assert(h->KindIs(BBJ_ALWAYS)); assert(h->bbJumpDest == loop.lpEntry); - h2->bbJumpKind = BBJ_ALWAYS; + h2->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); h2->bbJumpDest = loop.lpEntry; } @@ -2063,16 +2063,16 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Make 'h' fall through to 'h2' (if it didn't already). // Don't add the h->h2 edge because we're going to insert the cloning conditions between 'h' and 'h2', and // optInsertLoopChoiceConditions() will add the edge. - h->bbJumpKind = BBJ_NONE; + h->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); h->bbJumpDest = nullptr; // Make X2 after B, if necessary. (Not necessary if B is a BBJ_ALWAYS.) // "newPred" will be the predecessor of the blocks of the cloned loop. BasicBlock* b = loop.lpBottom; BasicBlock* newPred = b; - if (b->bbJumpKind != BBJ_ALWAYS) + if (!b->KindIs(BBJ_ALWAYS)) { - assert(b->bbJumpKind == BBJ_COND); + assert(b->KindIs(BBJ_COND)); BasicBlock* x = b->bbNext; if (x != nullptr) @@ -2117,7 +2117,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) BlockToBlockMap* blockMap = new (getAllocator(CMK_LoopClone)) BlockToBlockMap(getAllocator(CMK_LoopClone)); for (BasicBlock* const blk : loop.LoopBlocks()) { - BasicBlock* newBlk = fgNewBBafter(blk->bbJumpKind, newPred, /*extendRegion*/ true); + BasicBlock* newBlk = fgNewBBafter(blk->GetBBJumpKind(), newPred, /*extendRegion*/ true); JITDUMP("Adding " FMT_BB " (copy of " FMT_BB ") after " FMT_BB "\n", newBlk->bbNum, blk->bbNum, newPred->bbNum); // Call CloneBlockState to make a copy of the block's statements (and attributes), and assert that it @@ -2176,7 +2176,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) bool b = blockMap->Lookup(blk, &newblk); assert(b && newblk != nullptr); - assert(blk->bbJumpKind == newblk->bbJumpKind); + assert(blk->KindIs(newblk->GetBBJumpKind())); // First copy the jump destination(s) from "blk". optCopyBlkDest(blk, newblk); @@ -2185,7 +2185,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) optRedirectBlock(newblk, blockMap); // Add predecessor edges for the new successors, as well as the fall-through paths. - switch (newblk->bbJumpKind) + switch (newblk->GetBBJumpKind()) { case BBJ_NONE: fgAddRefPred(newblk->bbNext, newblk); @@ -2244,7 +2244,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // We should always have block conditions. assert(context->HasBlockConditions(loopInd)); - assert(h->bbJumpKind == BBJ_NONE); + assert(h->KindIs(BBJ_NONE)); assert(h->bbNext == h2); // If any condition is false, go to slowHead (which branches or falls through to e2). 
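// [Illustrative aside, not part of the patch] What the cloning conditions built above
// achieve, shown as ordinary source code rather than as flowgraph edits: a guarded "fast"
// copy of the loop and the original "slow" copy reached when any condition fails. Purely
// conceptual; the JIT performs this transformation on BasicBlocks, not on source.
#include <cstddef>

int SumFirst(const int* data, std::size_t dataLen, std::size_t n)
{
    int sum = 0;
    if ((data != nullptr) && (n <= dataLen))
    {
        // Fast path: the up-front conditions proved every access is in range,
        // so the cloned loop body can drop its per-iteration checks.
        for (std::size_t i = 0; i < n; i++)
        {
            sum += data[i];
        }
    }
    else
    {
        // Slow path: the original loop keeps its per-iteration range check.
        for (std::size_t i = 0; i < n; i++)
        {
            if ((data == nullptr) || (i >= dataLen))
            {
                break; // stand-in for the range-check failure path
            }
            sum += data[i];
        }
    }
    return sum;
}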
@@ -2255,8 +2255,8 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) if (slowHead->bbNext != e2) { // We can't just fall through to the slow path entry, so make it an unconditional branch. - assert(slowHead->bbJumpKind == BBJ_NONE); // This is how we created it above. - slowHead->bbJumpKind = BBJ_ALWAYS; + assert(slowHead->KindIs(BBJ_NONE)); // This is how we created it above. + slowHead->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); slowHead->bbJumpDest = e2; } @@ -2267,7 +2267,7 @@ void Compiler::optCloneLoop(unsigned loopInd, LoopCloneContext* context) // Add the fall-through path pred (either to T/E for fall-through from conditions to fast path, // or H2 if branch to E of fast path). - assert(condLast->bbJumpKind == BBJ_COND); + assert(condLast->KindIs(BBJ_COND)); JITDUMP("Adding " FMT_BB " -> " FMT_BB "\n", condLast->bbNum, condLast->bbNext->bbNum); fgAddRefPred(condLast->bbNext, condLast); diff --git a/src/coreclr/jit/lower.cpp b/src/coreclr/jit/lower.cpp index 7bc73d6bcecf0..07f1f95ff4ebd 100644 --- a/src/coreclr/jit/lower.cpp +++ b/src/coreclr/jit/lower.cpp @@ -801,12 +801,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node) noway_assert(comp->opts.OptimizationDisabled()); if (originalSwitchBB->bbNext == jumpTab[0]) { - originalSwitchBB->bbJumpKind = BBJ_NONE; + originalSwitchBB->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = nullptr; } else { - originalSwitchBB->bbJumpKind = BBJ_ALWAYS; + originalSwitchBB->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = jumpTab[0]; } // Remove extra predecessor links if there was more than one case. @@ -891,16 +891,16 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // afterDefaultCondBlock is now the switch, and all the switch targets have it as a predecessor. // originalSwitchBB is now a BBJ_NONE, and there is a predecessor edge in afterDefaultCondBlock // representing the fall-through flow from originalSwitchBB. - assert(originalSwitchBB->bbJumpKind == BBJ_NONE); + assert(originalSwitchBB->KindIs(BBJ_NONE)); assert(originalSwitchBB->bbNext == afterDefaultCondBlock); - assert(afterDefaultCondBlock->bbJumpKind == BBJ_SWITCH); + assert(afterDefaultCondBlock->KindIs(BBJ_SWITCH)); assert(afterDefaultCondBlock->bbJumpSwt->bbsHasDefault); assert(afterDefaultCondBlock->isEmpty()); // Nothing here yet. // The GT_SWITCH code is still in originalSwitchBB (it will be removed later). // Turn originalSwitchBB into a BBJ_COND. - originalSwitchBB->bbJumpKind = BBJ_COND; + originalSwitchBB->SetBBJumpKind(BBJ_COND DEBUG_ARG(comp)); originalSwitchBB->bbJumpDest = jumpTab[jumpCnt - 1]; // Fix the pred for the default case: the default block target still has originalSwitchBB @@ -957,12 +957,12 @@ GenTree* Lowering::LowerSwitch(GenTree* node) } if (afterDefaultCondBlock->bbNext == uniqueSucc) { - afterDefaultCondBlock->bbJumpKind = BBJ_NONE; + afterDefaultCondBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); afterDefaultCondBlock->bbJumpDest = nullptr; } else { - afterDefaultCondBlock->bbJumpKind = BBJ_ALWAYS; + afterDefaultCondBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); afterDefaultCondBlock->bbJumpDest = uniqueSucc; } } @@ -1036,13 +1036,13 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // case: there is no need to compare against the case index, since it's // guaranteed to be taken (since the default case was handled first, above). - currentBlock->bbJumpKind = BBJ_ALWAYS; + currentBlock->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(comp)); } else { // Otherwise, it's a conditional branch. 
Set the branch kind, then add the // condition statement. - currentBlock->bbJumpKind = BBJ_COND; + currentBlock->SetBBJumpKind(BBJ_COND DEBUG_ARG(comp)); // Now, build the conditional statement for the current case that is // being evaluated: @@ -1074,8 +1074,8 @@ GenTree* Lowering::LowerSwitch(GenTree* node) // so fgRemoveBlock() doesn't complain. JITDUMP("Lowering switch " FMT_BB ": all switch cases were fall-through\n", originalSwitchBB->bbNum); assert(currentBlock == afterDefaultCondBlock); - assert(currentBlock->bbJumpKind == BBJ_SWITCH); - currentBlock->bbJumpKind = BBJ_NONE; + assert(currentBlock->KindIs(BBJ_SWITCH)); + currentBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(comp)); currentBlock->bbFlags &= ~BBF_DONT_REMOVE; comp->fgRemoveBlock(currentBlock, /* unreachable */ false); // It's an empty block. } @@ -1159,7 +1159,7 @@ bool Lowering::TryLowerSwitchToBitTest( { assert(jumpCount >= 2); assert(targetCount >= 2); - assert(bbSwitch->bbJumpKind == BBJ_SWITCH); + assert(bbSwitch->KindIs(BBJ_SWITCH)); assert(switchValue->OperIs(GT_LCL_VAR)); // @@ -1247,7 +1247,7 @@ bool Lowering::TryLowerSwitchToBitTest( // GenCondition bbSwitchCondition; - bbSwitch->bbJumpKind = BBJ_COND; + bbSwitch->SetBBJumpKind(BBJ_COND DEBUG_ARG(comp)); comp->fgRemoveAllRefPreds(bbCase1, bbSwitch); comp->fgRemoveAllRefPreds(bbCase0, bbSwitch); @@ -5317,8 +5317,7 @@ void Lowering::InsertPInvokeMethodEpilog(BasicBlock* returnBB DEBUGARG(GenTree* JITDUMP("======= Inserting PInvoke method epilog\n"); // Method doing PInvoke calls has exactly one return block unless it has "jmp" or tail calls. - assert(((returnBB == comp->genReturnBB) && (returnBB->bbJumpKind == BBJ_RETURN)) || - returnBB->endsWithTailCallOrJmp(comp)); + assert(((returnBB == comp->genReturnBB) && returnBB->KindIs(BBJ_RETURN)) || returnBB->endsWithTailCallOrJmp(comp)); LIR::Range& returnBlockRange = LIR::AsRange(returnBB); diff --git a/src/coreclr/jit/lsra.cpp b/src/coreclr/jit/lsra.cpp index ec19a65c13464..88af18d880898 100644 --- a/src/coreclr/jit/lsra.cpp +++ b/src/coreclr/jit/lsra.cpp @@ -964,7 +964,7 @@ void LinearScan::setBlockSequence() blockInfo[block->bbNum].hasCriticalInEdge = true; hasCriticalEdges = true; } - else if (predBlock->bbJumpKind == BBJ_SWITCH) + else if (predBlock->KindIs(BBJ_SWITCH)) { assert(!"Switch with single successor"); } @@ -993,7 +993,7 @@ void LinearScan::setBlockSequence() // according to the desired order. We will handle the EH successors below. const unsigned numSuccs = block->NumSucc(compiler); bool checkForCriticalOutEdge = (numSuccs > 1); - if (!checkForCriticalOutEdge && block->bbJumpKind == BBJ_SWITCH) + if (!checkForCriticalOutEdge && block->KindIs(BBJ_SWITCH)) { assert(!"Switch with single successor"); } @@ -1549,7 +1549,7 @@ void LinearScan::identifyCandidatesExceptionDataflow() if (block->hasEHBoundaryOut()) { VarSetOps::UnionD(compiler, exceptVars, block->bbLiveOut); - if (block->bbJumpKind == BBJ_EHFINALLYRET) + if (block->KindIs(BBJ_EHFINALLYRET)) { // Live on exit from finally. // We track these separately because, in addition to having EH live-out semantics, @@ -2513,7 +2513,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, // IG08: // ... // ... 
- if (block->bbJumpKind == BBJ_THROW) + if (block->KindIs(BBJ_THROW)) { JITDUMP(" - throw block; "); return nullptr; @@ -2544,7 +2544,7 @@ BasicBlock* LinearScan::findPredBlockForLiveIn(BasicBlock* block, assert(!predBlock->hasEHBoundaryOut()); if (isBlockVisited(predBlock)) { - if (predBlock->bbJumpKind == BBJ_COND) + if (predBlock->KindIs(BBJ_COND)) { // Special handling to improve matching on backedges. BasicBlock* otherBlock = (block == predBlock->bbNext) ? predBlock->bbJumpDest : predBlock->bbNext; @@ -8177,7 +8177,7 @@ void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) // Note: Only switches and JCMP/JTEST (for Arm4) have input regs (and so can be fed by copies), so those // are the only block-ending branches that need special handling. regMaskTP consumedRegs = RBM_NONE; - if (block->bbJumpKind == BBJ_SWITCH) + if (block->KindIs(BBJ_SWITCH)) { // At this point, Lowering has transformed any non-switch-table blocks into // cascading ifs. @@ -8216,7 +8216,7 @@ void LinearScan::handleOutgoingCriticalEdges(BasicBlock* block) // Note: GT_COPY has special handling in codegen and its generation is merged with the // node that consumes its result. So both, the input and output regs of GT_COPY must be // excluded from the set available for resolution. - else if (block->bbJumpKind == BBJ_COND) + else if (block->KindIs(BBJ_COND)) { GenTree* lastNode = LIR::AsRange(block).LastNode(); diff --git a/src/coreclr/jit/morph.cpp b/src/coreclr/jit/morph.cpp index 6fa8a90d3bdfb..31166db9d122c 100644 --- a/src/coreclr/jit/morph.cpp +++ b/src/coreclr/jit/morph.cpp @@ -6122,7 +6122,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // No unique successor. compCurBB should be a return. // - assert(compCurBB->bbJumpKind == BBJ_RETURN); + assert(compCurBB->KindIs(BBJ_RETURN)); } else { @@ -6186,7 +6186,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) // Many tailcalls will have call and ret in the same block, and thus be // BBJ_RETURN, but if the call falls through to a ret, and we are doing a // tailcall, change it here. - compCurBB->bbJumpKind = BBJ_RETURN; + compCurBB->SetBBJumpKind(BBJ_RETURN DEBUG_ARG(this)); } GenTree* stmtExpr = fgMorphStmt->GetRootNode(); @@ -6325,7 +6325,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) // Fast tail call: in case of fast tail calls, we need a jmp epilog and // hence mark it as BBJ_RETURN with BBF_JMP flag set. - noway_assert(compCurBB->bbJumpKind == BBJ_RETURN); + noway_assert(compCurBB->KindIs(BBJ_RETURN)); if (canFastTailCall) { compCurBB->bbFlags |= BBF_HAS_JMP; @@ -6334,7 +6334,7 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call) { // We call CORINFO_HELP_TAILCALL which does not return, so we will // not need epilogue. - compCurBB->bbJumpKind = BBJ_THROW; + compCurBB->SetBBJumpKind(BBJ_THROW DEBUG_ARG(this)); } if (isRootReplaced) @@ -7486,7 +7486,7 @@ void Compiler::fgMorphRecursiveFastTailCallIntoLoop(BasicBlock* block, GenTreeCa } // Finish hooking things up. - block->bbJumpKind = BBJ_ALWAYS; + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); fgAddRefPred(block->bbJumpDest, block); block->bbFlags &= ~BBF_HAS_JMP; } @@ -8028,7 +8028,7 @@ GenTree* Compiler::fgMorphConst(GenTree* tree) // of CORINFO_HELP_STRCNS and go to cache first giving reasonable perf. 
bool useLazyStrCns = false; - if (compCurBB->bbJumpKind == BBJ_THROW) + if (compCurBB->KindIs(BBJ_THROW)) { useLazyStrCns = true; } @@ -13116,7 +13116,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) return result; } - if (block->bbJumpKind == BBJ_COND) + if (block->KindIs(BBJ_COND)) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); @@ -13179,9 +13179,9 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (cond->AsIntCon()->gtIconVal != 0) { /* JTRUE 1 - transform the basic block into a BBJ_ALWAYS */ - block->bbJumpKind = BBJ_ALWAYS; - bTaken = block->bbJumpDest; - bNotTaken = block->bbNext; + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); + bTaken = block->bbJumpDest; + bNotTaken = block->bbNext; } else { @@ -13195,9 +13195,9 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) } /* JTRUE 0 - transform the basic block into a BBJ_NONE */ - block->bbJumpKind = BBJ_NONE; - bTaken = block->bbNext; - bNotTaken = block->bbJumpDest; + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); + bTaken = block->bbNext; + bNotTaken = block->bbJumpDest; } if (fgHaveValidEdgeWeights) @@ -13250,7 +13250,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) FlowEdge* edge; // Now fix the weights of the edges out of 'bUpdated' - switch (bUpdated->bbJumpKind) + switch (bUpdated->GetBBJumpKind()) { case BBJ_NONE: edge = fgGetPredForBlock(bUpdated->bbNext, bUpdated); @@ -13289,9 +13289,8 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); - printf(FMT_BB " becomes a %s", block->bbNum, - block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->bbJumpKind == BBJ_ALWAYS) + printf(FMT_BB " becomes a %s", block->bbNum, block->KindIs(BBJ_ALWAYS) ? "BBJ_ALWAYS" : "BBJ_NONE"); + if (block->KindIs(BBJ_ALWAYS)) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -13352,7 +13351,7 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) } } } - else if (block->bbJumpKind == BBJ_SWITCH) + else if (block->KindIs(BBJ_SWITCH)) { noway_assert(block->bbStmtList != nullptr && block->bbStmtList->GetPrevStmt() != nullptr); @@ -13425,13 +13424,13 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (curJump != block->bbNext) { // transform the basic block into a BBJ_ALWAYS - block->bbJumpKind = BBJ_ALWAYS; + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = curJump; } else { // transform the basic block into a BBJ_NONE - block->bbJumpKind = BBJ_NONE; + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } foundVal = true; } @@ -13448,9 +13447,8 @@ Compiler::FoldResult Compiler::fgFoldConditional(BasicBlock* block) if (verbose) { printf("\nConditional folded at " FMT_BB "\n", block->bbNum); - printf(FMT_BB " becomes a %s", block->bbNum, - block->bbJumpKind == BBJ_ALWAYS ? "BBJ_ALWAYS" : "BBJ_NONE"); - if (block->bbJumpKind == BBJ_ALWAYS) + printf(FMT_BB " becomes a %s", block->bbNum, block->KindIs(BBJ_ALWAYS) ? 
"BBJ_ALWAYS" : "BBJ_NONE"); + if (block->KindIs(BBJ_ALWAYS)) { printf(" to " FMT_BB, block->bbJumpDest->bbNum); } @@ -13723,10 +13721,10 @@ void Compiler::fgMorphStmts(BasicBlock* block) // - a tail call dispatched via runtime help (IL stubs), in which // case there will not be any tailcall and the block will be ending // with BBJ_RETURN (as normal control flow) - noway_assert((call->IsFastTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN) && + noway_assert((call->IsFastTailCall() && compCurBB->KindIs(BBJ_RETURN) && ((compCurBB->bbFlags & BBF_HAS_JMP)) != 0) || - (call->IsTailCallViaJitHelper() && (compCurBB->bbJumpKind == BBJ_THROW)) || - (!call->IsTailCall() && (compCurBB->bbJumpKind == BBJ_RETURN))); + (call->IsTailCallViaJitHelper() && compCurBB->KindIs(BBJ_THROW)) || + (!call->IsTailCall() && compCurBB->KindIs(BBJ_RETURN))); } #ifdef DEBUG @@ -13802,7 +13800,7 @@ void Compiler::fgMorphStmts(BasicBlock* block) if (fgRemoveRestOfBlock) { - if ((block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH)) + if (block->KindIs(BBJ_COND, BBJ_SWITCH)) { Statement* first = block->firstStmt(); noway_assert(first); @@ -13810,8 +13808,8 @@ void Compiler::fgMorphStmts(BasicBlock* block) noway_assert(lastStmt && lastStmt->GetNextStmt() == nullptr); GenTree* last = lastStmt->GetRootNode(); - if (((block->bbJumpKind == BBJ_COND) && (last->gtOper == GT_JTRUE)) || - ((block->bbJumpKind == BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) + if ((block->KindIs(BBJ_COND) && (last->gtOper == GT_JTRUE)) || + (block->KindIs(BBJ_SWITCH) && (last->gtOper == GT_SWITCH))) { GenTree* op1 = last->AsOp()->gtOp1; @@ -13919,7 +13917,7 @@ void Compiler::fgMorphBlocks() fgMorphStmts(block); // Do we need to merge the result of this block into a single return block? - if ((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) + if (block->KindIs(BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)) { if ((genReturnBB != nullptr) && (genReturnBB != block)) { @@ -13975,7 +13973,7 @@ void Compiler::fgMorphBlocks() // void Compiler::fgMergeBlockReturn(BasicBlock* block) { - assert((block->bbJumpKind == BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); + assert(block->KindIs(BBJ_RETURN) && ((block->bbFlags & BBF_HAS_JMP) == 0)); assert((genReturnBB != nullptr) && (genReturnBB != block)); // TODO: Need to characterize the last top level stmt of a block ending with BBJ_RETURN. @@ -14000,7 +13998,7 @@ void Compiler::fgMergeBlockReturn(BasicBlock* block) else #endif // !TARGET_X86 { - block->bbJumpKind = BBJ_ALWAYS; + block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); block->bbJumpDest = genReturnBB; fgAddRefPred(genReturnBB, block); fgReturnCount--; diff --git a/src/coreclr/jit/objectalloc.cpp b/src/coreclr/jit/objectalloc.cpp index e589bb9f92d85..3694e83c248f2 100644 --- a/src/coreclr/jit/objectalloc.cpp +++ b/src/coreclr/jit/objectalloc.cpp @@ -510,7 +510,7 @@ unsigned int ObjectAllocator::MorphAllocObjNodeIntoStackAlloc(GenTreeAllocObj* a // Initialize the object memory if necessary. 
bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; + bool bbIsReturn = block->KindIs(BBJ_RETURN); LclVarDsc* const lclDsc = comp->lvaGetDesc(lclNum); if (comp->fgVarNeedsExplicitZeroInit(lclNum, bbInALoop, bbIsReturn)) { diff --git a/src/coreclr/jit/optimizebools.cpp b/src/coreclr/jit/optimizebools.cpp index 2efbf40b6d535..82d2430b91445 100644 --- a/src/coreclr/jit/optimizebools.cpp +++ b/src/coreclr/jit/optimizebools.cpp @@ -587,7 +587,7 @@ bool OptBoolsDsc::optOptimizeCompareChainCondBlock() // Update the flow. m_comp->fgRemoveRefPred(m_b1->bbJumpDest, m_b1); - m_b1->bbJumpKind = BBJ_NONE; + m_b1->SetBBJumpKind(BBJ_NONE DEBUG_ARG(m_comp)); // Fixup flags. m_b2->bbFlags |= (m_b1->bbFlags & BBF_COPY_PROPAGATE); @@ -877,18 +877,18 @@ void OptBoolsDsc::optOptimizeBoolsUpdateTrees() if (optReturnBlock) { m_b1->bbJumpDest = nullptr; - m_b1->bbJumpKind = BBJ_RETURN; + m_b1->SetBBJumpKind(BBJ_RETURN DEBUG_ARG(m_comp)); #ifdef DEBUG m_b1->bbJumpSwt = m_b2->bbJumpSwt; #endif - assert(m_b2->bbJumpKind == BBJ_RETURN); + assert(m_b2->KindIs(BBJ_RETURN)); assert(m_b1->bbNext == m_b2); assert(m_b3 != nullptr); } else { - assert(m_b1->bbJumpKind == BBJ_COND); - assert(m_b2->bbJumpKind == BBJ_COND); + assert(m_b1->KindIs(BBJ_COND)); + assert(m_b2->KindIs(BBJ_COND)); assert(m_b1->bbJumpDest == m_b2->bbJumpDest); assert(m_b1->bbNext == m_b2); assert(m_b2->bbNext != nullptr); @@ -1180,7 +1180,7 @@ void OptBoolsDsc::optOptimizeBoolsGcStress() return; } - assert(m_b1->bbJumpKind == BBJ_COND); + assert(m_b1->KindIs(BBJ_COND)); Statement* const stmt = m_b1->lastStmt(); GenTree* const cond = stmt->GetRootNode(); @@ -1469,7 +1469,7 @@ PhaseStatus Compiler::optOptimizeBools() // We're only interested in conditional jumps here - if (b1->bbJumpKind != BBJ_COND) + if (!b1->KindIs(BBJ_COND)) { continue; } @@ -1492,7 +1492,7 @@ PhaseStatus Compiler::optOptimizeBools() // The next block needs to be a condition or return block. - if (b2->bbJumpKind == BBJ_COND) + if (b2->KindIs(BBJ_COND)) { if ((b1->bbJumpDest != b2->bbJumpDest) && (b1->bbJumpDest != b2->bbNext)) { @@ -1517,7 +1517,7 @@ PhaseStatus Compiler::optOptimizeBools() } #endif } - else if (b2->bbJumpKind == BBJ_RETURN) + else if (b2->KindIs(BBJ_RETURN)) { // Set b3 to b1 jump destination BasicBlock* b3 = b1->bbJumpDest; @@ -1531,7 +1531,7 @@ PhaseStatus Compiler::optOptimizeBools() // b3 must be RETURN type - if (b3->bbJumpKind != BBJ_RETURN) + if (!b3->KindIs(BBJ_RETURN)) { continue; } diff --git a/src/coreclr/jit/optimizer.cpp b/src/coreclr/jit/optimizer.cpp index 11c537226e878..75f4c7ed4cb83 100644 --- a/src/coreclr/jit/optimizer.cpp +++ b/src/coreclr/jit/optimizer.cpp @@ -741,7 +741,7 @@ bool Compiler::optPopulateInitInfo(unsigned loopInd, BasicBlock* initBlock, GenT bool initBlockOk = (predBlock == initBlock); if (!initBlockOk) { - if ((predBlock->bbJumpKind == BBJ_NONE) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && + if (predBlock->KindIs(BBJ_NONE) && (predBlock->bbNext == optLoopTable[loopInd].lpEntry) && (predBlock->countOfInEdges() == 1) && (predBlock->firstStmt() == nullptr) && (predBlock->bbPrev != nullptr) && predBlock->bbPrev->bbFallsThrough()) { @@ -1150,7 +1150,7 @@ bool Compiler::optExtractInitTestIncr( // If we are rebuilding the loop table, we would already have the pre-header block introduced // the first time, which might be empty if no hoisting has yet occurred. In this case, look a // little harder for the possible loop initialization statement. 
- if ((initBlock->bbJumpKind == BBJ_NONE) && (initBlock->bbNext == top) && (initBlock->countOfInEdges() == 1) && + if (initBlock->KindIs(BBJ_NONE) && (initBlock->bbNext == top) && (initBlock->countOfInEdges() == 1) && (initBlock->bbPrev != nullptr) && initBlock->bbPrev->bbFallsThrough()) { initBlock = initBlock->bbPrev; @@ -1305,7 +1305,7 @@ bool Compiler::optRecordLoop( // 5. Finding a constant initializer is optional; if the initializer is not found, or is not constant, // it is still considered a for-like loop. // - if (bottom->bbJumpKind == BBJ_COND) + if (bottom->KindIs(BBJ_COND)) { GenTree* init; GenTree* test; @@ -1385,7 +1385,7 @@ void Compiler::optCheckPreds() } } noway_assert(bb); - switch (bb->bbJumpKind) + switch (bb->GetBBJumpKind()) { case BBJ_COND: if (bb->bbJumpDest == block) @@ -1801,7 +1801,7 @@ class LoopSearch // BasicBlock* FindEntry(BasicBlock* head, BasicBlock* top, BasicBlock* bottom) { - if (head->bbJumpKind == BBJ_ALWAYS) + if (head->KindIs(BBJ_ALWAYS)) { if (head->bbJumpDest->bbNum <= bottom->bbNum && head->bbJumpDest->bbNum >= top->bbNum) { @@ -2294,7 +2294,7 @@ class LoopSearch { // Need to reconnect the flow from `block` to `oldNext`. - if ((block->bbJumpKind == BBJ_COND) && (block->bbJumpDest == newNext)) + if (block->KindIs(BBJ_COND) && (block->bbJumpDest == newNext)) { // Reverse the jump condition GenTree* test = block->lastNode(); @@ -2321,7 +2321,7 @@ class LoopSearch noway_assert((newBlock == nullptr) || loopBlocks.CanRepresent(newBlock->bbNum)); } } - else if ((block->bbJumpKind == BBJ_ALWAYS) && (block->bbJumpDest == newNext)) + else if (block->KindIs(BBJ_ALWAYS) && (block->bbJumpDest == newNext)) { // We've made `block`'s jump target its bbNext, so remove the jump. if (!comp->fgOptimizeBranchToNext(block, newNext, block->bbPrev)) @@ -2398,7 +2398,7 @@ class LoopSearch { BasicBlock* exitPoint; - switch (block->bbJumpKind) + switch (block->GetBBJumpKind()) { case BBJ_COND: case BBJ_CALLFINALLY: @@ -2416,7 +2416,7 @@ class LoopSearch // On non-funclet platforms (x86), the catch exit is a BBJ_ALWAYS, but we don't want that to // be considered a loop exit block, as catch handlers don't have predecessor lists and don't // show up as might be expected in the dominator tree. - if (block->bbJumpKind == BBJ_ALWAYS) + if (block->KindIs(BBJ_ALWAYS)) { if (!BasicBlock::sameHndRegion(block, exitPoint)) { @@ -2738,7 +2738,7 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R BasicBlock* newJumpDest = nullptr; - switch (blk->bbJumpKind) + switch (blk->GetBBJumpKind()) { case BBJ_NONE: case BBJ_THROW: @@ -2818,10 +2818,10 @@ void Compiler::optRedirectBlock(BasicBlock* blk, BlockToBlockMap* redirectMap, R // TODO-Cleanup: This should be a static member of the BasicBlock class. void Compiler::optCopyBlkDest(BasicBlock* from, BasicBlock* to) { - assert(from->bbJumpKind == to->bbJumpKind); // Precondition. + assert(from->KindIs(to->GetBBJumpKind())); // Precondition. // copy the jump destination(s) from "from" to "to". - switch (to->bbJumpKind) + switch (to->GetBBJumpKind()) { case BBJ_ALWAYS: case BBJ_LEAVE: @@ -2936,7 +2936,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) // entry block. If the `head` branches to `top` because it is the BBJ_ALWAYS of a // BBJ_CALLFINALLY/BBJ_ALWAYS pair, we canonicalize by introducing a new fall-through // head block. See FindEntry() for the logic that allows this. 
- if ((h->bbJumpKind == BBJ_ALWAYS) && (h->bbJumpDest == t) && (h->bbFlags & BBF_KEEP_BBJ_ALWAYS)) + if (h->KindIs(BBJ_ALWAYS) && (h->bbJumpDest == t) && (h->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // Insert new head @@ -3030,7 +3030,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd) // not keeping pred lists in good shape. // BasicBlock* const t = optLoopTable[loopInd].lpTop; - assert(siblingB->bbJumpKind == BBJ_COND); + assert(siblingB->KindIs(BBJ_COND)); assert(siblingB->bbNext == t); JITDUMP(FMT_LP " head " FMT_BB " is also " FMT_LP " bottom\n", loopInd, h->bbNum, sibling); @@ -3207,8 +3207,8 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati // assert(h->bbNext == t); assert(h->bbFallsThrough()); - assert((h->bbJumpKind == BBJ_NONE) || (h->bbJumpKind == BBJ_COND)); - if (h->bbJumpKind == BBJ_COND) + assert(h->KindIs(BBJ_NONE, BBJ_COND)); + if (h->KindIs(BBJ_COND)) { BasicBlock* const hj = h->bbJumpDest; assert((hj->bbNum < t->bbNum) || (hj->bbNum > b->bbNum)); @@ -3360,7 +3360,7 @@ bool Compiler::optCanonicalizeLoopCore(unsigned char loopInd, LoopCanonicalizati childLoop = optLoopTable[childLoop].lpSibling) { if ((optLoopTable[childLoop].lpEntry == origE) && (optLoopTable[childLoop].lpHead == h) && - (newT->bbJumpKind == BBJ_NONE) && (newT->bbNext == origE)) + newT->KindIs(BBJ_NONE) && (newT->bbNext == origE)) { optUpdateLoopHead(childLoop, h, newT); @@ -4280,7 +4280,7 @@ PhaseStatus Compiler::optUnrollLoops() goto DONE_LOOP; } - if (block->bbJumpKind == BBJ_RETURN) + if (block->KindIs(BBJ_RETURN)) { ++loopRetCount; } @@ -4361,7 +4361,7 @@ PhaseStatus Compiler::optUnrollLoops() for (BasicBlock* block = loop.lpTop; block != loop.lpBottom->bbNext; block = block->bbNext) { BasicBlock* newBlock = insertAfter = - fgNewBBafter(block->bbJumpKind, insertAfter, /*extendRegion*/ true); + fgNewBBafter(block->GetBBJumpKind(), insertAfter, /*extendRegion*/ true); blockMap.Set(block, newBlock, BlockToBlockMap::Overwrite); if (!BasicBlock::CloneBlockState(this, newBlock, block, lvar, lval)) @@ -4415,7 +4415,7 @@ PhaseStatus Compiler::optUnrollLoops() { testCopyStmt->SetRootNode(sideEffList); } - newBlock->bbJumpKind = BBJ_NONE; + newBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } } @@ -4486,8 +4486,8 @@ PhaseStatus Compiler::optUnrollLoops() fgRemoveAllRefPreds(succ, block); } + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); block->bbStmtList = nullptr; - block->bbJumpKind = BBJ_NONE; block->bbJumpDest = nullptr; block->bbNatLoopNum = newLoopNum; @@ -4524,21 +4524,21 @@ PhaseStatus Compiler::optUnrollLoops() // // If the initBlock is a BBJ_COND drop the condition (and make initBlock a BBJ_NONE block). // - if (initBlock->bbJumpKind == BBJ_COND) + if (initBlock->KindIs(BBJ_COND)) { assert(dupCond); Statement* initBlockBranchStmt = initBlock->lastStmt(); noway_assert(initBlockBranchStmt->GetRootNode()->OperIs(GT_JTRUE)); fgRemoveStmt(initBlock, initBlockBranchStmt); fgRemoveRefPred(initBlock->bbJumpDest, initBlock); - initBlock->bbJumpKind = BBJ_NONE; + initBlock->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } else { /* the loop must execute */ assert(!dupCond); assert(totalIter > 0); - noway_assert(initBlock->bbJumpKind == BBJ_NONE); + noway_assert(initBlock->KindIs(BBJ_NONE)); } // The loop will be removed, so no need to fix up the pre-header. @@ -4548,7 +4548,7 @@ PhaseStatus Compiler::optUnrollLoops() // For unrolled loops, all the unrolling preconditions require the pre-header block to fall // through into TOP. 
- assert(head->bbJumpKind == BBJ_NONE); + assert(head->KindIs(BBJ_NONE)); } // If we actually unrolled, tail is now reached @@ -4840,7 +4840,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) // Does the BB end with an unconditional jump? - if (block->bbJumpKind != BBJ_ALWAYS || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) + if (!block->KindIs(BBJ_ALWAYS) || (block->bbFlags & BBF_KEEP_BBJ_ALWAYS)) { // It can't be one of the ones we use for our exception magic return false; @@ -4850,7 +4850,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) BasicBlock* const bTest = block->bbJumpDest; // Does the bTest consist of 'jtrue(cond) block' ? - if (bTest->bbJumpKind != BBJ_COND) + if (!bTest->KindIs(BBJ_COND)) { return false; } @@ -5077,7 +5077,7 @@ bool Compiler::optInvertWhileLoop(BasicBlock* block) bool foundCondTree = false; // Create a new block after `block` to put the copied condition code. - block->bbJumpKind = BBJ_NONE; + block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); block->bbJumpDest = nullptr; BasicBlock* bNewCond = fgNewBBafter(BBJ_COND, block, /*extendRegion*/ true); @@ -5425,7 +5425,7 @@ void Compiler::optMarkLoopHeads() { if (blockNum <= predBlock->bbNum) { - if (predBlock->bbJumpKind == BBJ_CALLFINALLY) + if (predBlock->KindIs(BBJ_CALLFINALLY)) { // Loops never have BBJ_CALLFINALLY as the source of their "back edge". continue; @@ -5530,7 +5530,7 @@ void Compiler::optFindAndScaleGeneralLoopBlocks() } // We only consider back-edges that are BBJ_COND or BBJ_ALWAYS for loops. - if ((bottom->bbJumpKind != BBJ_COND) && (bottom->bbJumpKind != BBJ_ALWAYS)) + if (!bottom->KindIs(BBJ_COND, BBJ_ALWAYS)) { continue; } @@ -8189,7 +8189,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) // The preheader block is part of the containing loop (if any). 
preHead->bbNatLoopNum = loop.lpParent; - if (fgIsUsingProfileWeights() && (head->bbJumpKind == BBJ_COND)) + if (fgIsUsingProfileWeights() && head->KindIs(BBJ_COND)) { if ((head->bbWeight == BB_ZERO_WEIGHT) || (entry->bbWeight == BB_ZERO_WEIGHT)) { @@ -8297,7 +8297,7 @@ bool Compiler::fgCreateLoopPreHeader(unsigned lnum) continue; } - switch (predBlock->bbJumpKind) + switch (predBlock->GetBBJumpKind()) { case BBJ_NONE: // This 'entry' predecessor that isn't dominated by 'entry' must be outside the loop, @@ -9172,7 +9172,7 @@ void Compiler::optRemoveRedundantZeroInits() if (tree->Data()->IsIntegralConst(0)) { bool bbInALoop = (block->bbFlags & BBF_BACKWARD_JUMP) != 0; - bool bbIsReturn = block->bbJumpKind == BBJ_RETURN; + bool bbIsReturn = block->KindIs(BBJ_RETURN); if (!bbInALoop || bbIsReturn) { diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp index a2d6cb5633537..017509086d208 100644 --- a/src/coreclr/jit/patchpoint.cpp +++ b/src/coreclr/jit/patchpoint.cpp @@ -145,7 +145,7 @@ class PatchpointTransformer BasicBlock* helperBlock = CreateAndInsertBasicBlock(BBJ_NONE, block); // Update flow and flags - block->bbJumpKind = BBJ_COND; + block->SetBBJumpKind(BBJ_COND DEBUG_ARG(compiler)); block->bbJumpDest = remainderBlock; block->bbFlags |= BBF_INTERNAL; @@ -233,7 +233,7 @@ class PatchpointTransformer } // Update flow - block->bbJumpKind = BBJ_THROW; + block->SetBBJumpKind(BBJ_THROW DEBUG_ARG(compiler)); block->bbJumpDest = nullptr; // Add helper call diff --git a/src/coreclr/jit/redundantbranchopts.cpp b/src/coreclr/jit/redundantbranchopts.cpp index 32369d303d206..dfbd1863cb4b6 100644 --- a/src/coreclr/jit/redundantbranchopts.cpp +++ b/src/coreclr/jit/redundantbranchopts.cpp @@ -44,7 +44,7 @@ PhaseStatus Compiler::optRedundantBranches() // We currently can optimize some BBJ_CONDs. // - if (block->bbJumpKind == BBJ_COND) + if (block->KindIs(BBJ_COND)) { bool madeChangesThisBlock = m_compiler->optRedundantRelop(block); @@ -57,7 +57,7 @@ PhaseStatus Compiler::optRedundantBranches() // a BBJ_COND, retry; perhaps one of the later optimizations // we can do has enabled one of the earlier optimizations. 
// - if (madeChangesThisBlock && (block->bbJumpKind == BBJ_COND)) + if (madeChangesThisBlock && block->KindIs(BBJ_COND)) { JITDUMP("Will retry RBO in " FMT_BB " after partial optimization\n", block->bbNum); madeChangesThisBlock |= m_compiler->optRedundantBranch(block); @@ -508,7 +508,7 @@ bool Compiler::optRedundantBranch(BasicBlock* const block) // Check the current dominator // - if (domBlock->bbJumpKind == BBJ_COND) + if (domBlock->KindIs(BBJ_COND)) { Statement* const domJumpStmt = domBlock->lastStmt(); GenTree* const domJumpTree = domJumpStmt->GetRootNode(); @@ -971,8 +971,8 @@ bool Compiler::optJumpThreadCheck(BasicBlock* const block, BasicBlock* const dom // bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBlock, bool domIsSameRelop) { - assert(block->bbJumpKind == BBJ_COND); - assert(domBlock->bbJumpKind == BBJ_COND); + assert(block->KindIs(BBJ_COND)); + assert(domBlock->KindIs(BBJ_COND)); // If the dominating block is not the immediate dominator // we might need to duplicate a lot of code to thread @@ -990,7 +990,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl BasicBlock* idomBlock = block->bbIDom; while ((idomBlock != nullptr) && (idomBlock != domBlock)) { - if (idomBlock->bbJumpKind == BBJ_COND) + if (idomBlock->KindIs(BBJ_COND)) { JITDUMP(" -- " FMT_BB " not closest branching dom, so no threading\n", idomBlock->bbNum); return false; @@ -1082,7 +1082,7 @@ bool Compiler::optJumpThreadDom(BasicBlock* const block, BasicBlock* const domBl // Treat switch preds as ambiguous for now. // - if (predBlock->bbJumpKind == BBJ_SWITCH) + if (predBlock->KindIs(BBJ_SWITCH)) { JITDUMP(FMT_BB " is a switch pred\n", predBlock->bbNum); BlockSetOps::AddElemD(this, jti.m_ambiguousPreds, predBlock->bbNum); @@ -1450,8 +1450,8 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // const bool fallThroughIsTruePred = BlockSetOps::IsMember(this, jti.m_truePreds, jti.m_fallThroughPred->bbNum); - if ((jti.m_fallThroughPred->bbJumpKind == BBJ_NONE) && ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || - (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) + if (jti.m_fallThroughPred->KindIs(BBJ_NONE) && ((fallThroughIsTruePred && (jti.m_numFalsePreds == 0)) || + (!fallThroughIsTruePred && (jti.m_numTruePreds == 0)))) { JITDUMP(FMT_BB " has ambiguous preds and a (%s) fall through pred and no (%s) preds.\n" "Converting fall through pred " FMT_BB " to BBJ_ALWAYS\n", @@ -1460,7 +1460,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // Possibly defer this until after early out below. 
// - jti.m_fallThroughPred->bbJumpKind = BBJ_ALWAYS; + jti.m_fallThroughPred->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); jti.m_fallThroughPred->bbJumpDest = jti.m_block; modifiedFlow = true; } @@ -1532,7 +1532,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) fgRemoveStmt(jti.m_block, lastStmt); JITDUMP(" repurposing " FMT_BB " to always jump to " FMT_BB "\n", jti.m_block->bbNum, jti.m_trueTarget->bbNum); fgRemoveRefPred(jti.m_falseTarget, jti.m_block); - jti.m_block->bbJumpKind = BBJ_ALWAYS; + jti.m_block->SetBBJumpKind(BBJ_ALWAYS DEBUG_ARG(this)); } else if (falsePredsWillReuseBlock) { @@ -1541,7 +1541,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) JITDUMP(" repurposing " FMT_BB " to always fall through to " FMT_BB "\n", jti.m_block->bbNum, jti.m_falseTarget->bbNum); fgRemoveRefPred(jti.m_trueTarget, jti.m_block); - jti.m_block->bbJumpKind = BBJ_NONE; + jti.m_block->SetBBJumpKind(BBJ_NONE DEBUG_ARG(this)); } // Now reroute the flow from the predecessors. @@ -1623,8 +1623,7 @@ bool Compiler::optJumpThreadCore(JumpThreadInfo& jti) // surviving ssa input, and update all the value numbers...) // BasicBlock* const ambBlock = jti.m_ambiguousVNBlock; - if ((ambBlock != nullptr) && (jti.m_block->bbJumpKind == BBJ_COND) && - (jti.m_block->GetUniquePred(this) == ambBlock)) + if ((ambBlock != nullptr) && jti.m_block->KindIs(BBJ_COND) && (jti.m_block->GetUniquePred(this) == ambBlock)) { JITDUMP(FMT_BB " has just one remaining predcessor " FMT_BB "\n", jti.m_block->bbNum, ambBlock->bbNum); diff --git a/src/coreclr/jit/switchrecognition.cpp b/src/coreclr/jit/switchrecognition.cpp index 5052e6ff57411..125c2cf2fbebe 100644 --- a/src/coreclr/jit/switchrecognition.cpp +++ b/src/coreclr/jit/switchrecognition.cpp @@ -319,7 +319,7 @@ bool Compiler::optSwitchConvert(BasicBlock* firstBlock, int testsCount, ssize_t* assert(isTest); // Convert firstBlock to a switch block - firstBlock->bbJumpKind = BBJ_SWITCH; + firstBlock->SetBBJumpKind(BBJ_SWITCH DEBUG_ARG(this)); firstBlock->bbJumpDest = nullptr; firstBlock->bbCodeOffsEnd = lastBlock->bbCodeOffsEnd; firstBlock->lastStmt()->GetRootNode()->ChangeOper(GT_SWITCH);
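
Every hunk above applies the same mechanical substitution at the call sites: equality tests against bbJumpKind become KindIs(...) (which also collapses chains such as (bbJumpKind == BBJ_COND) || (bbJumpKind == BBJ_SWITCH) into KindIs(BBJ_COND, BBJ_SWITCH)), reads that need the raw value use GetBBJumpKind(), and writes go through SetBBJumpKind(kind DEBUG_ARG(...)). For readers outside the JIT tree, below is a minimal standalone sketch of that call-site pattern; MockBlock, the simplified setter (which drops the debug-only Compiler* argument), and this particular recursive KindIs are illustrative assumptions, not the actual coreclr definitions.

    // Minimal standalone sketch of the accessor pattern used throughout this patch.
    // MockBlock is NOT the real BasicBlock: the setter below omits the DEBUG_ARG
    // Compiler* parameter, and KindIs is just one plausible variadic implementation.
    #include <cstdio>

    enum BBjumpKinds
    {
        BBJ_NONE,
        BBJ_ALWAYS,
        BBJ_COND,
        BBJ_SWITCH,
        BBJ_RETURN,
        BBJ_THROW
    };

    class MockBlock
    {
        BBjumpKinds bbJumpKind = BBJ_NONE; // private: reads/writes go through the helpers below

    public:
        BBjumpKinds GetBBJumpKind() const
        {
            return bbJumpKind;
        }

        void SetBBJumpKind(BBjumpKinds kind) // real setter also threads a Compiler* in DEBUG builds
        {
            bbJumpKind = kind;
        }

        bool KindIs() const // base case: none of the candidate kinds matched
        {
            return false;
        }

        template <typename... TRest>
        bool KindIs(BBjumpKinds kind, TRest... rest) const // true if the block's kind is any listed kind
        {
            return (bbJumpKind == kind) || KindIs(rest...);
        }
    };

    int main()
    {
        MockBlock block;
        block.SetBBJumpKind(BBJ_COND);

        // Before the patch: (block->bbJumpKind == BBJ_COND) || (block->bbJumpKind == BBJ_SWITCH)
        // After the patch, call sites read:
        if (block.KindIs(BBJ_COND, BBJ_SWITCH))
        {
            printf("block ends in a conditional or switch branch\n");
        }
        return 0;
    }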