diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index 2e06569c183b1..83af36ffb707d 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -1873,6 +1873,7 @@ void Compiler::compInit(ArenaAllocator* pAlloc,
     compJmpOpUsed         = false;
     compLongUsed          = false;
     compTailCallUsed      = false;
+    compLocallocSeen      = false;
     compLocallocUsed      = false;
     compLocallocOptimized = false;
     compQmarkRationalized = false;
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index 18fce48d72b34..7edac733712f5 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -9427,6 +9427,7 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
     bool compLongUsed;          // Does the method use TYP_LONG
     bool compFloatingPointUsed; // Does the method use TYP_FLOAT or TYP_DOUBLE
     bool compTailCallUsed;      // Does the method do a tailcall
+    bool compLocallocSeen;      // Does the method IL have localloc opcode
     bool compLocallocUsed;      // Does the method use localloc.
     bool compLocallocOptimized; // Does the method have an optimized localloc
     bool compQmarkUsed;         // Does the method use GT_QMARK/GT_COLON
@@ -10242,6 +10243,10 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
         return !info.compInitMem && opts.compDbgCode;
     }
 
+    // Returns true if the jit supports having patchpoints in this method.
+    // Optionally, get the reason why not.
+    bool compCanHavePatchpoints(const char** reason = nullptr);
+
 #if defined(DEBUG)
     void compDispLocalVars();
 
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index 7fec5719abc36..85ce701c3e8c9 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -4785,6 +4785,45 @@ inline void LclVarDsc::setLvRefCntWtd(weight_t newValue, RefCountState state)
     m_lvRefCntWtd = newValue;
 }
 
+//------------------------------------------------------------------------------
+// compCanHavePatchpoints: return true if patchpoints are supported in this
+// method.
+//
+// Arguments:
+//    reason - [out, optional] reason why patchpoints are not supported
+//
+// Returns:
+//    True if patchpoints are supported in this method.
+//
+inline bool Compiler::compCanHavePatchpoints(const char** reason)
+{
+    const char* whyNot = nullptr;
+
+#ifdef FEATURE_ON_STACK_REPLACEMENT
+    if (compLocallocSeen)
+    {
+        whyNot = "localloc";
+    }
+    else if ((info.compFlags & CORINFO_FLG_SYNCH) != 0)
+    {
+        whyNot = "synchronized method";
+    }
+    else if (opts.IsReversePInvoke())
+    {
+        whyNot = "reverse pinvoke";
+    }
+#else
+    whyNot = "OSR feature not defined in build";
+#endif
+
+    if (reason != nullptr)
+    {
+        *reason = whyNot;
+    }
+
+    return whyNot == nullptr;
+}
+
 /*****************************************************************************/
 #endif //_COMPILER_HPP_
 /*****************************************************************************/
diff --git a/src/coreclr/jit/fgbasic.cpp b/src/coreclr/jit/fgbasic.cpp
index 57683ab1fb15e..e5e38d4f3edaf 100644
--- a/src/coreclr/jit/fgbasic.cpp
+++ b/src/coreclr/jit/fgbasic.cpp
@@ -2015,6 +2015,8 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, Fixed
 
             case CEE_LOCALLOC:
 
+                compLocallocSeen = true;
+
                 // We now allow localloc callees to become candidates in some cases.
                 if (makeInlineObservations)
                 {
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index 9ab98386c71a4..588baec50b5b0 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -11593,16 +11593,21 @@ void Compiler::impImportBlockCode(BasicBlock* block)
     // Are there any places in the method where we might add a patchpoint?
     if (compHasBackwardJump)
     {
-        // Are patchpoints enabled?
-        if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0))
+        // Are patchpoints enabled and supported?
+        if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0) &&
+            compCanHavePatchpoints())
         {
             // We don't inline at Tier0, if we do, we may need rethink our approach.
             // Could probably support inlines that don't introduce flow.
             assert(!compIsForInlining());
 
             // Is the start of this block a suitable patchpoint?
-            // Current strategy is blocks that are stack-empty and backwards branch targets
-            if (block->bbFlags & BBF_BACKWARD_JUMP_TARGET && (verCurrentState.esStackDepth == 0))
+            // Current strategy is blocks that are stack-empty and backwards branch targets and not in a handler
+            //
+            // Todo (perhaps): bail out of OSR and jit this method with optimization.
+            //
+            if (!block->hasHndIndex() && ((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) &&
+                (verCurrentState.esStackDepth == 0))
             {
                 block->bbFlags |= BBF_PATCHPOINT;
                 setMethodHasPatchpoint();
@@ -11627,12 +11632,33 @@ void Compiler::impImportBlockCode(BasicBlock* block)
         //
         // Todo: stress mode...
         //
-        if ((JitConfig.TC_PartialCompilation() > 0) && opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) &&
-            (block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) &&
-            ((block->bbFlags & BBF_PATCHPOINT) == 0))
+        if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_PartialCompilation() > 0) &&
+            compCanHavePatchpoints())
         {
-            block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT;
-            setMethodHasPartialCompilationPatchpoint();
+            // Is this block a good place for partial compilation?
+            //
+            if ((block != fgFirstBB) && block->isRunRarely() && (verCurrentState.esStackDepth == 0) &&
+                ((block->bbFlags & BBF_PATCHPOINT) == 0) && !block->hasHndIndex())
+            {
+                JITDUMP("\nBlock " FMT_BB " will be a partial compilation patchpoint -- not importing\n", block->bbNum);
+                block->bbFlags |= BBF_PARTIAL_COMPILATION_PATCHPOINT;
+                setMethodHasPartialCompilationPatchpoint();
+
+                // Change block to BBJ_THROW so we won't trigger importation of successors.
+                //
+                block->bbJumpKind = BBJ_THROW;
+
+                // If this method has an explicit generic context, the only uses of it may be in
+                // the IL for this block. So assume it's used.
+                //
+                if (info.compMethodInfo->options &
+                    (CORINFO_GENERICS_CTXT_FROM_METHODDESC | CORINFO_GENERICS_CTXT_FROM_METHODTABLE))
+                {
+                    lvaGenericsContextInUse = true;
+                }
+
+                return;
+            }
         }
 
 #endif // FEATURE_ON_STACK_REPLACEMENT
diff --git a/src/coreclr/jit/patchpoint.cpp b/src/coreclr/jit/patchpoint.cpp
index 48d78babce1df..dd250c8607a11 100644
--- a/src/coreclr/jit/patchpoint.cpp
+++ b/src/coreclr/jit/patchpoint.cpp
@@ -57,35 +57,33 @@ class PatchpointTransformer
         {
             if (block->bbFlags & BBF_PATCHPOINT)
             {
+                // We can't OSR from funclets.
+                //
+                assert(!block->hasHndIndex());
+
                 // Clear the patchpoint flag.
                 //
                 block->bbFlags &= ~BBF_PATCHPOINT;
 
-                // If block is in a handler region, don't insert a patchpoint.
-                // We can't OSR from funclets.
-                //
-                // TODO: check this earlier, somehow, and fall back to fully
-                // optimizing the method (ala QJFL=0).
-                if (compiler->ehGetBlockHndDsc(block) != nullptr)
-                {
-                    JITDUMP("Patchpoint: skipping patchpoint for " FMT_BB " as it is in a handler\n", block->bbNum);
-                    continue;
-                }
-
-                JITDUMP("Patchpoint: loop patchpoint in " FMT_BB "\n", block->bbNum);
-                assert(block != compiler->fgFirstBB);
+                JITDUMP("Patchpoint: regular patchpoint in " FMT_BB "\n", block->bbNum);
                 TransformBlock(block);
                 count++;
             }
             else if (block->bbFlags & BBF_PARTIAL_COMPILATION_PATCHPOINT)
             {
-                if (compiler->ehGetBlockHndDsc(block) != nullptr)
-                {
-                    JITDUMP("Patchpoint: skipping partial compilation patchpoint for " FMT_BB
-                            " as it is in a handler\n",
-                            block->bbNum);
-                    continue;
-                }
+                // We can't OSR from funclets.
+                // Also, we don't import the IL for these blocks.
+                //
+                assert(!block->hasHndIndex());
+
+                // If we're instrumenting, we should not have decided to
+                // put class probes here, as that is driven by looking at IL.
+                //
+                assert((block->bbFlags & BBF_HAS_CLASS_PROFILE) == 0);
+
+                // Clear the partial comp flag.
+                //
+                block->bbFlags &= ~BBF_PARTIAL_COMPILATION_PATCHPOINT;
 
                 JITDUMP("Patchpoint: partial compilation patchpoint in " FMT_BB "\n", block->bbNum);
                 TransformPartialCompilation(block);
@@ -248,15 +246,6 @@ class PatchpointTransformer
             compiler->gtNewHelperCallNode(CORINFO_HELP_PARTIAL_COMPILATION_PATCHPOINT, TYP_VOID, helperArgs);
 
         compiler->fgNewStmtAtEnd(block, helperCall);
-
-        // This block will no longer have class probes.
-        // (They will appear in the OSR variant).
-        //
-        if ((block->bbFlags & BBF_HAS_CLASS_PROFILE) != 0)
-        {
-            JITDUMP("No longer adding class probes to " FMT_BB "\n", block->bbNum);
-            block->bbFlags &= ~BBF_HAS_CLASS_PROFILE;
-        }
     }
 };
 
@@ -282,34 +271,8 @@ PhaseStatus Compiler::fgTransformPatchpoints()
     // We should only be adding patchpoints at Tier0, so should not be in an inlinee
    assert(!compIsForInlining());
 
-    // We currently can't do OSR in methods with localloc.
-    // Such methods don't have a fixed relationship between frame and stack pointers.
-    //
-    // This is true whether or not the localloc was executed in the original method.
-    //
-    // TODO: handle this case, or else check this earlier and fall back to fully
-    // optimizing the method (ala QJFL=0).
-    if (compLocallocUsed)
-    {
-        JITDUMP("\n -- unable to handle methods with localloc\n");
-        return PhaseStatus::MODIFIED_NOTHING;
-    }
-
-    // We currently can't do OSR in synchronized methods. We need to alter
-    // the logic in fgAddSyncMethodEnterExit for OSR to not try and obtain the
-    // monitor (since the original method will have done so) and set the monitor
-    // obtained flag to true (or reuse the original method slot value).
-    if ((info.compFlags & CORINFO_FLG_SYNCH) != 0)
-    {
-        JITDUMP("\n -- unable to handle synchronized methods\n");
-        return PhaseStatus::MODIFIED_NOTHING;
-    }
-
-    if (opts.IsReversePInvoke())
-    {
-        JITDUMP(" -- unable to handle Reverse P/Invoke\n");
-        return PhaseStatus::MODIFIED_NOTHING;
-    }
+    // We should be allowed to have patchpoints in this method.
+    assert(compCanHavePatchpoints());
 
     PatchpointTransformer ppTransformer(this);
     int count = ppTransformer.Run();
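
Note (not part of the diff): the new compCanHavePatchpoints helper is intended to work both as a plain predicate, as used above in importer.cpp and patchpoint.cpp, and as a way to report why patchpoints are disallowed via its optional out parameter. A minimal sketch of a caller using that second form, assuming the usual Compiler member context; the local name and the dump message here are illustrative only:

    // Illustrative sketch only, not part of this change.
    // Query the gate and dump the reason when patchpoints are disallowed.
    const char* reason = nullptr;
    if (!compCanHavePatchpoints(&reason))
    {
        // 'reason' is non-null exactly when patchpoints are not allowed,
        // e.g. "localloc", "synchronized method", or "reverse pinvoke".
        JITDUMP("OSR disabled for this method: %s\n", reason);
    }
    else
    {
        // Safe to mark suitable blocks with BBF_PATCHPOINT or
        // BBF_PARTIAL_COMPILATION_PATCHPOINT during importation.
    }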