From a8babb391d8c4f1b4746a59cc481116f441fb176 Mon Sep 17 00:00:00 2001 From: Andy Ayers Date: Fri, 21 Jan 2022 13:27:26 -0800 Subject: [PATCH 1/7] add osr plus stress tests for libraries --- eng/pipelines/libraries/run-test-job.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/eng/pipelines/libraries/run-test-job.yml b/eng/pipelines/libraries/run-test-job.yml index d864487760908b..0f2070a49bfbf7 100644 --- a/eng/pipelines/libraries/run-test-job.yml +++ b/eng/pipelines/libraries/run-test-job.yml @@ -191,4 +191,8 @@ jobs: - fullpgo_random_gdv - fullpgo_random_edge - fullpgo_random_gdv_edge + - jitosr + - jitosr_stress + - jitosr_stress_random + - jitosr_pgo From a8b8ab135e3992ac0a0f9e0dbd9c38af737a5649 Mon Sep 17 00:00:00 2001 From: Andy Ayers Date: Fri, 21 Jan 2022 18:32:18 -0800 Subject: [PATCH 2/7] fix context reporting issue --- src/coreclr/jit/codegencommon.cpp | 32 ++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 1c9eda6f63afc1..0bfc2a04f55e4d 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -6385,15 +6385,37 @@ void CodeGen::genEnregisterOSRArgsAndLocals() void CodeGen::genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed) { - // For OSR the original method has set this up for us. + assert(compiler->compGeneratingProlog); + + const bool reportArg = compiler->lvaReportParamTypeArg(); + if (compiler->opts.IsOSR()) { - return; - } + if (reportArg) + { + // OSR method will use Tier0 slot to report context arg. + // + JITDUMP("OSR method will use Tier0 frame slot for generics context arg.\n"); + return; + } - assert(compiler->compGeneratingProlog); + if (compiler->lvaKeepAliveAndReportThis()) + { + PatchpointInfo* ppInfo = compiler->info.compPatchpointInfo; + if (ppInfo->HasKeptAliveThis()) + { + // OSR method will use Tier0 slot to report `this` as context. + // + JITDUMP("OSR method will use Tier0 frame slot for generic context `this`.\n"); + return; + } - bool reportArg = compiler->lvaReportParamTypeArg(); + // OSR method will use OSR frame slot to report `this` as context, + // and must initialize the slot. + // + JITDUMP("OSR method will report `this` generics context on its frame.\n"); + } + } // We should report either generic context arg or "this" when used so. if (!reportArg) From 3766dbc697dd548be35286fc05c5782c5697cc76 Mon Sep 17 00:00:00 2001 From: Andy Ayers Date: Tue, 25 Jan 2022 08:58:47 -0800 Subject: [PATCH 3/7] have tier0 method always report generics context, then OSR method can just use that --- src/coreclr/jit/codegencommon.cpp | 23 ++++++++--------------- src/coreclr/jit/compiler.hpp | 18 +++++++++++++++--- 2 files changed, 23 insertions(+), 18 deletions(-) diff --git a/src/coreclr/jit/codegencommon.cpp b/src/coreclr/jit/codegencommon.cpp index 0bfc2a04f55e4d..fe8b2d82dcaec9 100644 --- a/src/coreclr/jit/codegencommon.cpp +++ b/src/coreclr/jit/codegencommon.cpp @@ -6391,30 +6391,23 @@ void CodeGen::genReportGenericContextArg(regNumber initReg, bool* pInitRegZeroed if (compiler->opts.IsOSR()) { + PatchpointInfo* const ppInfo = compiler->info.compPatchpointInfo; if (reportArg) { // OSR method will use Tier0 slot to report context arg. 
             //
+            assert(ppInfo->HasGenericContextArgOffset());
             JITDUMP("OSR method will use Tier0 frame slot for generics context arg.\n");
-            return;
         }
-
-        if (compiler->lvaKeepAliveAndReportThis())
+        else if (compiler->lvaKeepAliveAndReportThis())
         {
-            PatchpointInfo* ppInfo = compiler->info.compPatchpointInfo;
-            if (ppInfo->HasKeptAliveThis())
-            {
-                // OSR method will use Tier0 slot to report `this` as context.
-                //
-                JITDUMP("OSR method will use Tier0 frame slot for generic context `this`.\n");
-                return;
-            }
-
-            // OSR method will use OSR frame slot to report `this` as context,
-            // and must initialize the slot.
+            // OSR method will use Tier0 slot to report `this` as context.
             //
-            JITDUMP("OSR method will report `this` generics context on its frame.\n");
+            assert(ppInfo->HasKeptAliveThis());
+            JITDUMP("OSR method will use Tier0 frame slot for generics context `this`.\n");
         }
+
+        return;
     }
 
     // We should report either generic context arg or "this" when used so.
diff --git a/src/coreclr/jit/compiler.hpp b/src/coreclr/jit/compiler.hpp
index 2379c0e0ac6ac2..d83c7024496192 100644
--- a/src/coreclr/jit/compiler.hpp
+++ b/src/coreclr/jit/compiler.hpp
@@ -1986,13 +1986,18 @@ inline bool Compiler::lvaKeepAliveAndReportThis()
     // the VM requires us to keep the generics context alive or it is used in a look-up.
     // We keep it alive in the lookup scenario, even when the VM didn't ask us to,
     // because collectible types need the generics context when gc-ing.
+    //
+    // Methods that can inspire OSR methods must always report context as live
+    //
     if (genericsContextIsThis)
     {
-        const bool mustKeep = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE) != 0;
+        const bool mustKeep      = (info.compMethodInfo->options & CORINFO_GENERICS_CTXT_KEEP_ALIVE) != 0;
+        const bool hasPatchpoint = doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints();
 
-        if (lvaGenericsContextInUse || mustKeep)
+        if (lvaGenericsContextInUse || mustKeep || hasPatchpoint)
         {
-            JITDUMP("Reporting this as generic context: %s\n", mustKeep ? "must keep" : "referenced");
+            JITDUMP("Reporting this as generic context: %s\n",
+                    mustKeep ? "must keep" : (hasPatchpoint ? "patchpoints" : "referenced"));
             return true;
         }
     }
@@ -2024,6 +2029,13 @@ inline bool Compiler::lvaReportParamTypeArg()
         {
             return true;
         }
+
+        // Methods that have patchpoints always report context as live
+        //
+        if (doesMethodHavePatchpoints() || doesMethodHavePartialCompilationPatchpoints())
+        {
+            return true;
+        }
     }
 
     // Otherwise, we don't need to report it -- the generics context parameter is unused.

From 7ba31ebc7bcdd2124494be6d3f6e5012a77aa810 Mon Sep 17 00:00:00 2001
From: Andy Ayers
Date: Wed, 26 Jan 2022 09:42:36 -0800
Subject: [PATCH 4/7] * Add ability to suppress patchpoints by hash.

* Dump IR after patchpoints phase, if any were added

* Fix bug in recursive tail call detection -- if we devirtualize we need to
  update the method handle we use. This last change is the key bug fix -- OSR
  methods rely on getting this detection right so they know if they need to
  import the method entry block.
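
For anyone experimenting with the hash suppression: it follows the
ConfigMethodRange pattern used by other JIT debug knobs -- roughly, a set of
method-hash ranges, with an empty setting meaning "allow everything". The
snippet below is only a standalone sketch of that gating pattern, not JIT
source; HashRange and isMethodEnabled are made-up illustrative names.

    // Illustrative sketch of hash-range gating; not the ConfigMethodRange
    // implementation.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct HashRange
    {
        uint32_t lo;
        uint32_t hi;
    };

    // An empty configuration enables every method; otherwise the method hash
    // must fall inside one of the configured [lo, hi] ranges.
    static bool isMethodEnabled(const std::vector<HashRange>& ranges, uint32_t hash)
    {
        if (ranges.empty())
        {
            return true;
        }
        for (const HashRange& r : ranges)
        {
            if ((hash >= r.lo) && (hash <= r.hi))
            {
                return true;
            }
        }
        return false;
    }

    int main()
    {
        // Methods whose hash lands in range keep their patchpoints; the rest
        // are still jitted at Tier0, just without patchpoints.
        std::vector<HashRange> ranges = {{0x10000000u, 0x1fffffffu}};
        printf("in range:     %d\n", isMethodEnabled(ranges, 0x12345678u)); // prints 1
        printf("out of range: %d\n", isMethodEnabled(ranges, 0x23456789u)); // prints 0
        return 0;
    }

A linear scan is fine here; these configs only ever hold a handful of ranges.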
--- src/coreclr/jit/importer.cpp | 39 +++++++++++++++++++++++++++++-- src/coreclr/jit/jitconfigvalues.h | 12 ++++++++-- src/coreclr/jit/phase.cpp | 31 ++++++++++++++++-------- 3 files changed, 68 insertions(+), 14 deletions(-) diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp index 08597695cf2d43..eb315d415404a7 100644 --- a/src/coreclr/jit/importer.cpp +++ b/src/coreclr/jit/importer.cpp @@ -9543,6 +9543,10 @@ var_types Compiler::impImportCall(OPCODE opcode, // Take care to pass raw IL offset here as the 'debug info' might be different for // inlinees. rawILOffset); + + // Devirtualization may change which method gets invoked. Update our local cache. + // + methHnd = callInfo->hMethod; } if (impIsThis(obj)) @@ -11806,9 +11810,40 @@ void Compiler::impImportBlockCode(BasicBlock* block) #ifdef FEATURE_ON_STACK_REPLACEMENT - // Is OSR enabled? + bool enablePatchpoints = opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0); + +#ifdef DEBUG + + // Optionally suppress patchpoints by method hash // - if (opts.jitFlags->IsSet(JitFlags::JIT_FLAG_TIER0) && (JitConfig.TC_OnStackReplacement() > 0)) + static ConfigMethodRange JitEnablePatchpointRange; + JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange()); + const unsigned hash = impInlineRoot()->info.compMethodHash(); + const bool bbbb = JitEnablePatchpointRange.Contains(hash); + + static bool firstYes = true; + if (enablePatchpoints && bbbb && firstYes) + { + printf("!!! PATCHPOINT RANGE\n"); + JitEnablePatchpointRange.Dump(); + printf("!!! ALLOW %s (hash 0x%08x)\n", info.compFullName, hash); + firstYes = false; + } + + static bool firstNo = true; + if (enablePatchpoints && !bbbb && firstNo) + { + printf("!!! PATCHPOINT RANGE\n"); + JitEnablePatchpointRange.Dump(); + printf("!!! DENY %s (hash 0x%08x)\n", info.compFullName, hash); + firstNo = false; + } + + enablePatchpoints &= bbbb; + +#endif // DEBUG + + if (enablePatchpoints) { // We don't inline at Tier0, if we do, we may need rethink our approach. // Could probably support inlines that don't introduce flow. diff --git a/src/coreclr/jit/jitconfigvalues.h b/src/coreclr/jit/jitconfigvalues.h index 4fc4594c6fb2b9..1346c2069da170 100644 --- a/src/coreclr/jit/jitconfigvalues.h +++ b/src/coreclr/jit/jitconfigvalues.h @@ -471,8 +471,16 @@ CONFIG_INTEGER(JitOffsetOnStackReplacement, W("JitOffsetOnStackReplacement"), -1 #endif // debug #if defined(DEBUG) -CONFIG_STRING(JitEnableOsrRange, W("JitEnableOsrRange")) // Enable osr for only some methods -#endif // debug +// EnableOsrRange allows you to limit the set of methods that will rely on OSR to escape +// from Tier0 code. Methods outside the range that would normally be jitted at Tier0 +// and have patchpoints will instead be switched to optimized. +CONFIG_STRING(JitEnableOsrRange, W("JitEnableOsrRange")) +// EnablePatchpointRange allows you to limit the set of Tier0 methods that +// will have patchpoints, and hence control which methods will create OSR methods. +// Unlike EnableOsrRange, it will not alter the optimization setting for methods +// outside the enabled range. 
+CONFIG_STRING(JitEnablePatchpointRange, W("JitEnablePatchpointRange"))
+#endif
 
 // Profile instrumentation options
 CONFIG_INTEGER(JitMinimalJitProfiling, W("JitMinimalJitProfiling"), 1)
diff --git a/src/coreclr/jit/phase.cpp b/src/coreclr/jit/phase.cpp
index f08134bf11532b..7c580acb816811 100644
--- a/src/coreclr/jit/phase.cpp
+++ b/src/coreclr/jit/phase.cpp
@@ -162,16 +162,27 @@ void Phase::PostPhase(PhaseStatus status)
     // well as the new-style phases that have been updated to return
     // PhaseStatus from their DoPhase methods.
     //
-    static Phases s_allowlist[] = {PHASE_IMPORTATION, PHASE_IBCINSTR,
-                                   PHASE_IBCPREP, PHASE_INCPROFILE,
-                                   PHASE_INDXCALL, PHASE_MORPH_INLINE,
-                                   PHASE_ALLOCATE_OBJECTS, PHASE_EMPTY_TRY,
-                                   PHASE_EMPTY_FINALLY, PHASE_MERGE_FINALLY_CHAINS,
-                                   PHASE_CLONE_FINALLY, PHASE_MERGE_THROWS,
-                                   PHASE_MORPH_GLOBAL, PHASE_INVERT_LOOPS,
-                                   PHASE_OPTIMIZE_LAYOUT, PHASE_FIND_LOOPS,
-                                   PHASE_BUILD_SSA, PHASE_RATIONALIZE,
-                                   PHASE_LOWERING, PHASE_STACK_LEVEL_SETTER};
+    static Phases s_allowlist[] = {PHASE_IMPORTATION,
+                                   PHASE_IBCINSTR,
+                                   PHASE_IBCPREP,
+                                   PHASE_INCPROFILE,
+                                   PHASE_INDXCALL,
+                                   PHASE_PATCHPOINTS,
+                                   PHASE_MORPH_INLINE,
+                                   PHASE_ALLOCATE_OBJECTS,
+                                   PHASE_EMPTY_TRY,
+                                   PHASE_EMPTY_FINALLY,
+                                   PHASE_MERGE_FINALLY_CHAINS,
+                                   PHASE_CLONE_FINALLY,
+                                   PHASE_MERGE_THROWS,
+                                   PHASE_MORPH_GLOBAL,
+                                   PHASE_INVERT_LOOPS,
+                                   PHASE_OPTIMIZE_LAYOUT,
+                                   PHASE_FIND_LOOPS,
+                                   PHASE_BUILD_SSA,
+                                   PHASE_RATIONALIZE,
+                                   PHASE_LOWERING,
+                                   PHASE_STACK_LEVEL_SETTER};
 
     if (madeChanges)
     {

From bfd1dd81bdec4dffcb41e0b29f23c27186f7b8ae Mon Sep 17 00:00:00 2001
From: Andy Ayers
Date: Wed, 26 Jan 2022 09:55:41 -0800
Subject: [PATCH 5/7] remove some debug scaffolding

---
 src/coreclr/jit/importer.cpp | 25 +++----------------------
 1 file changed, 3 insertions(+), 22 deletions(-)

diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index eb315d415404a7..aadb8d4c18d163 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -11818,28 +11818,9 @@ void Compiler::impImportBlockCode(BasicBlock* block)
     //
     static ConfigMethodRange JitEnablePatchpointRange;
     JitEnablePatchpointRange.EnsureInit(JitConfig.JitEnablePatchpointRange());
-    const unsigned hash = impInlineRoot()->info.compMethodHash();
-    const bool bbbb = JitEnablePatchpointRange.Contains(hash);
-
-    static bool firstYes = true;
-    if (enablePatchpoints && bbbb && firstYes)
-    {
-        printf("!!! PATCHPOINT RANGE\n");
-        JitEnablePatchpointRange.Dump();
-        printf("!!! ALLOW %s (hash 0x%08x)\n", info.compFullName, hash);
-        firstYes = false;
-    }
-
-    static bool firstNo = true;
-    if (enablePatchpoints && !bbbb && firstNo)
-    {
-        printf("!!! PATCHPOINT RANGE\n");
-        JitEnablePatchpointRange.Dump();
-        printf("!!! DENY %s (hash 0x%08x)\n", info.compFullName, hash);
-        firstNo = false;
-    }
-
-    enablePatchpoints &= bbbb;
+    const unsigned hash    = impInlineRoot()->info.compMethodHash();
+    const bool     inRange = JitEnablePatchpointRange.Contains(hash);
+    enablePatchpoints &= inRange;
 
 #endif // DEBUG
 

From 2a04aae2a3487d3977bf9c71399a44d5a937ab18 Mon Sep 17 00:00:00 2001
From: Andy Ayers
Date: Wed, 26 Jan 2022 11:57:10 -0800
Subject: [PATCH 6/7] refine OSR enabling/backoff to opt logic

---
 src/coreclr/jit/compiler.cpp | 42 +++++++++++++++++++++++++++-----------------
 src/coreclr/jit/importer.cpp |  8 ++++----
 2 files changed, 29 insertions(+), 21 deletions(-)

diff --git a/src/coreclr/jit/compiler.cpp b/src/coreclr/jit/compiler.cpp
index 56701ceb08fad3..3f29540e4f0eb0 100644
--- a/src/coreclr/jit/compiler.cpp
+++ b/src/coreclr/jit/compiler.cpp
@@ -6401,9 +6401,10 @@ int Compiler::compCompileHelper(CORINFO_MODULE_HANDLE classPtr,
         // Honor the config setting that tells the jit to
         // always optimize methods with loops.
         //
-        // If that's not set, and OSR is enabled, the jit may still
+        // If neither of those apply, and OSR is enabled, the jit may still
         // decide to optimize, if there's something in the method that
-        // OSR currently cannot handle.
+        // OSR currently cannot handle, or we're optionally suppressing
+        // OSR by method hash.
         //
         const char* reason = nullptr;
 
         {
             reason = "tail.call and not BBINSTR";
         }
-        else if ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0)
+        else if (compHasBackwardJump && ((info.compFlags & CORINFO_FLG_DISABLE_TIER0_FOR_LOOPS) != 0))
         {
-            if (compHasBackwardJump)
-            {
-                reason = "loop";
-            }
+            reason = "loop";
         }
-        else if (JitConfig.TC_OnStackReplacement() > 0)
+
+        if (compHasBackwardJump && (reason == nullptr) && (JitConfig.TC_OnStackReplacement() > 0))
         {
-            const bool patchpointsOK = compCanHavePatchpoints(&reason);
-            assert(patchpointsOK || (reason != nullptr));
+            bool canEscapeViaOSR = compCanHavePatchpoints(&reason);
 
 #ifdef DEBUG
-            // Optionally disable OSR by method hash.
-            //
-            if (patchpointsOK && compHasBackwardJump)
+            if (canEscapeViaOSR)
             {
+                // Optionally disable OSR by method hash. This will force any
+                // method that might otherwise get trapped in Tier0 to be optimized.
+                //
                 static ConfigMethodRange JitEnableOsrRange;
                 JitEnableOsrRange.EnsureInit(JitConfig.JitEnableOsrRange());
                 const unsigned hash = impInlineRoot()->info.compMethodHash();
                 if (!JitEnableOsrRange.Contains(hash))
                 {
-                    JITDUMP("Disabling OSR -- Method hash 0x%08x not within range ", hash);
-                    JITDUMPEXEC(JitEnableOsrRange.Dump());
-                    JITDUMP("\n");
-                    reason = "OSR disabled by JitEnableOsrRange";
+                    canEscapeViaOSR = false;
+                    reason          = "OSR disabled by JitEnableOsrRange";
                 }
             }
 #endif
+
+            if (canEscapeViaOSR)
+            {
+                JITDUMP("\nOSR enabled for this method\n");
+            }
+            else
+            {
+                JITDUMP("\nOSR disabled for this method: %s\n", reason);
+                assert(reason != nullptr);
+            }
         }
 
         if (reason != nullptr)
diff --git a/src/coreclr/jit/importer.cpp b/src/coreclr/jit/importer.cpp
index aadb8d4c18d163..83ec27f6528def 100644
--- a/src/coreclr/jit/importer.cpp
+++ b/src/coreclr/jit/importer.cpp
@@ -11839,13 +11839,13 @@ void Compiler::impImportBlockCode(BasicBlock* block)
         //
         if (!compTailPrefixSeen)
        {
-            assert(compCanHavePatchpoints());
-
             // The normaly policy is only to add patchpoints to the targets of lexically
             // backwards branches.
// if (compHasBackwardJump) { + assert(compCanHavePatchpoints()); + // Is the start of this block a suitable patchpoint? // if (((block->bbFlags & BBF_BACKWARD_JUMP_TARGET) != 0) && (verCurrentState.esStackDepth == 0)) @@ -11876,8 +11876,8 @@ void Compiler::impImportBlockCode(BasicBlock* block) const bool tryOffsetOSR = offsetOSR >= 0; const bool tryRandomOSR = randomOSR > 0; - if ((tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && !block->hasHndIndex() && - ((block->bbFlags & BBF_PATCHPOINT) == 0)) + if (compCanHavePatchpoints() && (tryOffsetOSR || tryRandomOSR) && (verCurrentState.esStackDepth == 0) && + !block->hasHndIndex() && ((block->bbFlags & BBF_PATCHPOINT) == 0)) { // Block start can have a patchpoint. See if we should add one. // From 91856090ca44af59593546de9a3462767a01e926 Mon Sep 17 00:00:00 2001 From: Andy Ayers Date: Thu, 27 Jan 2022 14:47:29 -0800 Subject: [PATCH 7/7] reorder phase whitelist to more closely match phase order --- src/coreclr/jit/phase.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/coreclr/jit/phase.cpp b/src/coreclr/jit/phase.cpp index f0ea25e4774f55..5f2e2172c6f4a5 100644 --- a/src/coreclr/jit/phase.cpp +++ b/src/coreclr/jit/phase.cpp @@ -167,12 +167,12 @@ void Phase::PostPhase(PhaseStatus status) // clang-format off static Phases s_allowlist[] = { + PHASE_INCPROFILE, + PHASE_IBCPREP, PHASE_IMPORTATION, + PHASE_PATCHPOINTS, PHASE_IBCINSTR, - PHASE_IBCPREP, - PHASE_INCPROFILE, PHASE_INDXCALL, - PHASE_PATCHPOINTS, PHASE_MORPH_INLINE, PHASE_ALLOCATE_OBJECTS, PHASE_EMPTY_TRY,