Skip to content

Commit 76acd04

Browse files
authored
JIT: Switch arm32 call-finally to be similar to other targets (#95117)
This switches arm32 to generate call-finally in the same way as other targets, with a call to the finally funclet, instead of loading a different return address. Loading a different return address confuses the hardware's return address predictor, which has large negative perf impact. Below are two micro benchmarks from my rpi, both run with `DOTNET_JitEnableFinallyCloning=0`, which show the cost of messing up return address prediction. The first runs a loop that calls a finally funclet on every iteration. The second sets up a deeper stack and calls a funclet, which means that the old scheme then mispredicts the return for all subsequent returns. The second benchmark is over 4 times faster with this change. ```csharp public static void Main() { for (int i = 0; i < 4; i++) { for (int j = 0; j < 100; j++) { Run(100); Call1(); } Thread.Sleep(100); } Stopwatch timer = Stopwatch.StartNew(); Run(100_000_000); timer.Stop(); Console.WriteLine("Elapsed: {0}ms", timer.ElapsedMilliseconds); timer.Restart(); for (int i = 0; i < 100_000_000; i++) Call1(); timer.Stop(); Console.WriteLine("Elapsed: {0}ms", timer.ElapsedMilliseconds); } public static long Run(int iters) { long sum = 0; for (int i = 0; i < iters; i++) { try { sum += i; } finally { sum += 2 * i; } } return sum; } [MethodImpl(MethodImplOptions.NoInlining)] public static int Call1() => Call2() + 1; [MethodImpl(MethodImplOptions.NoInlining)] public static int Call2() => Call3() + 2; [MethodImpl(MethodImplOptions.NoInlining)] public static int Call3() => Call4() + 3; [MethodImpl(MethodImplOptions.NoInlining)] public static int Call4() => Call5() + 4; [MethodImpl(MethodImplOptions.NoInlining)] public static int Call5() => Call6() + 5; [MethodImpl(MethodImplOptions.NoInlining)] public static int Call6() => Call7() + 6; [MethodImpl(MethodImplOptions.NoInlining)] public static int Call7() => Call8() + 7; [MethodImpl(MethodImplOptions.NoInlining)] public static int Call8() => Call9() + 8; [MethodImpl(MethodImplOptions.NoInlining)] 
public static int Call9() => Call10() + 9; [MethodImpl(MethodImplOptions.NoInlining)] public static int Call10() => Call11() + 10; [MethodImpl(MethodImplOptions.NoInlining)] public static int Call11() => Call12() + 11; [MethodImpl(MethodImplOptions.NoInlining)] public static int Call12() => Call13() + 12; [MethodImpl(MethodImplOptions.NoInlining)] public static int Call13() => Finally(); public static int Finally() { int result = 10; try { result += 5; } finally { result += 10; } return result; } ``` Output before: ```scala Elapsed: 916ms Elapsed: 18322ms ``` Output after: ```scala Elapsed: 856ms Elapsed: 4010ms ``` Fix #59453 Fix #66578
1 parent 08bef8e commit 76acd04

20 files changed

+83
-573
lines changed

src/coreclr/jit/block.cpp

Lines changed: 0 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -520,12 +520,6 @@ void BasicBlock::dspFlags()
520520
{
521521
printf("nullcheck ");
522522
}
523-
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
524-
if (bbFlags & BBF_FINALLY_TARGET)
525-
{
526-
printf("ftarget ");
527-
}
528-
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
529523
if (bbFlags & BBF_BACKWARD_JUMP)
530524
{
531525
printf("bwd ");
@@ -1671,16 +1665,8 @@ BasicBlock* BasicBlock::New(Compiler* compiler, BBjumpKinds jumpKind, unsigned j
16711665
//
16721666
bool BasicBlock::isBBCallAlwaysPair() const
16731667
{
1674-
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
1675-
if (this->KindIs(BBJ_CALLFINALLY))
1676-
#else
16771668
if (this->KindIs(BBJ_CALLFINALLY) && !(this->bbFlags & BBF_RETLESS_CALL))
1678-
#endif
16791669
{
1680-
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
1681-
// On ARM, there are no retless BBJ_CALLFINALLY.
1682-
assert(!(this->bbFlags & BBF_RETLESS_CALL));
1683-
#endif
16841670
// Some asserts that the next block is a BBJ_ALWAYS of the proper form.
16851671
assert(!this->IsLast());
16861672
assert(this->Next()->KindIs(BBJ_ALWAYS));

src/coreclr/jit/block.h

Lines changed: 17 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -409,38 +409,27 @@ enum BasicBlockFlags : unsigned __int64
409409
BBF_HAS_MDARRAYREF = MAKE_BBFLAG(24), // Block has a multi-dimensional array reference
410410
BBF_HAS_NEWOBJ = MAKE_BBFLAG(25), // BB contains 'new' of an object type.
411411

412-
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
413-
414-
BBF_FINALLY_TARGET = MAKE_BBFLAG(26), // BB is the target of a finally return: where a finally will return during
415-
// non-exceptional flow. Because the ARM calling sequence for calling a
416-
// finally explicitly sets the return address to the finally target and jumps
417-
// to the finally, instead of using a call instruction, ARM needs this to
418-
// generate correct code at the finally target, to allow for proper stack
419-
// unwind from within a non-exceptional call to a finally.
420-
421-
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
422-
423-
BBF_RETLESS_CALL = MAKE_BBFLAG(27), // BBJ_CALLFINALLY that will never return (and therefore, won't need a paired
412+
BBF_RETLESS_CALL = MAKE_BBFLAG(26), // BBJ_CALLFINALLY that will never return (and therefore, won't need a paired
424413
// BBJ_ALWAYS); see isBBCallAlwaysPair().
425-
BBF_LOOP_PREHEADER = MAKE_BBFLAG(28), // BB is a loop preheader block
426-
BBF_COLD = MAKE_BBFLAG(29), // BB is cold
427-
BBF_PROF_WEIGHT = MAKE_BBFLAG(30), // BB weight is computed from profile data
428-
BBF_KEEP_BBJ_ALWAYS = MAKE_BBFLAG(31), // A special BBJ_ALWAYS block, used by EH code generation. Keep the jump kind
414+
BBF_LOOP_PREHEADER = MAKE_BBFLAG(27), // BB is a loop preheader block
415+
BBF_COLD = MAKE_BBFLAG(28), // BB is cold
416+
BBF_PROF_WEIGHT = MAKE_BBFLAG(29), // BB weight is computed from profile data
417+
BBF_KEEP_BBJ_ALWAYS = MAKE_BBFLAG(30), // A special BBJ_ALWAYS block, used by EH code generation. Keep the jump kind
429418
// as BBJ_ALWAYS. Used for the paired BBJ_ALWAYS block following the
430419
// BBJ_CALLFINALLY block, as well as, on x86, the final step block out of a
431420
// finally.
432-
BBF_HAS_CALL = MAKE_BBFLAG(32), // BB contains a call
433-
BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY = MAKE_BBFLAG(33), // Block is dominated by exceptional entry.
434-
BBF_BACKWARD_JUMP = MAKE_BBFLAG(34), // BB is surrounded by a backward jump/switch arc
435-
BBF_BACKWARD_JUMP_SOURCE = MAKE_BBFLAG(35), // Block is a source of a backward jump
436-
BBF_BACKWARD_JUMP_TARGET = MAKE_BBFLAG(36), // Block is a target of a backward jump
437-
BBF_PATCHPOINT = MAKE_BBFLAG(37), // Block is a patchpoint
438-
BBF_PARTIAL_COMPILATION_PATCHPOINT = MAKE_BBFLAG(38), // Block is a partial compilation patchpoint
439-
BBF_HAS_HISTOGRAM_PROFILE = MAKE_BBFLAG(39), // BB contains a call needing a histogram profile
440-
BBF_TAILCALL_SUCCESSOR = MAKE_BBFLAG(40), // BB has pred that has potential tail call
441-
BBF_RECURSIVE_TAILCALL = MAKE_BBFLAG(41), // Block has recursive tailcall that may turn into a loop
442-
BBF_NO_CSE_IN = MAKE_BBFLAG(42), // Block should kill off any incoming CSE
443-
BBF_CAN_ADD_PRED = MAKE_BBFLAG(43), // Ok to add pred edge to this block, even when "safe" edge creation disabled
421+
BBF_HAS_CALL = MAKE_BBFLAG(31), // BB contains a call
422+
BBF_DOMINATED_BY_EXCEPTIONAL_ENTRY = MAKE_BBFLAG(32), // Block is dominated by exceptional entry.
423+
BBF_BACKWARD_JUMP = MAKE_BBFLAG(33), // BB is surrounded by a backward jump/switch arc
424+
BBF_BACKWARD_JUMP_SOURCE = MAKE_BBFLAG(34), // Block is a source of a backward jump
425+
BBF_BACKWARD_JUMP_TARGET = MAKE_BBFLAG(35), // Block is a target of a backward jump
426+
BBF_PATCHPOINT = MAKE_BBFLAG(36), // Block is a patchpoint
427+
BBF_PARTIAL_COMPILATION_PATCHPOINT = MAKE_BBFLAG(37), // Block is a partial compilation patchpoint
428+
BBF_HAS_HISTOGRAM_PROFILE = MAKE_BBFLAG(38), // BB contains a call needing a histogram profile
429+
BBF_TAILCALL_SUCCESSOR = MAKE_BBFLAG(39), // BB has pred that has potential tail call
430+
BBF_RECURSIVE_TAILCALL = MAKE_BBFLAG(40), // Block has recursive tailcall that may turn into a loop
431+
BBF_NO_CSE_IN = MAKE_BBFLAG(41), // Block should kill off any incoming CSE
432+
BBF_CAN_ADD_PRED = MAKE_BBFLAG(42), // Ok to add pred edge to this block, even when "safe" edge creation disabled
444433

445434
// The following are sets of flags.
446435

@@ -1234,10 +1223,6 @@ struct BasicBlock : private LIR::Range
12341223

12351224
void* bbEmitCookie;
12361225

1237-
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
1238-
void* bbUnwindNopEmitCookie;
1239-
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
1240-
12411226
#ifdef VERIFIER
12421227
stackDesc bbStackIn; // stack descriptor for input
12431228
stackDesc bbStackOut; // stack descriptor for output

src/coreclr/jit/codegen.h

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -626,9 +626,6 @@ class CodeGen final : public CodeGenInterface
626626
void genSetPSPSym(regNumber initReg, bool* pInitRegZeroed);
627627

628628
void genUpdateCurrentFunclet(BasicBlock* block);
629-
#if defined(TARGET_ARM)
630-
void genInsertNopForUnwinder(BasicBlock* block);
631-
#endif
632629

633630
#else // !FEATURE_EH_FUNCLETS
634631

src/coreclr/jit/codegenarm.cpp

Lines changed: 39 additions & 45 deletions
Original file line numberDiff line numberDiff line change
@@ -117,33 +117,55 @@ bool CodeGen::genStackPointerAdjustment(ssize_t spDelta, regNumber tmpReg)
117117
//
118118
BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
119119
{
120-
BasicBlock* bbFinallyRet = nullptr;
120+
GetEmitter()->emitIns_J(INS_bl, block->GetJumpDest());
121121

122-
// We don't have retless calls, since we use the BBJ_ALWAYS to point at a NOP pad where
123-
// we would have otherwise created retless calls.
124-
assert(block->isBBCallAlwaysPair());
122+
BasicBlock* nextBlock = block->Next();
125123

126-
assert(!block->IsLast());
127-
assert(block->Next()->KindIs(BBJ_ALWAYS));
128-
assert(block->Next()->HasJump());
129-
assert(block->Next()->GetJumpDest()->bbFlags & BBF_FINALLY_TARGET);
124+
if (block->bbFlags & BBF_RETLESS_CALL)
125+
{
126+
if ((nextBlock == nullptr) || !BasicBlock::sameEHRegion(block, nextBlock))
127+
{
128+
instGen(INS_BREAKPOINT);
129+
}
130+
}
131+
else
132+
{
133+
assert((nextBlock != nullptr) && nextBlock->isBBCallAlwaysPairTail());
134+
135+
// Because of the way the flowgraph is connected, the liveness info for this one instruction
136+
// after the call is not (can not be) correct in cases where a variable has a last use in the
137+
// handler. So turn off GC reporting for this single instruction.
138+
GetEmitter()->emitDisableGC();
130139

131-
bbFinallyRet = block->Next()->GetJumpDest();
140+
BasicBlock* const jumpDest = nextBlock->GetJumpDest();
132141

133-
// Load the address where the finally funclet should return into LR.
134-
// The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do the return.
135-
genMov32RelocatableDisplacement(bbFinallyRet, REG_LR);
142+
// Now go to where the finally funclet needs to return to.
143+
if (nextBlock->NextIs(jumpDest) && !compiler->fgInDifferentRegions(nextBlock, jumpDest))
144+
{
145+
// Fall-through.
146+
// TODO-ARM-CQ: Can we get rid of this instruction, and just have the call return directly
147+
// to the next instruction? This would depend on stack walking from within the finally
148+
// handler working without this instruction being in this special EH region.
149+
instGen(INS_nop);
150+
}
151+
else
152+
{
153+
GetEmitter()->emitIns_J(INS_b, jumpDest);
154+
}
136155

137-
// Jump to the finally BB
138-
inst_JMP(EJ_jmp, block->GetJumpDest());
156+
GetEmitter()->emitEnableGC();
157+
}
139158

140159
// The BBJ_ALWAYS is used because the BBJ_CALLFINALLY can't point to the
141160
// jump target using bbJumpDest - that is already used to point
142161
// to the finally block. So just skip past the BBJ_ALWAYS unless the
143162
// block is RETLESS.
144-
assert(!(block->bbFlags & BBF_RETLESS_CALL));
145-
assert(block->isBBCallAlwaysPair());
146-
return block->Next();
163+
if (!(block->bbFlags & BBF_RETLESS_CALL))
164+
{
165+
assert(block->isBBCallAlwaysPair());
166+
block = nextBlock;
167+
}
168+
return block;
147169
}
148170

149171
//------------------------------------------------------------------------
@@ -2572,34 +2594,6 @@ void CodeGen::genSetPSPSym(regNumber initReg, bool* pInitRegZeroed)
25722594
GetEmitter()->emitIns_S_R(INS_str, EA_PTRSIZE, regTmp, compiler->lvaPSPSym, 0);
25732595
}
25742596

2575-
void CodeGen::genInsertNopForUnwinder(BasicBlock* block)
2576-
{
2577-
// If this block is the target of a finally return, we need to add a preceding NOP, in the same EH region,
2578-
// so the unwinder doesn't get confused by our "movw lr, xxx; movt lr, xxx; b Lyyy" calling convention that
2579-
// calls the funclet during non-exceptional control flow.
2580-
if (block->bbFlags & BBF_FINALLY_TARGET)
2581-
{
2582-
assert(block->bbFlags & BBF_HAS_LABEL);
2583-
2584-
#ifdef DEBUG
2585-
if (compiler->verbose)
2586-
{
2587-
printf("\nEmitting finally target NOP predecessor for " FMT_BB "\n", block->bbNum);
2588-
}
2589-
#endif
2590-
// Create a label that we'll use for computing the start of an EH region, if this block is
2591-
// at the beginning of such a region. If we used the existing bbEmitCookie as is for
2592-
// determining the EH regions, then this NOP would end up outside of the region, if this
2593-
// block starts an EH region. If we pointed the existing bbEmitCookie here, then the NOP
2594-
// would be executed, which we would prefer not to do.
2595-
2596-
block->bbUnwindNopEmitCookie = GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
2597-
gcInfo.gcRegByrefSetCur, false DEBUG_ARG(block));
2598-
2599-
instGen(INS_nop);
2600-
}
2601-
}
2602-
26032597
//-----------------------------------------------------------------------------
26042598
// genZeroInitFrameUsingBlockInit: architecture-specific helper for genZeroInitFrame in the case
26052599
// `genUseBlockInit` is set.

src/coreclr/jit/codegencommon.cpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -909,7 +909,7 @@ void CodeGen::genDefineTempLabel(BasicBlock* label)
909909
{
910910
genLogLabel(label);
911911
label->bbEmitCookie = GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
912-
gcInfo.gcRegByrefSetCur, false DEBUG_ARG(label));
912+
gcInfo.gcRegByrefSetCur DEBUG_ARG(label));
913913
}
914914

915915
// genDefineInlineTempLabel: Define an inline label that does not affect the GC

src/coreclr/jit/codegenlinear.cpp

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -290,10 +290,6 @@ void CodeGen::genCodeForBBlist()
290290
}
291291
}
292292

293-
#if defined(TARGET_ARM)
294-
genInsertNopForUnwinder(block);
295-
#endif
296-
297293
/* Start a new code output block */
298294

299295
genUpdateCurrentFunclet(block);
@@ -351,7 +347,7 @@ void CodeGen::genCodeForBBlist()
351347
// Mark a label and update the current set of live GC refs
352348

353349
block->bbEmitCookie = GetEmitter()->emitAddLabel(gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
354-
gcInfo.gcRegByrefSetCur, false DEBUG_ARG(block));
350+
gcInfo.gcRegByrefSetCur DEBUG_ARG(block));
355351
}
356352

357353
if (block->IsFirstColdBlock(compiler))

src/coreclr/jit/compiler.cpp

Lines changed: 0 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -4613,14 +4613,6 @@ void Compiler::compCompile(void** methodCodePtr, uint32_t* methodCodeSize, JitFl
46134613
//
46144614
DoPhase(this, PHASE_CLONE_FINALLY, &Compiler::fgCloneFinally);
46154615

4616-
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
4617-
4618-
// Update finally target flags after EH optimizations
4619-
//
4620-
DoPhase(this, PHASE_UPDATE_FINALLY_FLAGS, &Compiler::fgUpdateFinallyTargetFlags);
4621-
4622-
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
4623-
46244616
#if DEBUG
46254617
if (lvaEnregEHVars)
46264618
{

src/coreclr/jit/compiler.h

Lines changed: 0 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -4640,13 +4640,6 @@ class Compiler
46404640

46414641
BlockSet fgEnterBlks; // Set of blocks which have a special transfer of control; the "entry" blocks plus EH handler
46424642
// begin blocks.
4643-
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
4644-
BlockSet fgAlwaysBlks; // Set of blocks which are BBJ_ALWAYS part of BBJ_CALLFINALLY/BBJ_ALWAYS pair that should
4645-
// never be removed due to a requirement to use the BBJ_ALWAYS for generating code and
4646-
// not have "retless" blocks.
4647-
4648-
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
4649-
46504643
#ifdef DEBUG
46514644
bool fgReachabilitySetsValid; // Are the bbReach sets valid?
46524645
bool fgEnterBlksSetValid; // Is the fgEnterBlks set valid?
@@ -4745,17 +4738,6 @@ class Compiler
47454738

47464739
void fgCleanupContinuation(BasicBlock* continuation);
47474740

4748-
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
4749-
4750-
PhaseStatus fgUpdateFinallyTargetFlags();
4751-
4752-
void fgClearAllFinallyTargetBits();
4753-
4754-
void fgAddFinallyTargetFlags();
4755-
4756-
void fgFixFinallyTargetFlags(BasicBlock* pred, BasicBlock* succ, BasicBlock* newBlock);
4757-
4758-
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
47594741
PhaseStatus fgTailMergeThrows();
47604742
void fgTailMergeThrowsFallThroughHelper(BasicBlock* predBlock,
47614743
BasicBlock* nonCanonicalBlock,
@@ -5552,9 +5534,6 @@ class Compiler
55525534
BasicBlock* fgRelocateEHRange(unsigned regionIndex, FG_RELOCATE_TYPE relocateType);
55535535

55545536
#if defined(FEATURE_EH_FUNCLETS)
5555-
#if defined(TARGET_ARM)
5556-
void fgClearFinallyTargetBit(BasicBlock* block);
5557-
#endif // defined(TARGET_ARM)
55585537
bool fgIsIntraHandlerPred(BasicBlock* predBlock, BasicBlock* block);
55595538
bool fgAnyIntraHandlerPreds(BasicBlock* block);
55605539
void fgInsertFuncletPrologBlock(BasicBlock* block);

src/coreclr/jit/emit.cpp

Lines changed: 1 addition & 48 deletions
Original file line numberDiff line numberDiff line change
@@ -1865,11 +1865,6 @@ void emitter::emitCheckIGList()
18651865
// Extension groups don't store GC info.
18661866
assert((currIG->igFlags & (IGF_GC_VARS | IGF_BYREF_REGS)) == 0);
18671867

1868-
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
1869-
// Extension groups can't be branch targets.
1870-
assert((currIG->igFlags & IGF_FINALLY_TARGET) == 0);
1871-
#endif
1872-
18731868
// TODO: It would be nice if we could assert that a funclet prolog, funclet epilog, or
18741869
// function epilog could only extend one of the same type. However, epilogs are created
18751870
// using emitCreatePlaceholderIG() and might be in EXTEND groups. Can we force them to
@@ -2900,8 +2895,7 @@ bool emitter::emitNoGChelper(CORINFO_METHOD_HANDLE methHnd)
29002895

29012896
void* emitter::emitAddLabel(VARSET_VALARG_TP GCvars,
29022897
regMaskTP gcrefRegs,
2903-
regMaskTP byrefRegs,
2904-
bool isFinallyTarget DEBUG_ARG(BasicBlock* block))
2898+
regMaskTP byrefRegs DEBUG_ARG(BasicBlock* block))
29052899
{
29062900
/* Create a new IG if the current one is non-empty */
29072901

@@ -2925,13 +2919,6 @@ void* emitter::emitAddLabel(VARSET_VALARG_TP GCvars,
29252919
emitThisGCrefRegs = emitInitGCrefRegs = gcrefRegs;
29262920
emitThisByrefRegs = emitInitByrefRegs = byrefRegs;
29272921

2928-
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
2929-
if (isFinallyTarget)
2930-
{
2931-
emitCurIG->igFlags |= IGF_FINALLY_TARGET;
2932-
}
2933-
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
2934-
29352922
#ifdef DEBUG
29362923
if (EMIT_GC_VERBOSE)
29372924
{
@@ -3997,12 +3984,6 @@ void emitter::emitDispIGflags(unsigned flags)
39973984
{
39983985
printf(", byref");
39993986
}
4000-
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
4001-
if (flags & IGF_FINALLY_TARGET)
4002-
{
4003-
printf(", ftarget");
4004-
}
4005-
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
40063987
if (flags & IGF_FUNCLET_PROLOG)
40073988
{
40083989
printf(", funclet prolog");
@@ -7125,11 +7106,6 @@ unsigned emitter::emitEndCodeGen(Compiler* comp,
71257106
NO_WAY("Too many instruction groups");
71267107
}
71277108

7128-
// If this instruction group is returned to from a funclet implementing a finally,
7129-
// on architectures where it is necessary generate GC info for the current instruction as
7130-
// if it were the instruction following a call.
7131-
emitGenGCInfoIfFuncletRetTarget(ig, cp);
7132-
71337109
instrDesc* id = emitFirstInstrDesc(ig->igData);
71347110

71357111
#ifdef DEBUG
@@ -7658,29 +7634,6 @@ unsigned emitter::emitEndCodeGen(Compiler* comp,
76587634
return actualCodeSize;
76597635
}
76607636

7661-
// See specification comment at the declaration.
7662-
void emitter::emitGenGCInfoIfFuncletRetTarget(insGroup* ig, BYTE* cp)
7663-
{
7664-
#if defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
7665-
// We only emit this GC information on targets where finally's are implemented via funclets,
7666-
// and the finally is invoked, during non-exceptional execution, via a branch with a predefined
7667-
// link register, rather than a "true call" for which we would already generate GC info. Currently,
7668-
// this means precisely ARM.
7669-
if (ig->igFlags & IGF_FINALLY_TARGET)
7670-
{
7671-
// We don't actually have a call instruction in this case, so we don't have
7672-
// a real size for that instruction. We'll use 1.
7673-
emitStackPop(cp, /*isCall*/ true, /*callInstrSize*/ 1, /*args*/ 0);
7674-
7675-
/* Do we need to record a call location for GC purposes? */
7676-
if (!emitFullGCinfo)
7677-
{
7678-
emitRecordGCcall(cp, /*callInstrSize*/ 1);
7679-
}
7680-
}
7681-
#endif // defined(FEATURE_EH_FUNCLETS) && defined(TARGET_ARM)
7682-
}
7683-
76847637
/*****************************************************************************
76857638
*
76867639
* We have an instruction in an insGroup and we need to know the

0 commit comments

Comments
 (0)