@@ -51,7 +51,7 @@ public static long Decrement(ref long location) =>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int Exchange(ref int location1, int value)
{
#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_RISCV64
#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM || TARGET_RISCV64
return Exchange(ref location1, value); // Must expand intrinsic
#else
if (Unsafe.IsNullRef(ref location1))
@@ -130,7 +130,7 @@ public static T Exchange<T>([NotNullIfNotNull(nameof(value))] ref T location1, T
[MethodImpl(MethodImplOptions.AggressiveInlining)]
public static int CompareExchange(ref int location1, int value, int comparand)
{
#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_RISCV64
#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM || TARGET_RISCV64
return CompareExchange(ref location1, value, comparand); // Must expand intrinsic
#else
if (Unsafe.IsNullRef(ref location1))
@@ -229,7 +229,7 @@ public static long Add(ref long location1, long value) =>
[MethodImpl(MethodImplOptions.AggressiveInlining)]
private static int ExchangeAdd(ref int location1, int value)
{
#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_RISCV64
#if TARGET_X86 || TARGET_AMD64 || TARGET_ARM64 || TARGET_ARM || TARGET_RISCV64
return ExchangeAdd(ref location1, value); // Must expand intrinsic
#else
if (Unsafe.IsNullRef(ref location1))
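For context on the hunks above: the self-call in each `#if` branch (e.g. `return Exchange(ref location1, value);`) is a marker the JIT recognizes and expands into the hardware atomic, so it never actually recurses; this change adds 32-bit ARM (`TARGET_ARM`) to the targets that take that path. Below is a reference-semantics sketch of what the expanded intrinsics must guarantee, using a lock purely as a model; `AtomicModel` is a hypothetical name, not the runtime's fallback.

// Reference model only, NOT the runtime's implementation: the observable
// semantics the expanded intrinsics must provide, expressed with a lock.
static class AtomicModel
{
    private static readonly object Gate = new object();

    // Interlocked.Exchange: atomically store `value`, return the previous value.
    public static int Exchange(ref int location, int value)
    {
        lock (Gate)
        {
            int old = location;
            location = value;
            return old;
        }
    }

    // Interlocked.CompareExchange: store `value` only if the current value
    // equals `comparand`; always return the value that was observed.
    public static int CompareExchange(ref int location, int value, int comparand)
    {
        lock (Gate)
        {
            int old = location;
            if (old == comparand)
                location = value;
            return old;
        }
    }
}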
src/coreclr/jit/codegenarm.cpp: 5 additions & 0 deletions
@@ -61,6 +61,11 @@ bool CodeGen::genInstrWithConstant(
{
case INS_add:
case INS_sub:
if (imm < 0)
{
imm = -imm;
ins = (ins == INS_add) ? INS_sub : INS_add;
}
immFitsInIns = validImmForInstr(ins, (target_ssize_t)imm, flags);
break;

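The new `genInstrWithConstant` case handles negative immediates for `add`/`sub`: ARM immediate encodings hold non-negative values, so `add rX, rY, #-n` is rewritten as `sub rX, rY, #n` (and vice versa) before `validImmForInstr` checks encodability. A minimal sketch of that rewrite in isolation, with hypothetical names (the `long.MinValue` negation corner is ignored here):

static class ImmediateCanon
{
    public enum Ins { Add, Sub }

    // Mirror of the new codegenarm.cpp logic: negate the immediate and flip the
    // opcode so the encoder only ever sees a non-negative immediate.
    public static (Ins ins, long imm) Canonicalize(Ins ins, long imm)
    {
        if (imm < 0)
        {
            imm = -imm;                                 // make the immediate positive
            ins = (ins == Ins.Add) ? Ins.Sub : Ins.Add; // add #-n  <=>  sub #n
        }
        return (ins, imm);
    }
}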
src/coreclr/jit/codegenarm64.cpp: 0 additions & 321 deletions
@@ -3820,327 +3820,6 @@ void CodeGen::genJumpTable(GenTree* treeNode)
genProduceReg(treeNode);
}

//------------------------------------------------------------------------
// genLockedInstructions: Generate code for a GT_XADD, GT_XAND, GT_XORR or GT_XCHG node.
//
// Arguments:
// treeNode - the GT_XADD/XAND/XORR/XCHG node
//
void CodeGen::genLockedInstructions(GenTreeOp* treeNode)
{
GenTree* data = treeNode->AsOp()->gtOp2;
GenTree* addr = treeNode->AsOp()->gtOp1;
regNumber targetReg = treeNode->GetRegNum();
regNumber dataReg = data->GetRegNum();
regNumber addrReg = addr->GetRegNum();

genConsumeAddress(addr);
genConsumeRegs(data);

assert(treeNode->OperIs(GT_XCHG) || !varTypeIsSmall(treeNode->TypeGet()));

emitAttr dataSize = emitActualTypeSize(data);

if (compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
assert(!data->isContainedIntOrIImmed());

switch (treeNode->gtOper)
{
case GT_XORR:
GetEmitter()->emitIns_R_R_R(INS_ldsetal, dataSize, dataReg, (targetReg == REG_NA) ? REG_ZR : targetReg,
addrReg);
break;
case GT_XAND:
{
// Grab a temp reg to perform `MVN` for dataReg first.
regNumber tempReg = internalRegisters.GetSingle(treeNode);
GetEmitter()->emitIns_R_R(INS_mvn, dataSize, tempReg, dataReg);
GetEmitter()->emitIns_R_R_R(INS_ldclral, dataSize, tempReg, (targetReg == REG_NA) ? REG_ZR : targetReg,
addrReg);
break;
}
case GT_XCHG:
{
instruction ins = INS_swpal;
if (varTypeIsByte(treeNode->TypeGet()))
{
ins = INS_swpalb;
}
else if (varTypeIsShort(treeNode->TypeGet()))
{
ins = INS_swpalh;
}
GetEmitter()->emitIns_R_R_R(ins, dataSize, dataReg, targetReg, addrReg);
break;
}
case GT_XADD:
GetEmitter()->emitIns_R_R_R(INS_ldaddal, dataSize, dataReg, (targetReg == REG_NA) ? REG_ZR : targetReg,
addrReg);
break;
default:
assert(!"Unexpected treeNode->gtOper");
}
}
else
{
// These are imported normally if Atomics aren't supported.
assert(!treeNode->OperIs(GT_XORR, GT_XAND));

regNumber exResultReg = internalRegisters.Extract(treeNode, RBM_ALLINT);
regNumber storeDataReg =
(treeNode->OperGet() == GT_XCHG) ? dataReg : internalRegisters.Extract(treeNode, RBM_ALLINT);
regNumber loadReg = (targetReg != REG_NA) ? targetReg : storeDataReg;

// Check allocator assumptions
//
// The register allocator should have extended the lifetimes of all input and internal registers so that
// none interfere with the target.
noway_assert(addrReg != targetReg);

noway_assert(addrReg != loadReg);
noway_assert(dataReg != loadReg);

noway_assert(addrReg != storeDataReg);
noway_assert((treeNode->OperGet() == GT_XCHG) || (addrReg != dataReg));

assert(addr->isUsedFromReg());
noway_assert(exResultReg != REG_NA);
noway_assert(exResultReg != targetReg);
noway_assert((targetReg != REG_NA) || (treeNode->OperGet() != GT_XCHG));

// Store exclusive unpredictable cases must be avoided
noway_assert(exResultReg != storeDataReg);
noway_assert(exResultReg != addrReg);

// NOTE: `genConsumeAddress` marks the consumed register as not a GC pointer, as it assumes that the
// input registers die at the first instruction generated by the node. This is not the case for these
// atomics, as the input registers are multiply-used. As such, we need to mark the addr register as
// containing a GC pointer until we are finished generating the code for this node.

gcInfo.gcMarkRegPtrVal(addrReg, addr->TypeGet());

// Emit code like this:
// retry:
// ldxr loadReg, [addrReg]
// add storeDataReg, loadReg, dataReg # Only for GT_XADD
// # GT_XCHG storeDataReg === dataReg
// stxr exResult, storeDataReg, [addrReg]
// cbnz exResult, retry
// dmb ish

BasicBlock* labelRetry = genCreateTempLabel();
genDefineTempLabel(labelRetry);

instruction insLd = INS_ldaxr;
instruction insSt = INS_stlxr;
if (varTypeIsByte(treeNode->TypeGet()))
{
insLd = INS_ldaxrb;
insSt = INS_stlxrb;
}
else if (varTypeIsShort(treeNode->TypeGet()))
{
insLd = INS_ldaxrh;
insSt = INS_stlxrh;
}

// The following instruction includes an acquire half barrier
GetEmitter()->emitIns_R_R(insLd, dataSize, loadReg, addrReg);

switch (treeNode->OperGet())
{
case GT_XADD:
if (data->isContainedIntOrIImmed())
{
// Even though INS_add is specified here, the encoder will choose either
// an INS_add or an INS_sub and encode the immediate as a positive value
genInstrWithConstant(INS_add, dataSize, storeDataReg, loadReg, data->AsIntConCommon()->IconValue(),
REG_NA);
}
else
{
GetEmitter()->emitIns_R_R_R(INS_add, dataSize, storeDataReg, loadReg, dataReg);
}
break;
case GT_XCHG:
assert(!data->isContained());
storeDataReg = dataReg;
break;
default:
unreached();
}

// The following instruction includes a release half barrier
GetEmitter()->emitIns_R_R_R(insSt, dataSize, exResultReg, storeDataReg, addrReg);

GetEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, labelRetry, exResultReg);

instGen_MemoryBarrier();

gcInfo.gcMarkRegSetNpt(addr->gtGetRegMask());
}

if (targetReg != REG_NA)
{
if (varTypeIsSmall(treeNode->TypeGet()) && varTypeIsSigned(treeNode->TypeGet()))
{
instruction mov = varTypeIsShort(treeNode->TypeGet()) ? INS_sxth : INS_sxtb;
GetEmitter()->emitIns_Mov(mov, EA_4BYTE, targetReg, targetReg, /* canSkip */ false);
}

genProduceReg(treeNode);
}
}
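The deleted `genLockedInstructions` had two strategies: with ARMv8.1 LSE atomics it emits a single instruction (`ldaddal`, `swpal`, `ldsetal`; for `GT_XAND` it first complements the operand with `mvn` and uses `ldclral`, the atomic bit-clear, since `x & m == x & ~(~m)`), and without LSE it falls back to the `ldaxr`/`stlxr` retry loop shown in the comment above. A sketch of that loop's fetch-and-add semantics, with `Interlocked.CompareExchange` standing in for the exclusive load/store pair (hypothetical helper, not the JIT's output):

using System.Threading;

static class InterlockedLoopModel
{
    // Models the ldaxr / add / stlxr / cbnz retry loop for GT_XADD: returns
    // the value the location held before the add.
    public static int ExchangeAdd(ref int location, int value)
    {
        while (true)
        {
            int observed = Volatile.Read(ref location);  // ldaxr: exclusive load
            int desired = observed + value;              // add
            // stlxr succeeds only if no other store hit the location since the
            // exclusive load; CompareExchange models that success check here.
            if (Interlocked.CompareExchange(ref location, desired, observed) == observed)
                return observed;                         // store succeeded
            // store failed: branch back and retry (cbnz exResult, retry)
        }
    }
}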

//------------------------------------------------------------------------
// genCodeForCmpXchg: Produce code for a GT_CMPXCHG node.
//
// Arguments:
// tree - the GT_CMPXCHG node
//
void CodeGen::genCodeForCmpXchg(GenTreeCmpXchg* treeNode)
{
assert(treeNode->OperIs(GT_CMPXCHG));

GenTree* addr = treeNode->Addr(); // arg1
GenTree* data = treeNode->Data(); // arg2
GenTree* comparand = treeNode->Comparand(); // arg3

regNumber targetReg = treeNode->GetRegNum();
regNumber dataReg = data->GetRegNum();
regNumber addrReg = addr->GetRegNum();
regNumber comparandReg = comparand->GetRegNum();

genConsumeAddress(addr);
genConsumeRegs(data);
genConsumeRegs(comparand);

emitAttr dataSize = emitActualTypeSize(data);

if (compiler->compOpportunisticallyDependsOn(InstructionSet_Atomics))
{
// casal uses the comparand as the target reg
GetEmitter()->emitIns_Mov(INS_mov, dataSize, targetReg, comparandReg, /* canSkip */ true);

// Catch case we destroyed data or address before use
noway_assert((addrReg != targetReg) || (targetReg == comparandReg));
noway_assert((dataReg != targetReg) || (targetReg == comparandReg));

instruction ins = INS_casal;
if (varTypeIsByte(treeNode->TypeGet()))
{
ins = INS_casalb;
}
else if (varTypeIsShort(treeNode->TypeGet()))
{
ins = INS_casalh;
}
GetEmitter()->emitIns_R_R_R(ins, dataSize, targetReg, dataReg, addrReg);
}
else
{
regNumber exResultReg = internalRegisters.Extract(treeNode, RBM_ALLINT);

// Check allocator assumptions
//
// The register allocator should have extended the lifetimes of all input and internal registers so that
// none interfere with the target.
noway_assert(addrReg != targetReg);
noway_assert(dataReg != targetReg);
noway_assert(comparandReg != targetReg);
noway_assert(addrReg != dataReg);
noway_assert(targetReg != REG_NA);
noway_assert(exResultReg != REG_NA);
noway_assert(exResultReg != targetReg);

assert(addr->isUsedFromReg());
assert(data->isUsedFromReg());
assert(!comparand->isUsedFromMemory());

// Store exclusive unpredictable cases must be avoided
noway_assert(exResultReg != dataReg);
noway_assert(exResultReg != addrReg);

// NOTE: `genConsumeAddress` marks the consumed register as not a GC pointer, as it assumes that the
// input registers die at the first instruction generated by the node. This is not the case for these
// atomics, as the input registers are multiply-used. As such, we need to mark the addr register as
// containing a GC pointer until we are finished generating the code for this node.

gcInfo.gcMarkRegPtrVal(addrReg, addr->TypeGet());

// Emit code like this:
// retry:
// ldxr targetReg, [addrReg]
// cmp targetReg, comparandReg
// bne compareFail
// stxr exResult, dataReg, [addrReg]
// cbnz exResult, retry
// compareFail:
// dmb ish

BasicBlock* labelRetry = genCreateTempLabel();
BasicBlock* labelCompareFail = genCreateTempLabel();
genDefineTempLabel(labelRetry);

instruction insLd = INS_ldaxr;
instruction insSt = INS_stlxr;
if (varTypeIsByte(treeNode->TypeGet()))
{
insLd = INS_ldaxrb;
insSt = INS_stlxrb;
}
else if (varTypeIsShort(treeNode->TypeGet()))
{
insLd = INS_ldaxrh;
insSt = INS_stlxrh;
}

// The following instruction includes an acquire half barrier
GetEmitter()->emitIns_R_R(insLd, dataSize, targetReg, addrReg);

if (comparand->isContainedIntOrIImmed())
{
if (comparand->IsIntegralConst(0))
{
GetEmitter()->emitIns_J_R(INS_cbnz, emitActualTypeSize(treeNode), labelCompareFail, targetReg);
}
else
{
GetEmitter()->emitIns_R_I(INS_cmp, emitActualTypeSize(treeNode), targetReg,
comparand->AsIntConCommon()->IconValue());
GetEmitter()->emitIns_J(INS_bne, labelCompareFail);
}
}
else
{
GetEmitter()->emitIns_R_R(INS_cmp, emitActualTypeSize(treeNode), targetReg, comparandReg);
GetEmitter()->emitIns_J(INS_bne, labelCompareFail);
}

// The following instruction includes a release half barrier
GetEmitter()->emitIns_R_R_R(insSt, dataSize, exResultReg, dataReg, addrReg);

GetEmitter()->emitIns_J_R(INS_cbnz, EA_4BYTE, labelRetry, exResultReg);

genDefineTempLabel(labelCompareFail);

instGen_MemoryBarrier();

gcInfo.gcMarkRegSetNpt(addr->gtGetRegMask());
}

if (varTypeIsSmall(treeNode->TypeGet()) && varTypeIsSigned(treeNode->TypeGet()))
{
instruction mov = varTypeIsShort(treeNode->TypeGet()) ? INS_sxth : INS_sxtb;
GetEmitter()->emitIns_Mov(mov, EA_4BYTE, targetReg, targetReg, /* canSkip */ false);
}

genProduceReg(treeNode);
}
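`genCodeForCmpXchg` follows the same split: a single `casal` under LSE, otherwise a loop that loads exclusively, branches to `compareFail` on a comparand mismatch, and retries when the store-exclusive fails. A usage-side sketch of the kind of lock-free operation this primitive enables (hypothetical `AtomicMax` helper, not part of this change):

using System.Threading;

static class CasUsage
{
    // A lock-free "atomic max" built on the compare-exchange primitive that
    // casal (or the LL/SC loop above) implements.
    public static void AtomicMax(ref int location, int candidate)
    {
        int observed = Volatile.Read(ref location);
        while (candidate > observed)
        {
            int witnessed = Interlocked.CompareExchange(ref location, candidate, observed);
            if (witnessed == observed)
                return;               // our store was published
            observed = witnessed;     // lost the race: compare against the newer value
        }
    }
}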

instruction CodeGen::genGetInsForOper(genTreeOps oper, var_types type)
{
instruction ins = INS_BREAKPOINT;