author     Alex Bradbury <asb@lowrisc.org>    2018-11-29 20:43:42 +0000
committer  Alex Bradbury <asb@lowrisc.org>    2018-11-29 20:43:42 +0000
commit     66d9a752b9c4c7de32f21f921d1678799a254259 (patch)
tree       883e834f03d6c63309867b074bdb2658180989b9 /llvm/lib/CodeGen
parent     7eb1c2864fb3b3482432bb979f40c740ca99f023 (diff)
[RISCV] Implement codegen for cmpxchg on RV32IA
Utilise a similar ('late') lowering strategy to D47882. The changes to AtomicExpandPass allow this strategy to be used by other targets which implement shouldExpandAtomicCmpXchgInIR. All cmpxchg instructions are currently lowered as 'strong', and the failure ordering is ignored; this is conservative but correct.

Differential Revision: https://reviews.llvm.org/D48131

llvm-svn: 347914
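For a target to take this path, the key hook is shouldExpandAtomicCmpXchgInIR. The RISC-V side of this change lives outside llvm/lib/CodeGen and so is not part of this diff; the following is only a minimal sketch, modelled on the RISC-V backend, of how a target might request the masked-intrinsic expansion for sub-word cmpxchg:

  TargetLowering::AtomicExpansionKind
  RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
      AtomicCmpXchgInst *CI) const {
    // Illustrative only: expand i8/i16 cmpxchg via the masked intrinsic,
    // since RV32IA only provides word-sized LR/SC.
    unsigned Size =
        CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
    if (Size == 8 || Size == 16)
      return AtomicExpansionKind::MaskedIntrinsic;
    return AtomicExpansionKind::None;
  }

A target opting in this way must also implement emitMaskedAtomicCmpXchgIntrinsic (see the sketch after the diff) and report its native cmpxchg width via getMinCmpXchgSizeInBits, which the new expansion uses to compute the aligned word address.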
Diffstat (limited to 'llvm/lib/CodeGen')
-rw-r--r--  llvm/lib/CodeGen/AtomicExpandPass.cpp | 34
1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index c1b98794015..95581c09dd1 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -91,6 +91,7 @@ namespace {
AtomicRMWInst *widenPartwordAtomicRMW(AtomicRMWInst *AI);
void expandPartwordCmpXchg(AtomicCmpXchgInst *I);
void expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI);
+ void expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI);
AtomicCmpXchgInst *convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI);
static Value *insertRMWCmpXchgLoop(
@@ -944,6 +945,35 @@ void AtomicExpand::expandAtomicRMWToMaskedIntrinsic(AtomicRMWInst *AI) {
AI->eraseFromParent();
}
+void AtomicExpand::expandAtomicCmpXchgToMaskedIntrinsic(AtomicCmpXchgInst *CI) {
+ IRBuilder<> Builder(CI);
+
+ PartwordMaskValues PMV = createMaskInstrs(
+ Builder, CI, CI->getCompareOperand()->getType(), CI->getPointerOperand(),
+ TLI->getMinCmpXchgSizeInBits() / 8);
+
+ Value *CmpVal_Shifted = Builder.CreateShl(
+ Builder.CreateZExt(CI->getCompareOperand(), PMV.WordType), PMV.ShiftAmt,
+ "CmpVal_Shifted");
+ Value *NewVal_Shifted = Builder.CreateShl(
+ Builder.CreateZExt(CI->getNewValOperand(), PMV.WordType), PMV.ShiftAmt,
+ "NewVal_Shifted");
+ Value *OldVal = TLI->emitMaskedAtomicCmpXchgIntrinsic(
+ Builder, CI, PMV.AlignedAddr, CmpVal_Shifted, NewVal_Shifted, PMV.Mask,
+ CI->getSuccessOrdering());
+ Value *FinalOldVal = Builder.CreateTrunc(
+ Builder.CreateLShr(OldVal, PMV.ShiftAmt), PMV.ValueType);
+
+ Value *Res = UndefValue::get(CI->getType());
+ Res = Builder.CreateInsertValue(Res, FinalOldVal, 0);
+ Value *Success = Builder.CreateICmpEQ(
+ CmpVal_Shifted, Builder.CreateAnd(OldVal, PMV.Mask), "Success");
+ Res = Builder.CreateInsertValue(Res, Success, 1);
+
+ CI->replaceAllUsesWith(Res);
+ CI->eraseFromParent();
+}
+
Value *AtomicExpand::insertRMWLLSCLoop(
IRBuilder<> &Builder, Type *ResultTy, Value *Addr,
AtomicOrdering MemOpOrder,
@@ -1366,8 +1396,8 @@ bool AtomicExpand::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
return expandAtomicCmpXchg(CI);
}
case TargetLoweringBase::AtomicExpansionKind::MaskedIntrinsic:
- llvm_unreachable(
- "MaskedIntrinsic expansion of cmpxhg not yet implemented");
+ expandAtomicCmpXchgToMaskedIntrinsic(CI);
+ return true;
}
}
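The expansion above ends in a call to TLI->emitMaskedAtomicCmpXchgIntrinsic, which each opting-in target must provide. A hedged sketch of such a hook, modelled on the RISC-V implementation from D48131 (the intrinsic name and the ordering encoding are the target's choice and are not part of this diff):

  Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
      IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
      Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
    // Pass the success ordering through as an immediate; the failure
    // ordering is ignored by this expansion (conservative but correct).
    Value *Ordering = Builder.getInt32(static_cast<uint32_t>(Ord));
    Type *Tys[] = {AlignedAddr->getType()};
    Function *MaskedCmpXchg = Intrinsic::getDeclaration(
        CI->getModule(), Intrinsic::riscv_masked_cmpxchg_i32, Tys);
    // The intrinsic performs a word-sized LR/SC loop on AlignedAddr,
    // comparing and swapping only the bits selected by Mask, and returns
    // the full word that was loaded.
    return Builder.CreateCall(MaskedCmpXchg,
                              {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  }

AtomicExpandPass then shifts and truncates the returned word to recover the partword result, and derives the success bit by comparing the masked old value against the shifted expected value, as expandAtomicCmpXchgToMaskedIntrinsic does above.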