Diffstat (limited to 'llvm/lib')
 llvm/lib/AsmParser/LLParser.cpp                  | 40
 llvm/lib/Bitcode/Reader/BitcodeReader.cpp        |  2
 llvm/lib/Bitcode/Writer/BitcodeWriter.cpp        |  2
 llvm/lib/CodeGen/AtomicExpandPass.cpp            |  6
 llvm/lib/IR/Instructions.cpp                     |  4
 llvm/lib/IR/Verifier.cpp                         |  5
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp  |  3
 llvm/lib/Target/ARM/ARMISelLowering.cpp          |  3
 llvm/lib/Target/Hexagon/HexagonISelLowering.cpp  | 19
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp      |  3
 10 files changed, 73 insertions(+), 14 deletions(-)
diff --git a/llvm/lib/AsmParser/LLParser.cpp b/llvm/lib/AsmParser/LLParser.cpp
index 20d96061102..855c5d26500 100644
--- a/llvm/lib/AsmParser/LLParser.cpp
+++ b/llvm/lib/AsmParser/LLParser.cpp
@@ -6815,6 +6815,7 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
AtomicOrdering Ordering = AtomicOrdering::NotAtomic;
SyncScope::ID SSID = SyncScope::System;
bool isVolatile = false;
+ bool IsFP = false;
AtomicRMWInst::BinOp Operation;
if (EatIfPresent(lltok::kw_volatile))
@@ -6833,6 +6834,14 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
case lltok::kw_min: Operation = AtomicRMWInst::Min; break;
case lltok::kw_umax: Operation = AtomicRMWInst::UMax; break;
case lltok::kw_umin: Operation = AtomicRMWInst::UMin; break;
+ case lltok::kw_fadd:
+ Operation = AtomicRMWInst::FAdd;
+ IsFP = true;
+ break;
+ case lltok::kw_fsub:
+ Operation = AtomicRMWInst::FSub;
+ IsFP = true;
+ break;
}
Lex.Lex(); // Eat the operation.
@@ -6849,18 +6858,25 @@ int LLParser::ParseAtomicRMW(Instruction *&Inst, PerFunctionState &PFS) {
if (cast<PointerType>(Ptr->getType())->getElementType() != Val->getType())
return Error(ValLoc, "atomicrmw value and pointer type do not match");
- if (Operation != AtomicRMWInst::Xchg && !Val->getType()->isIntegerTy()) {
- return Error(ValLoc, "atomicrmw " +
- AtomicRMWInst::getOperationName(Operation) +
- " operand must be an integer");
- }
-
- if (Operation == AtomicRMWInst::Xchg &&
- !Val->getType()->isIntegerTy() &&
- !Val->getType()->isFloatingPointTy()) {
- return Error(ValLoc, "atomicrmw " +
- AtomicRMWInst::getOperationName(Operation) +
- " operand must be an integer or floating point type");
+ if (Operation == AtomicRMWInst::Xchg) {
+ if (!Val->getType()->isIntegerTy() &&
+ !Val->getType()->isFloatingPointTy()) {
+ return Error(ValLoc, "atomicrmw " +
+ AtomicRMWInst::getOperationName(Operation) +
+ " operand must be an integer or floating point type");
+ }
+ } else if (IsFP) {
+ if (!Val->getType()->isFloatingPointTy()) {
+ return Error(ValLoc, "atomicrmw " +
+ AtomicRMWInst::getOperationName(Operation) +
+ " operand must be a floating point type");
+ }
+ } else {
+ if (!Val->getType()->isIntegerTy()) {
+ return Error(ValLoc, "atomicrmw " +
+ AtomicRMWInst::getOperationName(Operation) +
+ " operand must be an integer");
+ }
}
unsigned Size = Val->getType()->getPrimitiveSizeInBits();
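For reference, a minimal sketch of the textual IR the parser change above now accepts, written in the typed-pointer syntax of this era (pointer and value names are illustrative, not from the commit's tests):

    %old0 = atomicrmw fadd float* %ptr, float %delta seq_cst
    %old1 = atomicrmw fsub double* %dptr, double %ddelta monotonic

As with the existing operations, both forms return the value that was in memory before the update.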
diff --git a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
index 87bd8f625fa..0b93a61dc40 100644
--- a/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -1034,6 +1034,8 @@ static AtomicRMWInst::BinOp getDecodedRMWOperation(unsigned Val) {
case bitc::RMW_MIN: return AtomicRMWInst::Min;
case bitc::RMW_UMAX: return AtomicRMWInst::UMax;
case bitc::RMW_UMIN: return AtomicRMWInst::UMin;
+ case bitc::RMW_FADD: return AtomicRMWInst::FAdd;
+ case bitc::RMW_FSUB: return AtomicRMWInst::FSub;
}
}
diff --git a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
index d345ae0c416..f4a539e51f7 100644
--- a/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -559,6 +559,8 @@ static unsigned getEncodedRMWOperation(AtomicRMWInst::BinOp Op) {
case AtomicRMWInst::Min: return bitc::RMW_MIN;
case AtomicRMWInst::UMax: return bitc::RMW_UMAX;
case AtomicRMWInst::UMin: return bitc::RMW_UMIN;
+ case AtomicRMWInst::FAdd: return bitc::RMW_FADD;
+ case AtomicRMWInst::FSub: return bitc::RMW_FSUB;
}
}
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index bef9682fbd5..2d915945392 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -549,6 +549,10 @@ static Value *performAtomicOp(AtomicRMWInst::BinOp Op, IRBuilder<> &Builder,
case AtomicRMWInst::UMin:
NewVal = Builder.CreateICmpULE(Loaded, Inc);
return Builder.CreateSelect(NewVal, Loaded, Inc, "new");
+ case AtomicRMWInst::FAdd:
+ return Builder.CreateFAdd(Loaded, Inc, "new");
+ case AtomicRMWInst::FSub:
+ return Builder.CreateFSub(Loaded, Inc, "new");
default:
llvm_unreachable("Unknown atomic op");
}
@@ -1547,6 +1551,8 @@ static ArrayRef<RTLIB::Libcall> GetRMWLibcall(AtomicRMWInst::BinOp Op) {
case AtomicRMWInst::Min:
case AtomicRMWInst::UMax:
case AtomicRMWInst::UMin:
+ case AtomicRMWInst::FAdd:
+ case AtomicRMWInst::FSub:
// No atomic libcalls are available for max/min/umax/umin.
return {};
}
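For targets that answer CmpXChg for the new operations (AArch64, ARM and RISCV below), AtomicExpandPass rewrites the atomicrmw into a compare-exchange loop, with performAtomicOp above supplying the fadd/fsub. A rough, hand-written sketch of the resulting IR for a 32-bit fadd, with the float values bitcast to i32 because cmpxchg does not accept floating point operands (block and value names are hypothetical):

    define float @fadd_via_cmpxchg(float* %ptr, float %val) {
    entry:
      %iptr = bitcast float* %ptr to i32*
      %init = load float, float* %ptr, align 4
      br label %loop

    loop:
      %loaded = phi float [ %init, %entry ], [ %prev, %loop ]
      %new = fadd float %loaded, %val                        ; from performAtomicOp
      %loaded.i = bitcast float %loaded to i32
      %new.i = bitcast float %new to i32
      %pair = cmpxchg i32* %iptr, i32 %loaded.i, i32 %new.i seq_cst seq_cst
      %prev.i = extractvalue { i32, i1 } %pair, 0
      %ok = extractvalue { i32, i1 } %pair, 1
      %prev = bitcast i32 %prev.i to float
      br i1 %ok, label %done, label %loop

    done:
      ret float %loaded                                      ; old value, as atomicrmw returns
    }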
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 8b674465792..f48a970fd40 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1407,6 +1407,10 @@ StringRef AtomicRMWInst::getOperationName(BinOp Op) {
return "umax";
case AtomicRMWInst::UMin:
return "umin";
+ case AtomicRMWInst::FAdd:
+ return "fadd";
+ case AtomicRMWInst::FSub:
+ return "fsub";
case AtomicRMWInst::BAD_BINOP:
return "<invalid operation>";
}
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index fb2388c232b..1000e210533 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -3435,6 +3435,11 @@ void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
AtomicRMWInst::getOperationName(Op) +
" operand must have integer or floating point type!",
&RMWI, ElTy);
+ } else if (AtomicRMWInst::isFPOperation(Op)) {
+ Assert(ElTy->isFloatingPointTy(), "atomicrmw " +
+ AtomicRMWInst::getOperationName(Op) +
+ " operand must have floating point type!",
+ &RMWI, ElTy);
} else {
Assert(ElTy->isIntegerTy(), "atomicrmw " +
AtomicRMWInst::getOperationName(Op) +
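To summarize the three-way type rule the verifier enforces here (illustrative examples, not from the commit's test suite): xchg keeps accepting either an integer or a floating point operand, the new fadd/fsub require a floating point operand, and all other operations still require an integer operand.

    %a = atomicrmw xchg float* %fp, float %fv seq_cst   ; ok: xchg allows int or FP
    %b = atomicrmw fadd float* %fp, float %fv seq_cst   ; ok: FP operand as required
    %c = atomicrmw fadd i32* %ip, i32 %iv seq_cst       ; rejected: floating point type required
    %d = atomicrmw add float* %fp, float %fv seq_cst    ; rejected: integer type required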
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 2530b0af87d..df77e8e7d5b 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -11600,6 +11600,9 @@ AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
// For the real atomic operations, we have ldxr/stxr up to 128 bits,
TargetLowering::AtomicExpansionKind
AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+ if (AI->isFloatingPointOperation())
+ return AtomicExpansionKind::CmpXChg;
+
unsigned Size = AI->getType()->getPrimitiveSizeInBits();
if (Size > 128) return AtomicExpansionKind::None;
// Nand not supported in LSE.
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 5cf9a2fc2bf..469ceb9c213 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -14645,6 +14645,9 @@ ARMTargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
// and up to 64 bits on the non-M profiles
TargetLowering::AtomicExpansionKind
ARMTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+ if (AI->isFloatingPointOperation())
+ return AtomicExpansionKind::CmpXChg;
+
unsigned Size = AI->getType()->getPrimitiveSizeInBits();
bool hasAtomicRMW = !Subtarget->isThumb() || Subtarget->hasV8MBaselineOps();
return (Size <= (Subtarget->isMClass() ? 32U : 64U) && hasAtomicRMW)
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 329d7aeb693..b3781fe9e71 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3110,13 +3110,21 @@ Value *HexagonTargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
AtomicOrdering Ord) const {
BasicBlock *BB = Builder.GetInsertBlock();
Module *M = BB->getParent()->getParent();
- Type *Ty = cast<PointerType>(Addr->getType())->getElementType();
+ auto PT = cast<PointerType>(Addr->getType());
+ Type *Ty = PT->getElementType();
unsigned SZ = Ty->getPrimitiveSizeInBits();
assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic loads supported");
Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_L2_loadw_locked
: Intrinsic::hexagon_L4_loadd_locked;
+
+ PointerType *NewPtrTy
+ = Builder.getIntNTy(SZ)->getPointerTo(PT->getAddressSpace());
+ Addr = Builder.CreateBitCast(Addr, NewPtrTy);
+
Value *Fn = Intrinsic::getDeclaration(M, IntID);
- return Builder.CreateCall(Fn, Addr, "larx");
+ Value *Call = Builder.CreateCall(Fn, Addr, "larx");
+
+ return Builder.CreateBitCast(Call, Ty);
}
/// Perform a store-conditional operation to Addr. Return the status of the
@@ -3127,10 +3135,17 @@ Value *HexagonTargetLowering::emitStoreConditional(IRBuilder<> &Builder,
Module *M = BB->getParent()->getParent();
Type *Ty = Val->getType();
unsigned SZ = Ty->getPrimitiveSizeInBits();
+
+ Type *CastTy = Builder.getIntNTy(SZ);
assert((SZ == 32 || SZ == 64) && "Only 32/64-bit atomic stores supported");
Intrinsic::ID IntID = (SZ == 32) ? Intrinsic::hexagon_S2_storew_locked
: Intrinsic::hexagon_S4_stored_locked;
Value *Fn = Intrinsic::getDeclaration(M, IntID);
+
+ unsigned AS = Addr->getType()->getPointerAddressSpace();
+ Addr = Builder.CreateBitCast(Addr, CastTy->getPointerTo(AS));
+ Val = Builder.CreateBitCast(Val, CastTy);
+
Value *Call = Builder.CreateCall(Fn, {Addr, Val}, "stcx");
Value *Cmp = Builder.CreateICmpEQ(Call, Builder.getInt32(0), "");
Value *Ext = Builder.CreateZExt(Cmp, Type::getInt32Ty(M->getContext()));
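Hexagon keeps its LL/SC expansion, so the two hooks above only need to bitcast the address and value to the integer width the locked-load/locked-store intrinsics expect. A rough sketch of the loop this yields for a 32-bit fadd (hand-written for illustration; names are hypothetical):

    declare i32 @llvm.hexagon.L2.loadw.locked(i32*)
    declare i32 @llvm.hexagon.S2.storew.locked(i32*, i32)

    define float @fadd_llsc(float* %ptr, float %val) {
    entry:
      %iptr = bitcast float* %ptr to i32*
      br label %loop

    loop:
      %larx = call i32 @llvm.hexagon.L2.loadw.locked(i32* %iptr)
      %old = bitcast i32 %larx to float
      %new = fadd float %old, %val
      %new.i = bitcast float %new to i32
      %stcx = call i32 @llvm.hexagon.S2.storew.locked(i32* %iptr, i32 %new.i)
      ; emitStoreConditional above reports failure as non-zero (zext of %stcx == 0),
      ; so a zero intrinsic result means the store did not commit and we retry
      %fail = icmp eq i32 %stcx, 0
      br i1 %fail, label %loop, label %done

    done:
      ret float %old
    }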
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 8a32d957ec5..f70ffaab0c1 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1823,6 +1823,9 @@ Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+ if (AI->isFloatingPointOperation())
+ return AtomicExpansionKind::CmpXChg;
+
unsigned Size = AI->getType()->getPrimitiveSizeInBits();
if (Size == 8 || Size == 16)
return AtomicExpansionKind::MaskedIntrinsic;