-rw-r--r--  llvm/include/llvm/CodeGen/ISDOpcodes.h                              |   2
-rw-r--r--  llvm/include/llvm/CodeGen/SelectionDAGNodes.h                       |   4
-rw-r--r--  llvm/include/llvm/Target/TargetSelectionDAG.td                      |  10
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp                      |   2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp               |   2
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp                |   1
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp                       |   2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp                       |  10
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h                         |   1
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h                            |   3
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.cpp                           |  48
-rw-r--r--  llvm/lib/Target/AMDGPU/SIISelLowering.h                             |   1
-rw-r--r--  llvm/lib/Target/AMDGPU/SIInstrInfo.td                               |   8
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp                             |   2
-rw-r--r--  llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll                        | 109
-rw-r--r--  llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll  | 264
-rw-r--r--  llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll  | 201
-rw-r--r--  llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll       | 112
18 files changed, 765 insertions, 17 deletions
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 9c11c4f9e82..b664f7e2010 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -818,6 +818,8 @@ namespace ISD {
ATOMIC_LOAD_MAX,
ATOMIC_LOAD_UMIN,
ATOMIC_LOAD_UMAX,
+ ATOMIC_LOAD_FADD,
+ ATOMIC_LOAD_FSUB,
// Masked load and store - consecutive vector load and store operations
// with additional mask operand that prevents memory accesses to the
diff --git a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
index ec0fce6202b..936c3c5bd15 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -1359,6 +1359,8 @@ public:
N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
N->getOpcode() == ISD::ATOMIC_LOAD ||
N->getOpcode() == ISD::ATOMIC_STORE ||
N->getOpcode() == ISD::MLOAD ||
@@ -1411,6 +1413,8 @@ public:
N->getOpcode() == ISD::ATOMIC_LOAD_MAX ||
N->getOpcode() == ISD::ATOMIC_LOAD_UMIN ||
N->getOpcode() == ISD::ATOMIC_LOAD_UMAX ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_FADD ||
+ N->getOpcode() == ISD::ATOMIC_LOAD_FSUB ||
N->getOpcode() == ISD::ATOMIC_LOAD ||
N->getOpcode() == ISD::ATOMIC_STORE;
}
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index c494446f8e3..f9b26d53e41 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -264,6 +264,11 @@ def SDTAtomic3 : SDTypeProfile<1, 3, [
def SDTAtomic2 : SDTypeProfile<1, 2, [
SDTCisSameAs<0,2>, SDTCisInt<0>, SDTCisPtrTy<1>
]>;
+
+def SDTFPAtomic2 : SDTypeProfile<1, 2, [
+ SDTCisSameAs<0,2>, SDTCisFP<0>, SDTCisPtrTy<1>
+]>;
+
def SDTAtomicStore : SDTypeProfile<0, 2, [
SDTCisPtrTy<0>, SDTCisInt<1>
]>;
@@ -510,6 +515,11 @@ def atomic_load_umin : SDNode<"ISD::ATOMIC_LOAD_UMIN", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
def atomic_load_umax : SDNode<"ISD::ATOMIC_LOAD_UMAX", SDTAtomic2,
[SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_fadd : SDNode<"ISD::ATOMIC_LOAD_FADD" , SDTFPAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+def atomic_load_fsub : SDNode<"ISD::ATOMIC_LOAD_FSUB" , SDTFPAtomic2,
+ [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand]>;
+
def atomic_load : SDNode<"ISD::ATOMIC_LOAD", SDTAtomicLoad,
[SDNPHasChain, SDNPMayLoad, SDNPMemOperand]>;
def atomic_store : SDNode<"ISD::ATOMIC_STORE", SDTAtomicStore,
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 1b5a4f3b376..ead36479fc8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -6464,6 +6464,8 @@ SDValue SelectionDAG::getAtomic(unsigned Opcode, const SDLoc &dl, EVT MemVT,
Opcode == ISD::ATOMIC_LOAD_MAX ||
Opcode == ISD::ATOMIC_LOAD_UMIN ||
Opcode == ISD::ATOMIC_LOAD_UMAX ||
+ Opcode == ISD::ATOMIC_LOAD_FADD ||
+ Opcode == ISD::ATOMIC_LOAD_FSUB ||
Opcode == ISD::ATOMIC_SWAP ||
Opcode == ISD::ATOMIC_STORE) &&
"Invalid Atomic Op");
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 16952d25733..3470e74fe96 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4204,6 +4204,8 @@ void SelectionDAGBuilder::visitAtomicRMW(const AtomicRMWInst &I) {
case AtomicRMWInst::Min: NT = ISD::ATOMIC_LOAD_MIN; break;
case AtomicRMWInst::UMax: NT = ISD::ATOMIC_LOAD_UMAX; break;
case AtomicRMWInst::UMin: NT = ISD::ATOMIC_LOAD_UMIN; break;
+ case AtomicRMWInst::FAdd: NT = ISD::ATOMIC_LOAD_FADD; break;
+ case AtomicRMWInst::FSub: NT = ISD::ATOMIC_LOAD_FSUB; break;
}
AtomicOrdering Order = I.getOrdering();
SyncScope::ID SSID = I.getSyncScopeID();
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 0966705fc31..e7d9668a876 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -95,6 +95,7 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::ATOMIC_LOAD_MAX: return "AtomicLoadMax";
case ISD::ATOMIC_LOAD_UMIN: return "AtomicLoadUMin";
case ISD::ATOMIC_LOAD_UMAX: return "AtomicLoadUMax";
+ case ISD::ATOMIC_LOAD_FADD: return "AtomicLoadFAdd";
case ISD::ATOMIC_LOAD: return "AtomicLoad";
case ISD::ATOMIC_STORE: return "AtomicStore";
case ISD::PCMARKER: return "PCMarker";
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
index 1dad02b9a33..1bc6be45056 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -469,7 +469,7 @@ void AMDGPUDAGToDAGISel::Select(SDNode *N) {
if (isa<AtomicSDNode>(N) ||
(Opc == AMDGPUISD::ATOMIC_INC || Opc == AMDGPUISD::ATOMIC_DEC ||
- Opc == AMDGPUISD::ATOMIC_LOAD_FADD ||
+ Opc == ISD::ATOMIC_LOAD_FADD ||
Opc == AMDGPUISD::ATOMIC_LOAD_FMIN ||
Opc == AMDGPUISD::ATOMIC_LOAD_FMAX))
N = glueCopyToM0(N);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index b68488882f7..f170bc53521 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4194,7 +4194,6 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(ATOMIC_CMP_SWAP)
NODE_NAME_CASE(ATOMIC_INC)
NODE_NAME_CASE(ATOMIC_DEC)
- NODE_NAME_CASE(ATOMIC_LOAD_FADD)
NODE_NAME_CASE(ATOMIC_LOAD_FMIN)
NODE_NAME_CASE(ATOMIC_LOAD_FMAX)
NODE_NAME_CASE(BUFFER_LOAD)
@@ -4518,7 +4517,12 @@ bool AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
TargetLowering::AtomicExpansionKind
AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
- if (RMW->getOperation() == AtomicRMWInst::Nand)
+ switch (RMW->getOperation()) {
+ case AtomicRMWInst::Nand:
+ case AtomicRMWInst::FAdd:
+ case AtomicRMWInst::FSub:
return AtomicExpansionKind::CmpXChg;
- return AtomicExpansionKind::None;
+ default:
+ return AtomicExpansionKind::None;
+ }
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index 0449c08e2f9..557479ab3f6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -477,7 +477,6 @@ enum NodeType : unsigned {
ATOMIC_CMP_SWAP,
ATOMIC_INC,
ATOMIC_DEC,
- ATOMIC_LOAD_FADD,
ATOMIC_LOAD_FMIN,
ATOMIC_LOAD_FMAX,
BUFFER_LOAD,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
index ff94eb45850..06d750d7b89 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.h
@@ -791,6 +791,9 @@ public:
return HasScalarAtomics;
}
+ bool hasLDSFPAtomics() const {
+ return VIInsts;
+ }
bool hasDPP() const {
return HasDPP;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 560c87b6a56..117014dcc22 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -699,6 +699,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
+ setTargetDAGCombine(ISD::ATOMIC_LOAD_FADD);
setSchedulingPreference(Sched::RegPressure);
@@ -5491,9 +5492,21 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
M->getVTList(), Ops, M->getMemoryVT(),
M->getMemOperand());
}
+ case Intrinsic::amdgcn_ds_fadd: {
+ MemSDNode *M = cast<MemSDNode>(Op);
+ unsigned Opc;
+ switch (IntrID) {
+ case Intrinsic::amdgcn_ds_fadd:
+ Opc = ISD::ATOMIC_LOAD_FADD;
+ break;
+ }
+
+ return DAG.getAtomic(Opc, SDLoc(Op), M->getMemoryVT(),
+ M->getOperand(0), M->getOperand(2), M->getOperand(3),
+ M->getMemOperand());
+ }
case Intrinsic::amdgcn_atomic_inc:
case Intrinsic::amdgcn_atomic_dec:
- case Intrinsic::amdgcn_ds_fadd:
case Intrinsic::amdgcn_ds_fmin:
case Intrinsic::amdgcn_ds_fmax: {
MemSDNode *M = cast<MemSDNode>(Op);
@@ -5505,9 +5518,6 @@ SDValue SITargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
case Intrinsic::amdgcn_atomic_dec:
Opc = AMDGPUISD::ATOMIC_DEC;
break;
- case Intrinsic::amdgcn_ds_fadd:
- Opc = AMDGPUISD::ATOMIC_LOAD_FADD;
- break;
case Intrinsic::amdgcn_ds_fmin:
Opc = AMDGPUISD::ATOMIC_LOAD_FMIN;
break;
@@ -8926,11 +8936,11 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
case ISD::ATOMIC_LOAD_MAX:
case ISD::ATOMIC_LOAD_UMIN:
case ISD::ATOMIC_LOAD_UMAX:
+ case ISD::ATOMIC_LOAD_FADD:
case AMDGPUISD::ATOMIC_INC:
case AMDGPUISD::ATOMIC_DEC:
- case AMDGPUISD::ATOMIC_LOAD_FADD:
case AMDGPUISD::ATOMIC_LOAD_FMIN:
- case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
+ case AMDGPUISD::ATOMIC_LOAD_FMAX: // TODO: Target mem intrinsics.
if (DCI.isBeforeLegalize())
break;
return performMemSDNodeCombine(cast<MemSDNode>(N), DCI);
@@ -9722,3 +9732,29 @@ bool SITargetLowering::isKnownNeverNaNForTargetNode(SDValue Op,
return AMDGPUTargetLowering::isKnownNeverNaNForTargetNode(Op, DAG,
SNaN, Depth);
}
+
+TargetLowering::AtomicExpansionKind
+SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
+ switch (RMW->getOperation()) {
+ case AtomicRMWInst::FAdd: {
+ Type *Ty = RMW->getType();
+
+ // We don't have a way to support 16-bit atomics now, so just leave them
+ // as-is.
+ if (Ty->isHalfTy())
+ return AtomicExpansionKind::None;
+
+ if (!Ty->isFloatTy())
+ return AtomicExpansionKind::CmpXChg;
+
+ // TODO: Do have these for flat. Older targets also had them for buffers.
+ unsigned AS = RMW->getPointerAddressSpace();
+ return (AS == AMDGPUAS::LOCAL_ADDRESS && Subtarget->hasLDSFPAtomics()) ?
+ AtomicExpansionKind::None : AtomicExpansionKind::CmpXChg;
+ }
+ default:
+ break;
+ }
+
+ return AMDGPUTargetLowering::shouldExpandAtomicRMWInIR(RMW);
+}
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index e11a6df3395..62758ddbdcc 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -351,6 +351,7 @@ public:
const SelectionDAG &DAG,
bool SNaN = false,
unsigned Depth = 0) const override;
+ AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const override;
};
} // End namespace llvm
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index f66d480b8d5..8438fc016be 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -61,10 +61,6 @@ def SDTAtomic2_f32 : SDTypeProfile<1, 2, [
SDTCisSameAs<0,2>, SDTCisFP<0>, SDTCisPtrTy<1>
]>;
-def SIatomic_fadd : SDNode<"AMDGPUISD::ATOMIC_LOAD_FADD", SDTAtomic2_f32,
- [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
->;
-
def SIatomic_fmin : SDNode<"AMDGPUISD::ATOMIC_LOAD_FMIN", SDTAtomic2_f32,
[SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
>;
@@ -232,7 +228,7 @@ defm atomic_dec_global : global_binary_atomic_op<SIatomic_dec>;
def atomic_inc_local : local_binary_atomic_op<SIatomic_inc>;
def atomic_dec_local : local_binary_atomic_op<SIatomic_dec>;
-def atomic_load_fadd_local : local_binary_atomic_op<SIatomic_fadd>;
+def atomic_load_fadd_local : local_binary_atomic_op<atomic_load_fadd>;
def atomic_load_fmin_local : local_binary_atomic_op<SIatomic_fmin>;
def atomic_load_fmax_local : local_binary_atomic_op<SIatomic_fmax>;
@@ -428,7 +424,7 @@ defm atomic_load_xor : SIAtomicM0Glue2 <"LOAD_XOR">;
defm atomic_load_umin : SIAtomicM0Glue2 <"LOAD_UMIN">;
defm atomic_load_umax : SIAtomicM0Glue2 <"LOAD_UMAX">;
defm atomic_swap : SIAtomicM0Glue2 <"SWAP">;
-defm atomic_load_fadd : SIAtomicM0Glue2 <"LOAD_FADD", 1, SDTAtomic2_f32>;
+defm atomic_load_fadd : SIAtomicM0Glue2 <"LOAD_FADD", 0, SDTAtomic2_f32>;
defm atomic_load_fmin : SIAtomicM0Glue2 <"LOAD_FMIN", 1, SDTAtomic2_f32>;
defm atomic_load_fmax : SIAtomicM0Glue2 <"LOAD_FMAX", 1, SDTAtomic2_f32>;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 9a1341b81c7..3bcd7ed40bf 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -25234,6 +25234,8 @@ X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
case AtomicRMWInst::Min:
case AtomicRMWInst::UMax:
case AtomicRMWInst::UMin:
+ case AtomicRMWInst::FAdd:
+ case AtomicRMWInst::FSub:
// These always require a non-trivial set of data operations on x86. We must
// use a cmpxchg loop.
return AtomicExpansionKind::CmpXChg;
diff --git a/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll b/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll
new file mode 100644
index 00000000000..17c45c1a2f4
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/local-atomics-fp.ll
@@ -0,0 +1,109 @@
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,GFX678,HAS-ATOMICS %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,HAS-ATOMICS %s
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX678,NO-ATOMICS %s
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX678,NO-ATOMICS %s
+
+; GCN-LABEL: {{^}}lds_atomic_fadd_ret_f32:
+; GFX678-DAG: s_mov_b32 m0
+; GFX9-NOT: m0
+; HAS-ATOMICS-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 4.0
+; HAS-ATOMICS: ds_add_rtn_f32 v0, v0, [[K]]
+
+; NO-ATOMICS: ds_read_b32
+; NO-ATOMICS: v_add_f32
+; NO-ATOMICS: ds_cmpst_rtn_b32
+; NO-ATOMICS: s_cbranch_execnz
+define float @lds_atomic_fadd_ret_f32(float addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw fadd float addrspace(3)* %ptr, float 4.0 seq_cst
+ ret float %result
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fadd_noret_f32:
+; GFX678-DAG: s_mov_b32 m0
+; GFX9-NOT: m0
+; HAS-ATOMICS-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 4.0
+; HAS-ATOMICS: ds_add_f32 v0, [[K]]
+define void @lds_atomic_fadd_noret_f32(float addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw fadd float addrspace(3)* %ptr, float 4.0 seq_cst
+ ret void
+}
+
+; GCN-LABEL: {{^}}lds_ds_fadd:
+; VI-DAG: s_mov_b32 m0
+; GFX9-NOT: m0
+; HAS-ATOMICS-DAG: v_mov_b32_e32 [[V0:v[0-9]+]], 0x42280000
+; HAS-ATOMICS: ds_add_rtn_f32 [[V2:v[0-9]+]], [[V1:v[0-9]+]], [[V0]] offset:32
+; HAS-ATOMICS: ds_add_f32 [[V3:v[0-9]+]], [[V0]] offset:64
+; HAS-ATOMICS: s_waitcnt lgkmcnt(1)
+; HAS-ATOMICS: ds_add_rtn_f32 {{v[0-9]+}}, {{v[0-9]+}}, [[V2]]
+define amdgpu_kernel void @lds_ds_fadd(float addrspace(1)* %out, float addrspace(3)* %ptrf, i32 %idx) {
+ %idx.add = add nuw i32 %idx, 4
+ %shl0 = shl i32 %idx.add, 3
+ %shl1 = shl i32 %idx.add, 4
+ %ptr0 = inttoptr i32 %shl0 to float addrspace(3)*
+ %ptr1 = inttoptr i32 %shl1 to float addrspace(3)*
+ %a1 = atomicrmw fadd float addrspace(3)* %ptr0, float 4.2e+1 seq_cst
+ %a2 = atomicrmw fadd float addrspace(3)* %ptr1, float 4.2e+1 seq_cst
+ %a3 = atomicrmw fadd float addrspace(3)* %ptrf, float %a1 seq_cst
+ store float %a3, float addrspace(1)* %out
+ ret void
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fadd_ret_f64:
+; GCN: ds_read_b64
+; GCN: v_add_f64
+; GCN: ds_cmpst_rtn_b64
+; GCN: s_cbranch_execnz
+define double @lds_atomic_fadd_ret_f64(double addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw fadd double addrspace(3)* %ptr, double 4.0 seq_cst
+ ret double %result
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fadd_noret_f64:
+; GCN: ds_read_b64
+; GCN: v_add_f64
+; GCN: ds_cmpst_rtn_b64
+; GCN: s_cbranch_execnz
+define void @lds_atomic_fadd_noret_f64(double addrspace(3)* %ptr) nounwind {
+ %result = atomicrmw fadd double addrspace(3)* %ptr, double 4.0 seq_cst
+ ret void
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fsub_ret_f32:
+; GCN: ds_read_b32
+; GCN: v_sub_f32
+; GCN: ds_cmpst_rtn_b32
+; GCN: s_cbranch_execnz
+define float @lds_atomic_fsub_ret_f32(float addrspace(3)* %ptr, float %val) nounwind {
+ %result = atomicrmw fsub float addrspace(3)* %ptr, float %val seq_cst
+ ret float %result
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fsub_noret_f32:
+; GCN: ds_read_b32
+; GCN: v_sub_f32
+; GCN: ds_cmpst_rtn_b32
+define void @lds_atomic_fsub_noret_f32(float addrspace(3)* %ptr, float %val) nounwind {
+ %result = atomicrmw fsub float addrspace(3)* %ptr, float %val seq_cst
+ ret void
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fsub_ret_f64:
+; GCN: ds_read_b64
+; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}
+; GCN: ds_cmpst_rtn_b64
+
+define double @lds_atomic_fsub_ret_f64(double addrspace(3)* %ptr, double %val) nounwind {
+ %result = atomicrmw fsub double addrspace(3)* %ptr, double %val seq_cst
+ ret double %result
+}
+
+; GCN-LABEL: {{^}}lds_atomic_fsub_noret_f64:
+; GCN: ds_read_b64
+; GCN: v_add_f64 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+:[0-9]+\]}}
+; GCN: ds_cmpst_rtn_b64
+; GCN: s_cbranch_execnz
+define void @lds_atomic_fsub_noret_f64(double addrspace(3)* %ptr, double %val) nounwind {
+ %result = atomicrmw fsub double addrspace(3)* %ptr, double %val seq_cst
+ ret void
+}
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
new file mode 100644
index 00000000000..f96cd0b29bf
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fadd.ll
@@ -0,0 +1,264 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=CI %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GFX9 %s
+
+define float @test_atomicrmw_fadd_f32_flat(float* %ptr, float %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f32_flat(
+; CI-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
+; CI-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CI: atomicrmw.start:
+; CI-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CI-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CI-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
+; CI-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CI-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CI-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CI-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CI-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CI-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CI-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CI: atomicrmw.end:
+; CI-NEXT: ret float [[TMP6]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f32_flat(
+; GFX9-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
+; GFX9-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX9: atomicrmw.start:
+; GFX9-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GFX9-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; GFX9-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
+; GFX9-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; GFX9-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; GFX9-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; GFX9-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GFX9-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GFX9-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; GFX9-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX9: atomicrmw.end:
+; GFX9-NEXT: ret float [[TMP6]]
+;
+ %res = atomicrmw fadd float* %ptr, float %value seq_cst
+ ret float %res
+}
+
+define float @test_atomicrmw_fadd_f32_global(float addrspace(1)* %ptr, float %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f32_global(
+; CI-NEXT: [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4
+; CI-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CI: atomicrmw.start:
+; CI-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CI-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CI-NEXT: [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)*
+; CI-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CI-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CI-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CI-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CI-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CI-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CI-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CI: atomicrmw.end:
+; CI-NEXT: ret float [[TMP6]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f32_global(
+; GFX9-NEXT: [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4
+; GFX9-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX9: atomicrmw.start:
+; GFX9-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GFX9-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; GFX9-NEXT: [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)*
+; GFX9-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; GFX9-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; GFX9-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; GFX9-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GFX9-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GFX9-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; GFX9-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX9: atomicrmw.end:
+; GFX9-NEXT: ret float [[TMP6]]
+;
+ %res = atomicrmw fadd float addrspace(1)* %ptr, float %value seq_cst
+ ret float %res
+}
+
+define float @test_atomicrmw_fadd_f32_local(float addrspace(3)* %ptr, float %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f32_local(
+; CI-NEXT: [[TMP1:%.*]] = load float, float addrspace(3)* [[PTR:%.*]], align 4
+; CI-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CI: atomicrmw.start:
+; CI-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CI-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CI-NEXT: [[TMP2:%.*]] = bitcast float addrspace(3)* [[PTR]] to i32 addrspace(3)*
+; CI-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CI-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CI-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(3)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CI-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CI-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CI-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CI-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CI: atomicrmw.end:
+; CI-NEXT: ret float [[TMP6]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f32_local(
+; GFX9-NEXT: [[RES:%.*]] = atomicrmw fadd float addrspace(3)* [[PTR:%.*]], float [[VALUE:%.*]] seq_cst
+; GFX9-NEXT: ret float [[RES]]
+;
+ %res = atomicrmw fadd float addrspace(3)* %ptr, float %value seq_cst
+ ret float %res
+}
+
+define half @test_atomicrmw_fadd_f16_flat(half* %ptr, half %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f16_flat(
+; CI-NEXT: [[RES:%.*]] = atomicrmw fadd half* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; CI-NEXT: ret half [[RES]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f16_flat(
+; GFX9-NEXT: [[RES:%.*]] = atomicrmw fadd half* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; GFX9-NEXT: ret half [[RES]]
+;
+ %res = atomicrmw fadd half* %ptr, half %value seq_cst
+ ret half %res
+}
+
+define half @test_atomicrmw_fadd_f16_global(half addrspace(1)* %ptr, half %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f16_global(
+; CI-NEXT: [[RES:%.*]] = atomicrmw fadd half addrspace(1)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; CI-NEXT: ret half [[RES]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f16_global(
+; GFX9-NEXT: [[RES:%.*]] = atomicrmw fadd half addrspace(1)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; GFX9-NEXT: ret half [[RES]]
+;
+ %res = atomicrmw fadd half addrspace(1)* %ptr, half %value seq_cst
+ ret half %res
+}
+
+define half @test_atomicrmw_fadd_f16_local(half addrspace(3)* %ptr, half %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f16_local(
+; CI-NEXT: [[RES:%.*]] = atomicrmw fadd half addrspace(3)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; CI-NEXT: ret half [[RES]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f16_local(
+; GFX9-NEXT: [[RES:%.*]] = atomicrmw fadd half addrspace(3)* [[PTR:%.*]], half [[VALUE:%.*]] seq_cst
+; GFX9-NEXT: ret half [[RES]]
+;
+ %res = atomicrmw fadd half addrspace(3)* %ptr, half %value seq_cst
+ ret half %res
+}
+
+define double @test_atomicrmw_fadd_f64_flat(double* %ptr, double %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f64_flat(
+; CI-NEXT: [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8
+; CI-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CI: atomicrmw.start:
+; CI-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CI-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; CI-NEXT: [[TMP2:%.*]] = bitcast double* [[PTR]] to i64*
+; CI-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CI-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CI-NEXT: [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CI-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CI-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CI-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CI-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CI: atomicrmw.end:
+; CI-NEXT: ret double [[TMP6]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f64_flat(
+; GFX9-NEXT: [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8
+; GFX9-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX9: atomicrmw.start:
+; GFX9-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GFX9-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; GFX9-NEXT: [[TMP2:%.*]] = bitcast double* [[PTR]] to i64*
+; GFX9-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; GFX9-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; GFX9-NEXT: [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; GFX9-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; GFX9-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; GFX9-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; GFX9-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX9: atomicrmw.end:
+; GFX9-NEXT: ret double [[TMP6]]
+;
+ %res = atomicrmw fadd double* %ptr, double %value seq_cst
+ ret double %res
+}
+
+define double @test_atomicrmw_fadd_f64_global(double addrspace(1)* %ptr, double %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f64_global(
+; CI-NEXT: [[TMP1:%.*]] = load double, double addrspace(1)* [[PTR:%.*]], align 8
+; CI-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CI: atomicrmw.start:
+; CI-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CI-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; CI-NEXT: [[TMP2:%.*]] = bitcast double addrspace(1)* [[PTR]] to i64 addrspace(1)*
+; CI-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CI-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CI-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(1)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CI-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CI-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CI-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CI-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CI: atomicrmw.end:
+; CI-NEXT: ret double [[TMP6]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f64_global(
+; GFX9-NEXT: [[TMP1:%.*]] = load double, double addrspace(1)* [[PTR:%.*]], align 8
+; GFX9-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX9: atomicrmw.start:
+; GFX9-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GFX9-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; GFX9-NEXT: [[TMP2:%.*]] = bitcast double addrspace(1)* [[PTR]] to i64 addrspace(1)*
+; GFX9-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; GFX9-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; GFX9-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(1)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; GFX9-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; GFX9-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; GFX9-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; GFX9-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX9: atomicrmw.end:
+; GFX9-NEXT: ret double [[TMP6]]
+;
+ %res = atomicrmw fadd double addrspace(1)* %ptr, double %value seq_cst
+ ret double %res
+}
+
+define double @test_atomicrmw_fadd_f64_local(double addrspace(3)* %ptr, double %value) {
+; CI-LABEL: @test_atomicrmw_fadd_f64_local(
+; CI-NEXT: [[TMP1:%.*]] = load double, double addrspace(3)* [[PTR:%.*]], align 8
+; CI-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CI: atomicrmw.start:
+; CI-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CI-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; CI-NEXT: [[TMP2:%.*]] = bitcast double addrspace(3)* [[PTR]] to i64 addrspace(3)*
+; CI-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CI-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CI-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(3)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CI-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CI-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CI-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CI-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CI: atomicrmw.end:
+; CI-NEXT: ret double [[TMP6]]
+;
+; GFX9-LABEL: @test_atomicrmw_fadd_f64_local(
+; GFX9-NEXT: [[TMP1:%.*]] = load double, double addrspace(3)* [[PTR:%.*]], align 8
+; GFX9-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GFX9: atomicrmw.start:
+; GFX9-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GFX9-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; GFX9-NEXT: [[TMP2:%.*]] = bitcast double addrspace(3)* [[PTR]] to i64 addrspace(3)*
+; GFX9-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; GFX9-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; GFX9-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(3)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; GFX9-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; GFX9-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; GFX9-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; GFX9-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GFX9: atomicrmw.end:
+; GFX9-NEXT: ret double [[TMP6]]
+;
+ %res = atomicrmw fadd double addrspace(3)* %ptr, double %value seq_cst
+ ret double %res
+}
+
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
new file mode 100644
index 00000000000..bfbdf9b6e86
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/expand-atomic-rmw-fsub.ll
@@ -0,0 +1,201 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -atomic-expand %s | FileCheck -check-prefix=GCN %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s
+
+define float @test_atomicrmw_fsub_f32_flat(float* %ptr, float %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f32_flat(
+; GCN-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
+; GCN-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; GCN-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: ret float [[TMP6]]
+;
+ %res = atomicrmw fsub float* %ptr, float %value seq_cst
+ ret float %res
+}
+
+define float @test_atomicrmw_fsub_f32_global(float addrspace(1)* %ptr, float %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f32_global(
+; GCN-NEXT: [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)*
+; GCN-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; GCN-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: ret float [[TMP6]]
+;
+ %res = atomicrmw fsub float addrspace(1)* %ptr, float %value seq_cst
+ ret float %res
+}
+
+define float @test_atomicrmw_fsub_f32_local(float addrspace(3)* %ptr, float %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f32_local(
+; GCN-NEXT: [[TMP1:%.*]] = load float, float addrspace(3)* [[PTR:%.*]], align 4
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP2:%.*]] = bitcast float addrspace(3)* [[PTR]] to i32 addrspace(3)*
+; GCN-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; GCN-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(3)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; GCN-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: ret float [[TMP6]]
+;
+ %res = atomicrmw fsub float addrspace(3)* %ptr, float %value seq_cst
+ ret float %res
+}
+
+define half @test_atomicrmw_fsub_f16_flat(half* %ptr, half %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f16_flat(
+; GCN-NEXT: [[TMP1:%.*]] = load half, half* [[PTR:%.*]], align 2
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi half [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = fsub half [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP2:%.*]] = bitcast half* [[PTR]] to i16*
+; GCN-NEXT: [[TMP3:%.*]] = bitcast half [[NEW]] to i16
+; GCN-NEXT: [[TMP4:%.*]] = bitcast half [[LOADED]] to i16
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i16* [[TMP2]], i16 [[TMP4]], i16 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i16, i1 } [[TMP5]], 0
+; GCN-NEXT: [[TMP6]] = bitcast i16 [[NEWLOADED]] to half
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: ret half [[TMP6]]
+;
+ %res = atomicrmw fsub half* %ptr, half %value seq_cst
+ ret half %res
+}
+
+define half @test_atomicrmw_fsub_f16_global(half addrspace(1)* %ptr, half %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f16_global(
+; GCN-NEXT: [[TMP1:%.*]] = load half, half addrspace(1)* [[PTR:%.*]], align 2
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi half [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = fsub half [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP2:%.*]] = bitcast half addrspace(1)* [[PTR]] to i16 addrspace(1)*
+; GCN-NEXT: [[TMP3:%.*]] = bitcast half [[NEW]] to i16
+; GCN-NEXT: [[TMP4:%.*]] = bitcast half [[LOADED]] to i16
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i16 addrspace(1)* [[TMP2]], i16 [[TMP4]], i16 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i16, i1 } [[TMP5]], 0
+; GCN-NEXT: [[TMP6]] = bitcast i16 [[NEWLOADED]] to half
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: ret half [[TMP6]]
+;
+ %res = atomicrmw fsub half addrspace(1)* %ptr, half %value seq_cst
+ ret half %res
+}
+
+define half @test_atomicrmw_fsub_f16_local(half addrspace(3)* %ptr, half %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f16_local(
+; GCN-NEXT: [[TMP1:%.*]] = load half, half addrspace(3)* [[PTR:%.*]], align 2
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi half [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = fsub half [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP2:%.*]] = bitcast half addrspace(3)* [[PTR]] to i16 addrspace(3)*
+; GCN-NEXT: [[TMP3:%.*]] = bitcast half [[NEW]] to i16
+; GCN-NEXT: [[TMP4:%.*]] = bitcast half [[LOADED]] to i16
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i16 addrspace(3)* [[TMP2]], i16 [[TMP4]], i16 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i16, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i16, i1 } [[TMP5]], 0
+; GCN-NEXT: [[TMP6]] = bitcast i16 [[NEWLOADED]] to half
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: ret half [[TMP6]]
+;
+ %res = atomicrmw fsub half addrspace(3)* %ptr, half %value seq_cst
+ ret half %res
+}
+
+define double @test_atomicrmw_fsub_f64_flat(double* %ptr, double %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f64_flat(
+; GCN-NEXT: [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP2:%.*]] = bitcast double* [[PTR]] to i64*
+; GCN-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; GCN-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; GCN-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: ret double [[TMP6]]
+;
+ %res = atomicrmw fsub double* %ptr, double %value seq_cst
+ ret double %res
+}
+
+define double @test_atomicrmw_fsub_f64_global(double addrspace(1)* %ptr, double %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f64_global(
+; GCN-NEXT: [[TMP1:%.*]] = load double, double addrspace(1)* [[PTR:%.*]], align 8
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP2:%.*]] = bitcast double addrspace(1)* [[PTR]] to i64 addrspace(1)*
+; GCN-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; GCN-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(1)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; GCN-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: ret double [[TMP6]]
+;
+ %res = atomicrmw fsub double addrspace(1)* %ptr, double %value seq_cst
+ ret double %res
+}
+
+define double @test_atomicrmw_fsub_f64_local(double addrspace(3)* %ptr, double %value) {
+; GCN-LABEL: @test_atomicrmw_fsub_f64_local(
+; GCN-NEXT: [[TMP1:%.*]] = load double, double addrspace(3)* [[PTR:%.*]], align 8
+; GCN-NEXT: br label [[ATOMICRMW_START:%.*]]
+; GCN: atomicrmw.start:
+; GCN-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; GCN-NEXT: [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]]
+; GCN-NEXT: [[TMP2:%.*]] = bitcast double addrspace(3)* [[PTR]] to i64 addrspace(3)*
+; GCN-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; GCN-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; GCN-NEXT: [[TMP5:%.*]] = cmpxchg i64 addrspace(3)* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; GCN-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; GCN-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; GCN-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; GCN-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; GCN: atomicrmw.end:
+; GCN-NEXT: ret double [[TMP6]]
+;
+ %res = atomicrmw fsub double addrspace(3)* %ptr, double %value seq_cst
+ ret double %res
+}
diff --git a/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
new file mode 100644
index 00000000000..e37f9bb7d56
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/X86/expand-atomic-rmw-fp.ll
@@ -0,0 +1,112 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=i686-linux-gnu -atomic-expand %s | FileCheck %s
+
+define float @test_atomicrmw_fadd_f32(float* %ptr, float %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f32(
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret float [[TMP6]]
+;
+ %res = atomicrmw fadd float* %ptr, float %value seq_cst
+ ret float %res
+}
+
+define double @test_atomicrmw_fadd_f64(double* %ptr, double %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f64(
+; CHECK-NEXT: [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fadd double [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[PTR]] to i64*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret double [[TMP6]]
+;
+ %res = atomicrmw fadd double* %ptr, double %value seq_cst
+ ret double %res
+}
+
+define float @test_atomicrmw_fadd_f32_as1(float addrspace(1)* %ptr, float %value) {
+; CHECK-LABEL: @test_atomicrmw_fadd_f32_as1(
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float addrspace(1)* [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fadd float [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float addrspace(1)* [[PTR]] to i32 addrspace(1)*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32 addrspace(1)* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret float [[TMP6]]
+;
+ %res = atomicrmw fadd float addrspace(1)* %ptr, float %value seq_cst
+ ret float %res
+}
+
+define float @test_atomicrmw_fsub_f32(float* %ptr, float %value) {
+; CHECK-LABEL: @test_atomicrmw_fsub_f32(
+; CHECK-NEXT: [[TMP1:%.*]] = load float, float* [[PTR:%.*]], align 4
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi float [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fsub float [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast float* [[PTR]] to i32*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast float [[NEW]] to i32
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[LOADED]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i32* [[TMP2]], i32 [[TMP4]], i32 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i32, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i32, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i32 [[NEWLOADED]] to float
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret float [[TMP6]]
+;
+ %res = atomicrmw fsub float* %ptr, float %value seq_cst
+ ret float %res
+}
+
+define double @test_atomicrmw_fsub_f64(double* %ptr, double %value) {
+; CHECK-LABEL: @test_atomicrmw_fsub_f64(
+; CHECK-NEXT: [[TMP1:%.*]] = load double, double* [[PTR:%.*]], align 8
+; CHECK-NEXT: br label [[ATOMICRMW_START:%.*]]
+; CHECK: atomicrmw.start:
+; CHECK-NEXT: [[LOADED:%.*]] = phi double [ [[TMP1]], [[TMP0:%.*]] ], [ [[TMP6:%.*]], [[ATOMICRMW_START]] ]
+; CHECK-NEXT: [[NEW:%.*]] = fsub double [[LOADED]], [[VALUE:%.*]]
+; CHECK-NEXT: [[TMP2:%.*]] = bitcast double* [[PTR]] to i64*
+; CHECK-NEXT: [[TMP3:%.*]] = bitcast double [[NEW]] to i64
+; CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[LOADED]] to i64
+; CHECK-NEXT: [[TMP5:%.*]] = cmpxchg i64* [[TMP2]], i64 [[TMP4]], i64 [[TMP3]] seq_cst seq_cst
+; CHECK-NEXT: [[SUCCESS:%.*]] = extractvalue { i64, i1 } [[TMP5]], 1
+; CHECK-NEXT: [[NEWLOADED:%.*]] = extractvalue { i64, i1 } [[TMP5]], 0
+; CHECK-NEXT: [[TMP6]] = bitcast i64 [[NEWLOADED]] to double
+; CHECK-NEXT: br i1 [[SUCCESS]], label [[ATOMICRMW_END:%.*]], label [[ATOMICRMW_START]]
+; CHECK: atomicrmw.end:
+; CHECK-NEXT: ret double [[TMP6]]
+;
+ %res = atomicrmw fsub double* %ptr, double %value seq_cst
+ ret double %res
+}