Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/R600/SIISelLowering.cpp  | 109
-rw-r--r--  llvm/lib/Target/R600/SIISelLowering.h    |   3
-rw-r--r--  llvm/lib/Target/R600/SIInstrInfo.cpp     |  22
-rw-r--r--  llvm/lib/Target/R600/SIInstrInfo.h       |   4
-rw-r--r--  llvm/test/CodeGen/R600/shl_add_ptr.ll    | 286
5 files changed, 423 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/R600/SIISelLowering.cpp b/llvm/lib/Target/R600/SIISelLowering.cpp
index 911c5e55949..6d2e9575e59 100644
--- a/llvm/lib/Target/R600/SIISelLowering.cpp
+++ b/llvm/lib/Target/R600/SIISelLowering.cpp
@@ -235,6 +235,26 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
setTargetDAGCombine(ISD::UINT_TO_FP);
+ // All memory operations. Some folding on the pointer operand is done to help
+ // matching the constant offsets in the addressing modes.
+ setTargetDAGCombine(ISD::LOAD);
+ setTargetDAGCombine(ISD::STORE);
+ setTargetDAGCombine(ISD::ATOMIC_LOAD);
+ setTargetDAGCombine(ISD::ATOMIC_STORE);
+ setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP);
+ setTargetDAGCombine(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS);
+ setTargetDAGCombine(ISD::ATOMIC_SWAP);
+ setTargetDAGCombine(ISD::ATOMIC_LOAD_ADD);
+ setTargetDAGCombine(ISD::ATOMIC_LOAD_SUB);
+ setTargetDAGCombine(ISD::ATOMIC_LOAD_AND);
+ setTargetDAGCombine(ISD::ATOMIC_LOAD_OR);
+ setTargetDAGCombine(ISD::ATOMIC_LOAD_XOR);
+ setTargetDAGCombine(ISD::ATOMIC_LOAD_NAND);
+ setTargetDAGCombine(ISD::ATOMIC_LOAD_MIN);
+ setTargetDAGCombine(ISD::ATOMIC_LOAD_MAX);
+ setTargetDAGCombine(ISD::ATOMIC_LOAD_UMIN);
+ setTargetDAGCombine(ISD::ATOMIC_LOAD_UMAX);
+
setSchedulingPreference(Sched::RegPressure);
}
@@ -1296,6 +1316,56 @@ SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
return SDValue();
}
+// (shl (add x, c1), c2) -> add (shl x, c2), (shl c1, c2)
+
+// This is a variant of
+// (mul (add x, c1), c2) -> add (mul x, c2), (mul c1, c2),
+//
+// The normal DAG combiner will do this, but only if the add has one use,
+// since doing it otherwise would increase the number of instructions.
+//
+// Skipping the combine when the add has multiple uses prevents us from seeing
+// a constant offset that could be folded into a memory instruction's
+// addressing mode. If we know the resulting add offset of a pointer can be
+// folded into an addressing offset, we can replace the pointer operand with
+// the add of the new constant offset. This eliminates one of the uses, and
+// may allow the remaining use to also be simplified.
+//
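+// For example, for an LDS access where x is the workitem id (see
+// test/CodeGen/R600/shl_add_ptr.ll):
+//   (shl (add x, 2), 2)  -->  (add (shl x, 2), 8)
+// The +8 byte offset can then be folded into the DS instruction's 16-bit
+// offset field instead of needing a separate add.
+//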
+SDValue SITargetLowering::performSHLPtrCombine(SDNode *N,
+ unsigned AddrSpace,
+ DAGCombinerInfo &DCI) const {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+
+ if (N0.getOpcode() != ISD::ADD)
+ return SDValue();
+
+ const ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(N1);
+ if (!CN1)
+ return SDValue();
+
+ const ConstantSDNode *CAdd = dyn_cast<ConstantSDNode>(N0.getOperand(1));
+ if (!CAdd)
+ return SDValue();
+
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
+ getTargetMachine().getSubtargetImpl()->getInstrInfo());
+
+ // If the resulting offset is too large, we can't fold it into the addressing
+ // mode offset.
+ APInt Offset = CAdd->getAPIntValue() << CN1->getAPIntValue();
+ if (!TII->canFoldOffset(Offset.getZExtValue(), AddrSpace))
+ return SDValue();
+
+ SelectionDAG &DAG = DCI.DAG;
+ SDLoc SL(N);
+ EVT VT = N->getValueType(0);
+
+ SDValue ShlX = DAG.getNode(ISD::SHL, SL, VT, N0.getOperand(0), N1);
+ SDValue COffset = DAG.getConstant(Offset, MVT::i32);
+
+ return DAG.getNode(ISD::ADD, SL, VT, ShlX, COffset);
+}
+
SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
DAGCombinerInfo &DCI) const {
SelectionDAG &DAG = DCI.DAG;
@@ -1348,8 +1418,45 @@ SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
case ISD::UINT_TO_FP: {
return performUCharToFloatCombine(N, DCI);
}
- }
+ case ISD::LOAD:
+ case ISD::STORE:
+ case ISD::ATOMIC_LOAD:
+ case ISD::ATOMIC_STORE:
+ case ISD::ATOMIC_CMP_SWAP:
+ case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
+ case ISD::ATOMIC_SWAP:
+ case ISD::ATOMIC_LOAD_ADD:
+ case ISD::ATOMIC_LOAD_SUB:
+ case ISD::ATOMIC_LOAD_AND:
+ case ISD::ATOMIC_LOAD_OR:
+ case ISD::ATOMIC_LOAD_XOR:
+ case ISD::ATOMIC_LOAD_NAND:
+ case ISD::ATOMIC_LOAD_MIN:
+ case ISD::ATOMIC_LOAD_MAX:
+ case ISD::ATOMIC_LOAD_UMIN:
+ case ISD::ATOMIC_LOAD_UMAX: { // TODO: Target mem intrinsics.
+ if (DCI.isBeforeLegalize())
+ break;
+
+ MemSDNode *MemNode = cast<MemSDNode>(N);
+ SDValue Ptr = MemNode->getBasePtr();
+ // TODO: We could also do this for multiplies.
+ unsigned AS = MemNode->getAddressSpace();
+ if (Ptr.getOpcode() == ISD::SHL && AS != AMDGPUAS::PRIVATE_ADDRESS) {
+ SDValue NewPtr = performSHLPtrCombine(Ptr.getNode(), AS, DCI);
+ if (NewPtr) {
+ SmallVector<SDValue, 8> NewOps;
+ for (unsigned I = 0, N = MemNode->getNumOperands(); I != N; ++I)
+ NewOps.push_back(MemNode->getOperand(I));
+
+ NewOps[N->getOpcode() == ISD::STORE ? 2 : 1] = NewPtr;
+ return SDValue(DAG.UpdateNodeOperands(MemNode, NewOps), 0);
+ }
+ }
+ break;
+ }
+ }
return AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
}
diff --git a/llvm/lib/Target/R600/SIISelLowering.h b/llvm/lib/Target/R600/SIISelLowering.h
index 2a9aa49491f..b952bc0345a 100644
--- a/llvm/lib/Target/R600/SIISelLowering.h
+++ b/llvm/lib/Target/R600/SIISelLowering.h
@@ -56,6 +56,9 @@ class SITargetLowering : public AMDGPUTargetLowering {
static SDValue performUCharToFloatCombine(SDNode *N,
DAGCombinerInfo &DCI);
+ SDValue performSHLPtrCombine(SDNode *N,
+ unsigned AS,
+ DAGCombinerInfo &DCI) const;
public:
SITargetLowering(TargetMachine &tm);
diff --git a/llvm/lib/Target/R600/SIInstrInfo.cpp b/llvm/lib/Target/R600/SIInstrInfo.cpp
index 5cd8ddfaceb..3868f63b3a8 100644
--- a/llvm/lib/Target/R600/SIInstrInfo.cpp
+++ b/llvm/lib/Target/R600/SIInstrInfo.cpp
@@ -803,6 +803,28 @@ bool SIInstrInfo::isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
return RI.regClassCanUseImmediate(OpInfo.RegClass);
}
+bool SIInstrInfo::canFoldOffset(unsigned OffsetSize, unsigned AS) {
+ switch (AS) {
+ case AMDGPUAS::GLOBAL_ADDRESS: {
+    // MUBUF instructions have a 12-bit offset in bytes.
+ return isUInt<12>(OffsetSize);
+ }
+ case AMDGPUAS::CONSTANT_ADDRESS: {
+ // SMRD instructions have an 8-bit offset in dwords.
+ return (OffsetSize % 4 == 0) && isUInt<8>(OffsetSize / 4);
+ }
+ case AMDGPUAS::LOCAL_ADDRESS:
+ case AMDGPUAS::REGION_ADDRESS: {
+ // The single offset versions have a 16-bit offset in bytes.
+ return isUInt<16>(OffsetSize);
+ }
+ case AMDGPUAS::PRIVATE_ADDRESS:
+ // Indirect register addressing does not use any offsets.
+ default:
+    return false;
+ }
+}
+
bool SIInstrInfo::hasVALU32BitEncoding(unsigned Opcode) const {
return AMDGPU::getVOPe32(Opcode) != -1;
}
diff --git a/llvm/lib/Target/R600/SIInstrInfo.h b/llvm/lib/Target/R600/SIInstrInfo.h
index 9d16bc4327b..cab448ac9f5 100644
--- a/llvm/lib/Target/R600/SIInstrInfo.h
+++ b/llvm/lib/Target/R600/SIInstrInfo.h
@@ -119,6 +119,10 @@ public:
bool isImmOperandLegal(const MachineInstr *MI, unsigned OpNo,
const MachineOperand &MO) const;
+  /// \brief Return true if the given offset size in bytes can be folded into
+ /// the immediate offsets of a memory instruction for the given address space.
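+  ///
+  /// For example, an 8192 byte offset fits the 16-bit byte offset of DS
+  /// (local/region) instructions, but not the 12-bit MUBUF offset used for
+  /// global accesses or the 8-bit dword offset used by SMRD.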
+ static bool canFoldOffset(unsigned OffsetSize, unsigned AS) LLVM_READNONE;
+
/// \brief Return true if this 64-bit VALU instruction has a 32-bit encoding.
/// This function will return false if you pass it a 32-bit instruction.
bool hasVALU32BitEncoding(unsigned Opcode) const;
diff --git a/llvm/test/CodeGen/R600/shl_add_ptr.ll b/llvm/test/CodeGen/R600/shl_add_ptr.ll
new file mode 100644
index 00000000000..b6197c9932c
--- /dev/null
+++ b/llvm/test/CodeGen/R600/shl_add_ptr.ll
@@ -0,0 +1,286 @@
+; XFAIL: *
+; Enable when the generic DAG combiner patch to perform the shl + add constant fold is in.
+
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s
+
+; Test that doing a shift of a pointer with a constant add will be
+; folded into the constant offset addressing mode even if the add has
+; multiple uses. This is relevant to accessing 2 separate, adjacent
+; LDS globals.
+
+
+declare i32 @llvm.r600.read.tidig.x() #1
+
+@lds0 = addrspace(3) global [512 x float] zeroinitializer, align 4
+@lds1 = addrspace(3) global [512 x float] zeroinitializer, align 4
+
+
+; Make sure the (add tid, 2) << 2 gets folded into the DS instruction's offset as (tid << 2) + 8.
+
+; SI-LABEL: @load_shl_base_lds_0
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_READ_B32 {{v[0-9]+}}, [[PTR]], 0x8, [M0]
+; SI: S_ENDPGM
+define void @load_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
+ %val0 = load float addrspace(3)* %arrayidx0, align 4
+ store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+ store float %val0, float addrspace(1)* %out
+ ret void
+}
+
+; Make sure once the first use is folded into the addressing mode, the
+; remaining add use goes through the normal shl + add constant fold.
+
+; SI-LABEL: @load_shl_base_lds_1
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_READ_B32 [[RESULT:v[0-9]+]], [[PTR]], 0x8, [M0]
+; SI: V_ADD_I32_e32 [[ADDUSE:v[0-9]+]], 8, v{{[0-9]+}}
+; SI-DAG: BUFFER_STORE_DWORD [[RESULT]]
+; SI-DAG: BUFFER_STORE_DWORD [[ADDUSE]]
+; SI: S_ENDPGM
+define void @load_shl_base_lds_1(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
+ %val0 = load float addrspace(3)* %arrayidx0, align 4
+ %shl_add_use = shl i32 %idx.0, 2
+ store i32 %shl_add_use, i32 addrspace(1)* %add_use, align 4
+ store float %val0, float addrspace(1)* %out
+ ret void
+}
+
+@maxlds = addrspace(3) global [65536 x i8] zeroinitializer, align 4
+
+; SI-LABEL: @load_shl_base_lds_max_offset
+; SI: DS_READ_U8 v{{[0-9]+}}, v{{[0-9]+}}, 0xffff
+; SI: S_ENDPGM
+define void @load_shl_base_lds_max_offset(i8 addrspace(1)* %out, i8 addrspace(3)* %lds, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 65535
+ %arrayidx0 = getelementptr inbounds [65536 x i8] addrspace(3)* @maxlds, i32 0, i32 %idx.0
+ %val0 = load i8 addrspace(3)* %arrayidx0
+ store i32 %idx.0, i32 addrspace(1)* %add_use
+ store i8 %val0, i8 addrspace(1)* %out
+ ret void
+}
+
+; The two globals are placed adjacent in memory, so the same base
+; pointer can be used with an offset into the second one.
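+;
+; With @lds0 holding 512 floats (2048 = 0x800 bytes) and an index offset of
+; 64 elements (256 = 0x100 bytes), the expected DS offsets are 0x100 into
+; @lds0 and 0x800 + 0x100 = 0x900 into @lds1 from the same shifted base.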
+
+; SI-LABEL: @load_shl_base_lds_2
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI-NEXT: DS_READ_B32 {{v[0-9]+}}, [[PTR]], 0x100, [M0]
+; SI-NEXT: DS_READ_B32 {{v[0-9]+}}, [[PTR]], 0x900, [M0]
+; SI: S_ENDPGM
+define void @load_shl_base_lds_2(float addrspace(1)* %out) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 64
+ %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
+ %val0 = load float addrspace(3)* %arrayidx0, align 4
+ %arrayidx1 = getelementptr inbounds [512 x float] addrspace(3)* @lds1, i32 0, i32 %idx.0
+ %val1 = load float addrspace(3)* %arrayidx1, align 4
+ %sum = fadd float %val0, %val1
+ store float %sum, float addrspace(1)* %out, align 4
+ ret void
+}
+
+; SI-LABEL: @store_shl_base_lds_0
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_WRITE_B32 [[PTR]], {{v[0-9]+}}, 0x8 [M0]
+; SI: S_ENDPGM
+define void @store_shl_base_lds_0(float addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
+ store float 1.0, float addrspace(3)* %arrayidx0, align 4
+ store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+ ret void
+}
+
+
+; --------------------------------------------------------------------------------
+; Atomics.
+
+@lds2 = addrspace(3) global [512 x i32] zeroinitializer, align 4
+
+; define void @atomic_load_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+; %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+; %idx.0 = add nsw i32 %tid.x, 2
+; %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+; %val = load atomic i32 addrspace(3)* %arrayidx0 seq_cst, align 4
+; store i32 %val, i32 addrspace(1)* %out, align 4
+; store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+; ret void
+; }
+
+
+; SI-LABEL: @atomic_cmpxchg_shl_base_lds_0
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_CMPST_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, {{v[0-9]+}}, 0x8
+; SI: S_ENDPGM
+define void @atomic_cmpxchg_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use, i32 %swap) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %pair = cmpxchg i32 addrspace(3)* %arrayidx0, i32 7, i32 %swap seq_cst monotonic
+ %result = extractvalue { i32, i1 } %pair, 0
+ store i32 %result, i32 addrspace(1)* %out, align 4
+ store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+ ret void
+}
+
+; SI-LABEL: @atomic_swap_shl_base_lds_0
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_WRXCHG_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
+; SI: S_ENDPGM
+define void @atomic_swap_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %val = atomicrmw xchg i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+ ret void
+}
+
+; SI-LABEL: @atomic_add_shl_base_lds_0
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_ADD_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
+; SI: S_ENDPGM
+define void @atomic_add_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %val = atomicrmw add i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+ ret void
+}
+
+; SI-LABEL: @atomic_sub_shl_base_lds_0
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_SUB_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
+; SI: S_ENDPGM
+define void @atomic_sub_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %val = atomicrmw sub i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+ ret void
+}
+
+; SI-LABEL: @atomic_and_shl_base_lds_0
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_AND_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
+; SI: S_ENDPGM
+define void @atomic_and_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %val = atomicrmw and i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+ ret void
+}
+
+; SI-LABEL: @atomic_or_shl_base_lds_0
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_OR_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
+; SI: S_ENDPGM
+define void @atomic_or_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %val = atomicrmw or i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+ ret void
+}
+
+; SI-LABEL: @atomic_xor_shl_base_lds_0
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_XOR_RTN_B32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
+; SI: S_ENDPGM
+define void @atomic_xor_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %val = atomicrmw xor i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+ ret void
+}
+
+; define void @atomic_nand_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+; %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+; %idx.0 = add nsw i32 %tid.x, 2
+; %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+; %val = atomicrmw nand i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
+; store i32 %val, i32 addrspace(1)* %out, align 4
+; store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+; ret void
+; }
+
+; SI-LABEL: @atomic_min_shl_base_lds_0
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_MIN_RTN_I32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
+; SI: S_ENDPGM
+define void @atomic_min_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %val = atomicrmw min i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+ ret void
+}
+
+; SI-LABEL: @atomic_max_shl_base_lds_0
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_MAX_RTN_I32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
+; SI: S_ENDPGM
+define void @atomic_max_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %val = atomicrmw max i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+ ret void
+}
+
+; SI-LABEL: @atomic_umin_shl_base_lds_0
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_MIN_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
+; SI: S_ENDPGM
+define void @atomic_umin_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %val = atomicrmw umin i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+ ret void
+}
+
+; SI-LABEL: @atomic_umax_shl_base_lds_0
+; SI: V_LSHLREV_B32_e32 [[PTR:v[0-9]+]], 2, {{v[0-9]+}}
+; SI: DS_MAX_RTN_U32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}}, 0x8
+; SI: S_ENDPGM
+define void @atomic_umax_shl_base_lds_0(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
+ %tid.x = tail call i32 @llvm.r600.read.tidig.x() #1
+ %idx.0 = add nsw i32 %tid.x, 2
+ %arrayidx0 = getelementptr inbounds [512 x i32] addrspace(3)* @lds2, i32 0, i32 %idx.0
+ %val = atomicrmw umax i32 addrspace(3)* %arrayidx0, i32 3 seq_cst
+ store i32 %val, i32 addrspace(1)* %out, align 4
+ store i32 %idx.0, i32 addrspace(1)* %add_use, align 4
+ ret void
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }