author    Rafael Espindola <rafael.espindola@gmail.com>  2018-02-07 18:09:35 +0000
committer Rafael Espindola <rafael.espindola@gmail.com>  2018-02-07 18:09:35 +0000
commit    f4e3f3e31cacd412bcef0906da3c6a4bd503d0a2
tree      7acf9b28cd39d6d52948a9e69f9e01d386d31d8c
parent    36df7631b44cb7f8babae9bdea08a982fc9a4c40
Revert "AMDGPU: Add 32-bit constant address space"
This reverts commit r324487. It broke clang tests.

llvm-svn: 324494
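For readers skimming the diff below: every hunk removes the same pattern. r324487 had extended each CONSTANT_ADDRESS check in SIISelLowering.cpp to also accept CONSTANT_ADDRESS_32BIT, and this revert collapses those disjunctions back to the single constant address space. A standalone model of the before/after shape (hypothetical types and placeholder numeric values, not the in-tree code):

#include <cstdio>

// Hypothetical stand-in for the AMDGPUAS struct of that era; the
// numeric values here are placeholders, not the real address-space
// numbers used by the backend.
struct AMDGPUAS {
  unsigned FLAT_ADDRESS = 0;
  unsigned GLOBAL_ADDRESS = 1;
  unsigned CONSTANT_ADDRESS = 4;
  unsigned CONSTANT_ADDRESS_32BIT = 6;
};

// Before the revert (pattern added by r324487): both constant address
// spaces were treated as flat/global-like.
static bool isFlatGlobalAddrSpaceBefore(unsigned AS, const AMDGPUAS &ASI) {
  return AS == ASI.GLOBAL_ADDRESS || AS == ASI.FLAT_ADDRESS ||
         AS == ASI.CONSTANT_ADDRESS || AS == ASI.CONSTANT_ADDRESS_32BIT;
}

// After the revert: only the ordinary constant address space qualifies.
static bool isFlatGlobalAddrSpaceAfter(unsigned AS, const AMDGPUAS &ASI) {
  return AS == ASI.GLOBAL_ADDRESS || AS == ASI.FLAT_ADDRESS ||
         AS == ASI.CONSTANT_ADDRESS;
}

int main() {
  AMDGPUAS ASI;
  std::printf("before: %d, after: %d\n",
              isFlatGlobalAddrSpaceBefore(ASI.CONSTANT_ADDRESS_32BIT, ASI),
              isFlatGlobalAddrSpaceAfter(ASI.CONSTANT_ADDRESS_32BIT, ASI));
  // prints "before: 1, after: 0"
  return 0;
}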
Diffstat (limited to 'llvm/lib/Target/AMDGPU/SIISelLowering.cpp')
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp | 30 +++++++++---------------------
 1 file changed, 9 insertions(+), 21 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 6361c2c9ea9..83fe7e377bb 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -900,8 +900,7 @@ bool SITargetLowering::isLegalAddressingMode(const DataLayout &DL,
if (AS == AMDGPUASI.GLOBAL_ADDRESS)
return isLegalGlobalAddressingMode(AM);
- if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
- AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT) {
+ if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
// If the offset isn't a multiple of 4, it probably isn't going to be
// correctly aligned.
// FIXME: Can we get the real alignment here?
@@ -1024,8 +1023,7 @@ bool SITargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
// If we have an uniform constant load, it still requires using a slow
// buffer instruction if unaligned.
if (IsFast) {
- *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS ||
- AddrSpace == AMDGPUASI.CONSTANT_ADDRESS_32BIT) ?
+ *IsFast = (AddrSpace == AMDGPUASI.CONSTANT_ADDRESS) ?
(Align % 4 == 0) : true;
}
@@ -1068,8 +1066,7 @@ EVT SITargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
static bool isFlatGlobalAddrSpace(unsigned AS, AMDGPUAS AMDGPUASI) {
return AS == AMDGPUASI.GLOBAL_ADDRESS ||
AS == AMDGPUASI.FLAT_ADDRESS ||
- AS == AMDGPUASI.CONSTANT_ADDRESS ||
- AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT;
+ AS == AMDGPUASI.CONSTANT_ADDRESS;
}
bool SITargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
@@ -4011,15 +4008,13 @@ void SITargetLowering::createDebuggerPrologueStackObjects(
bool SITargetLowering::shouldEmitFixup(const GlobalValue *GV) const {
const Triple &TT = getTargetMachine().getTargetTriple();
- return (GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
- GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
+ return GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS &&
AMDGPU::shouldEmitConstantsToTextSection(TT);
}
bool SITargetLowering::shouldEmitGOTReloc(const GlobalValue *GV) const {
return (GV->getType()->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
- GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
- GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
+ GV->getType()->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
!shouldEmitFixup(GV) &&
!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
}
@@ -4396,8 +4391,7 @@ bool
SITargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
// We can fold offsets for anything that doesn't require a GOT relocation.
return (GA->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS ||
- GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS ||
- GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT) &&
+ GA->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) &&
!shouldEmitGOTReloc(GA->getGlobal());
}
@@ -4450,7 +4444,6 @@ SDValue SITargetLowering::LowerGlobalAddress(AMDGPUMachineFunction *MFI,
const GlobalValue *GV = GSD->getGlobal();
if (GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
- GSD->getAddressSpace() != AMDGPUASI.CONSTANT_ADDRESS_32BIT &&
GSD->getAddressSpace() != AMDGPUASI.GLOBAL_ADDRESS &&
// FIXME: It isn't correct to rely on the type of the pointer. This should
// be removed when address space 0 is 64-bit.
@@ -5385,8 +5378,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
AMDGPUASI.PRIVATE_ADDRESS : AMDGPUASI.GLOBAL_ADDRESS;
unsigned NumElements = MemVT.getVectorNumElements();
- if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
- AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT) {
+ if (AS == AMDGPUASI.CONSTANT_ADDRESS) {
if (isMemOpUniform(Load))
return SDValue();
// Non-uniform loads will be selected to MUBUF instructions, so they
@@ -5394,9 +5386,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
// loads.
//
}
- if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
- AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT ||
- AS == AMDGPUASI.GLOBAL_ADDRESS) {
+ if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS) {
if (Subtarget->getScalarizeGlobalBehavior() && isMemOpUniform(Load) &&
!Load->isVolatile() && isMemOpHasNoClobberedMemOperand(Load))
return SDValue();
@@ -5405,9 +5395,7 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
// loads.
//
}
- if (AS == AMDGPUASI.CONSTANT_ADDRESS ||
- AS == AMDGPUASI.CONSTANT_ADDRESS_32BIT ||
- AS == AMDGPUASI.GLOBAL_ADDRESS ||
+ if (AS == AMDGPUASI.CONSTANT_ADDRESS || AS == AMDGPUASI.GLOBAL_ADDRESS ||
AS == AMDGPUASI.FLAT_ADDRESS) {
if (NumElements > 4)
return SplitVectorLoad(Op, DAG);
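The net effect of the LowerLOAD hunks: after this revert, only CONSTANT_ADDRESS, GLOBAL_ADDRESS, and FLAT_ADDRESS vector loads wider than four elements are split, and CONSTANT_ADDRESS_32BIT no longer participates in any of these checks. A standalone model of that splitting decision (hypothetical helper name and placeholder values, not the in-tree LowerLOAD):

#include <cstdio>

// Placeholder address-space tags; the real in-tree values differ.
enum AddrSpace { FLAT = 0, GLOBAL = 1, CONSTANT = 4, CONSTANT_32BIT = 6 };

// Models the post-revert splitting hunk of SITargetLowering::LowerLOAD:
// wide vector loads in the listed address spaces get split into
// <= 4-element pieces; everything else is left for later handling.
static bool shouldSplitVectorLoad(AddrSpace AS, unsigned NumElements) {
  return (AS == CONSTANT || AS == GLOBAL || AS == FLAT) &&
         NumElements > 4;
}

int main() {
  std::printf("%d\n", shouldSplitVectorLoad(CONSTANT, 8));       // 1: split
  std::printf("%d\n", shouldSplitVectorLoad(CONSTANT_32BIT, 8)); // 0: no longer matched
  return 0;
}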