Diffstat (limited to 'llvm/lib/Target/AMDGPU/SIFrameLowering.cpp')
 llvm/lib/Target/AMDGPU/SIFrameLowering.cpp | 27 +++++++++++----------------
 1 file changed, 11 insertions(+), 16 deletions(-)
diff --git a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
index 7f3150bdd01..48f5f645075 100644
--- a/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIFrameLowering.cpp
@@ -773,22 +773,17 @@ void SIFrameLowering::processFunctionBeforeFrameFinalized(
!AllSGPRSpilledToVGPRs || !allStackObjectsAreDead(MFI)) {
assert(RS && "RegScavenger required if spilling");
- // We force this to be at offset 0 so no user object ever has 0 as an
- // address, so we may use 0 as an invalid pointer value. This is because
- // LLVM assumes 0 is an invalid pointer in address space 0. Because alloca
- // is required to be address space 0, we are forced to accept this for
- // now. Ideally we could have the stack in another address space with 0 as a
- // valid pointer, and -1 as the null value.
- //
- // This will also waste additional space when user stack objects require > 4
- // byte alignment.
- //
- // The main cost here is losing the offset for addressing modes. However
- // this also ensures we shouldn't need a register for the offset when
- // emergency scavenging.
- int ScavengeFI = MFI.CreateFixedObject(
- TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
- RS->addScavengingFrameIndex(ScavengeFI);
+ if (FuncInfo->isEntryFunction()) {
+ int ScavengeFI = MFI.CreateFixedObject(
+ TRI.getSpillSize(AMDGPU::SGPR_32RegClass), 0, false);
+ RS->addScavengingFrameIndex(ScavengeFI);
+ } else {
+ int ScavengeFI = MFI.CreateStackObject(
+ TRI.getSpillSize(AMDGPU::SGPR_32RegClass),
+ TRI.getSpillAlignment(AMDGPU::SGPR_32RegClass),
+ false);
+ RS->addScavengingFrameIndex(ScavengeFI);
+ }
}
}
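
For readability, here is the post-patch logic as a condensed C++ sketch rather than a unified diff. It is not a standalone program: FuncInfo, MFI, TRI, and RS are the SIMachineFunctionInfo, MachineFrameInfo, register info, and RegScavenger objects already in scope inside processFunctionBeforeFrameFinalized, exactly as in the diff above, and the argument comments reflect the MachineFrameInfo API at this revision.

    // Reserve an emergency spill slot for the register scavenger.
    if (FuncInfo->isEntryFunction()) {
      // Entry functions keep the fixed slot at offset 0 in the frame.
      int ScavengeFI = MFI.CreateFixedObject(
          TRI.getSpillSize(AMDGPU::SGPR_32RegClass), /*SPOffset=*/0,
          /*IsImmutable=*/false);
      RS->addScavengingFrameIndex(ScavengeFI);
    } else {
      // Non-entry functions get an ordinary stack object, so the slot is
      // placed by normal frame layout instead of being pinned to offset 0.
      int ScavengeFI = MFI.CreateStackObject(
          TRI.getSpillSize(AMDGPU::SGPR_32RegClass),
          TRI.getSpillAlignment(AMDGPU::SGPR_32RegClass),
          /*isSpillSlot=*/false);
      RS->addScavengingFrameIndex(ScavengeFI);
    }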