path: root/llvm/lib/CodeGen
author     Matt Arsenault <Matthew.Arsenault@amd.com>  2016-02-22 22:01:42 +0000
committer  Matt Arsenault <Matthew.Arsenault@amd.com>  2016-02-22 22:01:42 +0000
commit     0c6bd7b0d31510225e195cc440234e4cf6bea29f (patch)
tree       74a147c221b814c0031b83c8d716aff9bca5a94d /llvm/lib/CodeGen
parent     27e3b8a6e3a5c75a0e23d18a7bb3f32bce851db8 (diff)
SelectionDAG: Use correct addrspace when lowering memcpy
This was causing assertions later from using the wrong pointer size with LDS operations. getOptimalMemOpType should also have address space arguments later. This avoids assertions in existing tests exposed by a future commit.

llvm-svn: 261580
Diffstat (limited to 'llvm/lib/CodeGen')
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp  25
1 file changed, 16 insertions(+), 9 deletions(-)
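The motivation behind the diff below is that the legal width of a memory chunk depends on the address space of the pointer being accessed. On AMDGPU-style targets, LDS (address space 3) pointers are 32 bits wide while the default address space uses 64-bit pointers, so deriving the chunk type from address space 0 can pick an i64 pointer type that LDS loads and stores cannot handle, triggering the assertions mentioned in the commit message. The following standalone sketch only models that idea; PointerBits, chunkBitsAssumingAS0, and chunkBitsForAS are hypothetical names for illustration and are not part of the LLVM API, and the width table is an assumption about an AMDGPU-like data layout.

```cpp
// Minimal model of why the memcpy chunk type must be chosen per address space.
#include <cstdio>
#include <map>

// Hypothetical stand-in for DataLayout pointer widths, keyed by address space.
static const std::map<unsigned, unsigned> PointerBits = {
    {0, 64}, // default / flat-like address space
    {3, 32}, // LDS-like local memory
};

// Before the patch: the chunk width was always derived from address space 0.
unsigned chunkBitsAssumingAS0() { return PointerBits.at(0); }

// After the patch: the destination address space is threaded through.
unsigned chunkBitsForAS(unsigned DstAS) { return PointerBits.at(DstAS); }

int main() {
  const unsigned LDS = 3;
  std::printf("old choice for an LDS copy: i%u (mismatched width)\n",
              chunkBitsAssumingAS0());
  std::printf("new choice for an LDS copy: i%u (matches the LDS pointer)\n",
              chunkBitsForAS(LDS));
}
```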
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index f75f962850a..e1790ce2083 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -4189,6 +4189,7 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
bool ZeroMemset,
bool MemcpyStrSrc,
bool AllowOverlap,
+ unsigned DstAS, unsigned SrcAS,
SelectionDAG &DAG,
const TargetLowering &TLI) {
assert((SrcAlign == 0 || SrcAlign >= DstAlign) &&
@@ -4205,10 +4206,9 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
DAG.getMachineFunction());
if (VT == MVT::Other) {
- unsigned AS = 0;
- if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(AS) ||
- TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign)) {
- VT = TLI.getPointerTy(DAG.getDataLayout());
+ if (DstAlign >= DAG.getDataLayout().getPointerPrefAlignment(DstAS) ||
+ TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign)) {
+ VT = TLI.getPointerTy(DAG.getDataLayout(), DstAS);
} else {
switch (DstAlign & 7) {
case 0: VT = MVT::i64; break;
@@ -4264,10 +4264,9 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
// FIXME: Only does this for 64-bit or more since we don't have proper
// cost model for unaligned load / store.
bool Fast;
- unsigned AS = 0;
if (NumMemOps && AllowOverlap &&
VTSize >= 8 && NewVTSize < Size &&
- TLI.allowsMisalignedMemoryAccesses(VT, AS, DstAlign, &Fast) && Fast)
+ TLI.allowsMisalignedMemoryAccesses(VT, DstAS, DstAlign, &Fast) && Fast)
VTSize = Size;
else {
VT = NewVT;
@@ -4328,7 +4327,10 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
(DstAlignCanChange ? 0 : Align),
(isZeroStr ? 0 : SrcAlign),
- false, false, CopyFromStr, true, DAG, TLI))
+ false, false, CopyFromStr, true,
+ DstPtrInfo.getAddrSpace(),
+ SrcPtrInfo.getAddrSpace(),
+ DAG, TLI))
return SDValue();
if (DstAlignCanChange) {
@@ -4437,7 +4439,10 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, SDLoc dl,
if (!FindOptimalMemOpLowering(MemOps, Limit, Size,
(DstAlignCanChange ? 0 : Align), SrcAlign,
- false, false, false, false, DAG, TLI))
+ false, false, false, false,
+ DstPtrInfo.getAddrSpace(),
+ SrcPtrInfo.getAddrSpace(),
+ DAG, TLI))
return SDValue();
if (DstAlignCanChange) {
@@ -4528,7 +4533,9 @@ static SDValue getMemsetStores(SelectionDAG &DAG, SDLoc dl,
isa<ConstantSDNode>(Src) && cast<ConstantSDNode>(Src)->isNullValue();
if (!FindOptimalMemOpLowering(MemOps, TLI.getMaxStoresPerMemset(OptSize),
Size, (DstAlignCanChange ? 0 : Align), 0,
- true, IsZeroVal, false, true, DAG, TLI))
+ true, IsZeroVal, false, true,
+ DstPtrInfo.getAddrSpace(), ~0u,
+ DAG, TLI))
return SDValue();
if (DstAlignCanChange) {
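One detail of the call sites above: the memcpy and memmove paths forward both MachinePointerInfo address spaces, while the memset path passes ~0u for the source because a memset has no source pointer. The sketch below only illustrates that calling convention; describeMemOpLoweringCall is a hypothetical stand-in with a trimmed parameter list, not the real FindOptimalMemOpLowering signature.

```cpp
#include <cstdio>

// Hypothetical stand-in for the helper's new trailing address-space
// parameters; the real function takes many more arguments.
void describeMemOpLoweringCall(unsigned DstAS, unsigned SrcAS) {
  if (SrcAS == ~0u)
    std::printf("memset-style call: DstAS=%u, no source pointer\n", DstAS);
  else
    std::printf("memcpy/memmove-style call: DstAS=%u, SrcAS=%u\n", DstAS, SrcAS);
}

int main() {
  describeMemOpLoweringCall(/*DstAS=*/3, /*SrcAS=*/0);   // copy into LDS
  describeMemOpLoweringCall(/*DstAS=*/3, /*SrcAS=*/~0u); // memset of LDS
}
```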