author    John Brawn <john.brawn@arm.com>  2018-03-23 14:47:07 +0000
committer John Brawn <john.brawn@arm.com>  2018-03-23 14:47:07 +0000
commit    e3b44f9de6b6724e147cc7577f18ef500d88c754 (patch)
tree      88791a019e5706ff62eb20b3b9fd192135ad98d1 /llvm/lib/Target
parent    5792e10ffb07891e6e79bd6a5dbc0863ce13074c (diff)
[AArch64] Don't reduce the width of loads if it prevents combining a shift
Loads and stores can only shift the offset register by the size of the value being loaded, but currently the DAGCombiner will reduce the width of the load if it's followed by a trunc, making it impossible to later combine the shift.

Solve this by implementing shouldReduceLoadWidth for the AArch64 backend and making it prevent the width reduction if this is what would happen, though we do allow it if reducing the load width will let us eliminate a later sign or zero extend.

Differential Revision: https://reviews.llvm.org/D44794

llvm-svn: 328321
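For illustration only (this example is not from the patch or its tests; the function name load_low_word and the data layout are hypothetical), here is a minimal C++ sketch of the kind of pattern affected. Indexing a table of 8-byte elements lets AArch64 fold the scaling into the load as "ldr x0, [x1, x2, lsl #3]"; if the DAGCombiner first narrows the 64-bit load to 32 bits because of the trunc, the lsl #3 no longer matches the 4-byte access size and must be materialised as a separate shift instruction.

#include <cstdint>

// Hypothetical example: load a 64-bit table entry, then truncate it to the
// low 32 bits. The trunc is what triggers the DAGCombiner's load narrowing.
uint32_t load_low_word(const uint64_t *table, uint64_t index) {
  return static_cast<uint32_t>(table[index]);
}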
Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 27
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.h   |  3
2 files changed, 30 insertions, 0 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b566810520a..e47e71f0ae1 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7657,6 +7657,33 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   return false;
 }
 
+bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load,
+                                                  ISD::LoadExtType ExtTy,
+                                                  EVT NewVT) const {
+  // If we're reducing the load width in order to avoid having to use an extra
+  // instruction to do extension then it's probably a good idea.
+  if (ExtTy != ISD::NON_EXTLOAD)
+    return true;
+  // Don't reduce load width if it would prevent us from combining a shift into
+  // the offset.
+  MemSDNode *Mem = dyn_cast<MemSDNode>(Load);
+  assert(Mem);
+  const SDValue &Base = Mem->getBasePtr();
+  if (Base.getOpcode() == ISD::ADD &&
+      Base.getOperand(1).getOpcode() == ISD::SHL &&
+      Base.getOperand(1).hasOneUse() &&
+      Base.getOperand(1).getOperand(1).getOpcode() == ISD::Constant) {
+    // The shift can be combined if it matches the size of the value being
+    // loaded (and so reducing the width would make it not match).
+    uint64_t ShiftAmount = Base.getOperand(1).getConstantOperandVal(1);
+    uint64_t LoadBytes = Mem->getMemoryVT().getSizeInBits()/8;
+    if (ShiftAmount == Log2_32(LoadBytes))
+      return false;
+  }
+  // We have no reason to disallow reducing the load width, so allow it.
+  return true;
+}
+
 // Truncations from 64-bit GPR to 32-bit GPR is free.
 bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 297c9bdd251..5754ed97380 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -309,6 +309,9 @@ public:
                           MachineFunction &MF,
                           unsigned Intrinsic) const override;
 
+  bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
+                             EVT NewVT) const override;
+
   bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
   bool isTruncateFree(EVT VT1, EVT VT2) const override;
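As a standalone sketch of the guard added in AArch64ISelLowering.cpp above (not LLVM code; the helpers log2OfBytes and shiftFoldsIntoAddress are made up for this illustration, with log2OfBytes standing in for LLVM's Log2_32), the register-offset shift can only be folded into an AArch64 load when the shift amount equals log2 of the access size, so narrowing an 8-byte load to 4 bytes invalidates a shift of 3:

#include <cassert>
#include <cstdint>

// Stand-in for LLVM's Log2_32: log2 of a power-of-two byte count.
static unsigned log2OfBytes(uint64_t Bytes) {
  unsigned L = 0;
  while (Bytes > 1) {
    Bytes >>= 1;
    ++L;
  }
  return L;
}

// True if a load of LoadBytes whose address is (add X, (shl Y, ShiftAmount))
// could fold the shift into the register-offset addressing mode.
static bool shiftFoldsIntoAddress(uint64_t ShiftAmount, uint64_t LoadBytes) {
  return ShiftAmount == log2OfBytes(LoadBytes);
}

int main() {
  assert(shiftFoldsIntoAddress(3, 8));   // 8-byte load: ldr x0, [x1, x2, lsl #3]
  assert(!shiftFoldsIntoAddress(3, 4));  // narrowed to 4 bytes: lsl #3 no longer folds
  return 0;
}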