Diffstat (limited to 'llvm/lib/Target/AArch64/AArch64ISelLowering.cpp')
 llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index b566810520a..e47e71f0ae1 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -7657,6 +7657,33 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   return false;
 }
 
+bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load,
+                                                  ISD::LoadExtType ExtTy,
+                                                  EVT NewVT) const {
+  // If we're reducing the load width in order to avoid having to use an extra
+  // instruction to do extension then it's probably a good idea.
+  if (ExtTy != ISD::NON_EXTLOAD)
+    return true;
+  // Don't reduce load width if it would prevent us from combining a shift into
+  // the offset.
+  MemSDNode *Mem = dyn_cast<MemSDNode>(Load);
+  assert(Mem);
+  const SDValue &Base = Mem->getBasePtr();
+  if (Base.getOpcode() == ISD::ADD &&
+      Base.getOperand(1).getOpcode() == ISD::SHL &&
+      Base.getOperand(1).hasOneUse() &&
+      Base.getOperand(1).getOperand(1).getOpcode() == ISD::Constant) {
+    // The shift can be combined if it matches the size of the value being
+    // loaded (and so reducing the width would make it not match).
+    uint64_t ShiftAmount = Base.getOperand(1).getConstantOperandVal(1);
+    uint64_t LoadBytes = Mem->getMemoryVT().getSizeInBits()/8;
+    if (ShiftAmount == Log2_32(LoadBytes))
+      return false;
+  }
+  // We have no reason to disallow reducing the load width, so allow it.
+  return true;
+}
+
 // Truncations from 64-bit GPR to 32-bit GPR is free.
 bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
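
For context, a minimal sketch of the kind of source pattern the new hook protects (not part of the commit; the function name low_half and the example types are hypothetical). The i64 load below has address base + (index << 3), which matches the scaled register-offset addressing mode ldr x0, [x0, x1, lsl #3]. If only the low 32 bits of the result are used, DAGCombine may try to narrow the load to i32; a 4-byte load would need lsl #2 to fold the shift, so the hook returns false here (shift amount 3 == Log2_32(8)) and keeps the wider load, leaving the shift folded into the address instead of requiring a separate instruction.

// Hypothetical illustration (not from the commit): when compiled for AArch64,
// the 8-byte load can be selected as "ldr x0, [x0, x1, lsl #3]" because the
// shift amount 3 equals log2 of the load size in bytes.
#include <cstdint>

uint32_t low_half(const uint64_t *base, uint64_t index) {
  // Narrowing this to a 4-byte load would require "lsl #2" in the scaled
  // addressing mode, so the existing "lsl #3" could no longer be combined
  // into the load.
  return static_cast<uint32_t>(base[index]);
}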