diff options
| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-06-12 17:14:03 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2019-06-12 17:14:03 +0000 |
| commit | 4e0648a541f7a1da3cbda804d9cbce04c0843c30 (patch) | |
| tree | 622be785880c58ae1126027b83973577ef2909cf /llvm/lib/Target/AArch64 | |
| parent | 5b0e0dd709f96cb987cd3178217517b45ee1febb (diff) | |
| download | bcm5719-llvm-4e0648a541f7a1da3cbda804d9cbce04c0843c30.tar.gz bcm5719-llvm-4e0648a541f7a1da3cbda804d9cbce04c0843c30.zip | |
[TargetLowering] Add MachineMemOperand::Flags to allowsMemoryAccess tests (PR42123)
As discussed on D62910, we need to check whether particular types of memory access are allowed, not just their alignment/address-space.
This NFC patch adds a MachineMemOperand::Flags argument to allowsMemoryAccess and allowsMisalignedMemoryAccesses, and wires up calls to pass the relevant flags to them.
If people are happy with this approach, I can then update X86TargetLowering::allowsMisalignedMemoryAccesses to handle misaligned NT load/stores.
Differential Revision: https://reviews.llvm.org/D63075
llvm-svn: 363179
Diffstat (limited to 'llvm/lib/Target/AArch64')
| -rw-r--r-- | llvm/lib/Target/AArch64/AArch64ISelLowering.cpp | 14 | ||||
| -rw-r--r-- | llvm/lib/Target/AArch64/AArch64ISelLowering.h | 7 |
2 files changed, 12 insertions, 9 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index ba8bbd25159..60032cd797a 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1074,10 +1074,9 @@ MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL, return MVT::i64; } -bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(EVT VT, - unsigned AddrSpace, - unsigned Align, - bool *Fast) const { +bool AArch64TargetLowering::allowsMisalignedMemoryAccesses( + EVT VT, unsigned AddrSpace, unsigned Align, MachineMemOperand::Flags Flags, + bool *Fast) const { if (Subtarget->requiresStrictAlign()) return false; @@ -2843,7 +2842,8 @@ SDValue AArch64TargetLowering::LowerSTORE(SDValue Op, unsigned AS = StoreNode->getAddressSpace(); unsigned Align = StoreNode->getAlignment(); if (Align < MemVT.getStoreSize() && - !allowsMisalignedMemoryAccesses(MemVT, AS, Align, nullptr)) { + !allowsMisalignedMemoryAccesses( + MemVT, AS, Align, StoreNode->getMemOperand()->getFlags(), nullptr)) { return scalarizeVectorStore(StoreNode, DAG); } @@ -8716,7 +8716,9 @@ EVT AArch64TargetLowering::getOptimalMemOpType( if (memOpAlign(SrcAlign, DstAlign, AlignCheck)) return true; bool Fast; - return allowsMisalignedMemoryAccesses(VT, 0, 1, &Fast) && Fast; + return allowsMisalignedMemoryAccesses(VT, 0, 1, MachineMemOperand::MONone, + &Fast) && + Fast; }; if (CanUseNEON && IsMemset && !IsSmallMemset && diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index c6aab742ab4..94793c678ae 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -262,9 +262,10 @@ public: /// Returns true if the target allows unaligned memory accesses of the /// specified type. 
- bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace = 0, - unsigned Align = 1, - bool *Fast = nullptr) const override; + bool allowsMisalignedMemoryAccesses( + EVT VT, unsigned AddrSpace = 0, unsigned Align = 1, + MachineMemOperand::Flags Flags = MachineMemOperand::MONone, + bool *Fast = nullptr) const override; /// Provide custom lowering hooks for some operations. SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override; |

