Diffstat (limited to 'llvm/lib'):
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 69
 llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 44
 llvm/lib/CodeGen/TargetLoweringBase.cpp       | 23
 3 files changed, 72 insertions(+), 64 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 3fe663630d5..104bd8e237b 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -10746,17 +10746,6 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
return true;
}
-static bool allowableAlignment(const SelectionDAG &DAG,
- const TargetLowering &TLI, EVT EVTTy,
- unsigned AS, unsigned Align) {
- if (TLI.allowsMisalignedMemoryAccesses(EVTTy, AS, Align))
- return true;
-
- Type *Ty = EVTTy.getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = DAG.getDataLayout().getPrefTypeAlignment(Ty);
- return (Align >= ABIAlignment);
-}
-
void DAGCombiner::getStoreMergeAndAliasCandidates(
StoreSDNode* St, SmallVectorImpl<MemOpLink> &StoreNodes,
SmallVectorImpl<LSBaseSDNode*> &AliasLoadNodes) {
@@ -10922,6 +10911,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
unsigned FirstStoreAS = FirstInChain->getAddressSpace();
unsigned FirstStoreAlign = FirstInChain->getAlignment();
+ LLVMContext &Context = *DAG.getContext();
+ const DataLayout &DL = DAG.getDataLayout();
// Store the constants into memory as one consecutive store.
if (IsConstantSrc) {
@@ -10943,27 +10934,28 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
// Find a legal type for the constant store.
unsigned SizeInBits = (i+1) * ElementSizeBytes * 8;
- EVT StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
+ EVT StoreTy = EVT::getIntegerVT(Context, SizeInBits);
if (TLI.isTypeLegal(StoreTy) &&
- allowableAlignment(DAG, TLI, StoreTy, FirstStoreAS,
- FirstStoreAlign)) {
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
+ FirstStoreAlign)) {
LastLegalType = i+1;
// Or check whether a truncstore is legal.
- } else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) ==
+ } else if (TLI.getTypeAction(Context, StoreTy) ==
TargetLowering::TypePromoteInteger) {
EVT LegalizedStoredValueTy =
- TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType());
+ TLI.getTypeToTransformTo(Context, StoredVal.getValueType());
if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
- allowableAlignment(DAG, TLI, LegalizedStoredValueTy, FirstStoreAS,
- FirstStoreAlign)) {
+ TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
+ FirstStoreAS, FirstStoreAlign)) {
LastLegalType = i + 1;
}
}
// Find a legal type for the vector store.
- EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
+ EVT Ty = EVT::getVectorVT(Context, MemVT, i+1);
if (TLI.isTypeLegal(Ty) &&
- allowableAlignment(DAG, TLI, Ty, FirstStoreAS, FirstStoreAlign)) {
+ TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
+ FirstStoreAlign)) {
LastLegalVectorType = i + 1;
}
}
@@ -11007,9 +10999,10 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
return false;
// Find a legal type for the vector store.
- EVT Ty = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
+ EVT Ty = EVT::getVectorVT(Context, MemVT, i+1);
if (TLI.isTypeLegal(Ty) &&
- allowableAlignment(DAG, TLI, Ty, FirstStoreAS, FirstStoreAlign))
+ TLI.allowsMemoryAccess(Context, DL, Ty, FirstStoreAS,
+ FirstStoreAlign))
NumElem = i + 1;
}
@@ -11097,33 +11090,37 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
LastConsecutiveLoad = i;
// Find a legal type for the vector store.
- EVT StoreTy = EVT::getVectorVT(*DAG.getContext(), MemVT, i+1);
+ EVT StoreTy = EVT::getVectorVT(Context, MemVT, i+1);
if (TLI.isTypeLegal(StoreTy) &&
- allowableAlignment(DAG, TLI, StoreTy, FirstStoreAS, FirstStoreAlign) &&
- allowableAlignment(DAG, TLI, StoreTy, FirstLoadAS, FirstLoadAlign)) {
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
+ FirstStoreAlign) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
+ FirstLoadAlign)) {
LastLegalVectorType = i + 1;
}
// Find a legal type for the integer store.
unsigned SizeInBits = (i+1) * ElementSizeBytes * 8;
- StoreTy = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
+ StoreTy = EVT::getIntegerVT(Context, SizeInBits);
if (TLI.isTypeLegal(StoreTy) &&
- allowableAlignment(DAG, TLI, StoreTy, FirstStoreAS, FirstStoreAlign) &&
- allowableAlignment(DAG, TLI, StoreTy, FirstLoadAS, FirstLoadAlign))
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstStoreAS,
+ FirstStoreAlign) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
+ FirstLoadAlign))
LastLegalIntegerType = i + 1;
// Or check whether a truncstore and extload is legal.
- else if (TLI.getTypeAction(*DAG.getContext(), StoreTy) ==
+ else if (TLI.getTypeAction(Context, StoreTy) ==
TargetLowering::TypePromoteInteger) {
EVT LegalizedStoredValueTy =
- TLI.getTypeToTransformTo(*DAG.getContext(), StoreTy);
+ TLI.getTypeToTransformTo(Context, StoreTy);
if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValueTy, StoreTy) &&
TLI.isLoadExtLegal(ISD::SEXTLOAD, LegalizedStoredValueTy, StoreTy) &&
TLI.isLoadExtLegal(ISD::EXTLOAD, LegalizedStoredValueTy, StoreTy) &&
- allowableAlignment(DAG, TLI, LegalizedStoredValueTy, FirstStoreAS,
- FirstStoreAlign) &&
- allowableAlignment(DAG, TLI, LegalizedStoredValueTy, FirstLoadAS,
- FirstLoadAlign))
+ TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
+ FirstStoreAS, FirstStoreAlign) &&
+ TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
+ FirstLoadAS, FirstLoadAlign))
LastLegalIntegerType = i+1;
}
}
@@ -11158,10 +11155,10 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode* St) {
// to memory.
EVT JointMemOpVT;
if (UseVectorTy) {
- JointMemOpVT = EVT::getVectorVT(*DAG.getContext(), MemVT, NumElem);
+ JointMemOpVT = EVT::getVectorVT(Context, MemVT, NumElem);
} else {
unsigned SizeInBits = NumElem * ElementSizeBytes * 8;
- JointMemOpVT = EVT::getIntegerVT(*DAG.getContext(), SizeInBits);
+ JointMemOpVT = EVT::getIntegerVT(Context, SizeInBits);
}
SDLoc LoadDL(LoadNodes[0].MemNode);
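[Note: the following is a minimal standalone sketch, not part of the patch, illustrating the caller-side pattern in MergeConsecutiveStores after this change: the merge loop grows the widest store type that is both legal and allowed by a single TLI.allowsMemoryAccess query. isTypeLegalBits and allowsAccess are hypothetical stand-ins for TLI.isTypeLegal / TLI.allowsMemoryAccess.]

#include <cstdio>

static bool isTypeLegalBits(unsigned Bits) {
  // Pretend only power-of-two integer types up to 64 bits are legal.
  return Bits <= 64 && (Bits & (Bits - 1)) == 0;
}

static bool allowsAccess(unsigned Bits, unsigned Align) {
  // Pretend ABI alignment equals natural alignment, and that the target
  // tolerates misaligned accesses of 32 bits or less.
  unsigned ABIAlign = Bits / 8;
  return Align >= ABIAlign || Bits <= 32;
}

int main() {
  const unsigned ElementSizeBytes = 2;  // merging a run of i16 stores
  const unsigned FirstStoreAlign = 2;   // alignment of the first store
  unsigned NumStores = 6, LastLegalType = 1;
  for (unsigned i = 0; i < NumStores; ++i) {
    unsigned SizeInBits = (i + 1) * ElementSizeBytes * 8;
    if (isTypeLegalBits(SizeInBits) && allowsAccess(SizeInBits, FirstStoreAlign))
      LastLegalType = i + 1;  // widest run mergeable into one store so far
  }
  std::printf("can merge %u consecutive stores\n", LastLegalType);
  return 0;
}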
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 0396736ad81..a253faa9760 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -727,14 +727,12 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
case TargetLowering::Legal: {
// If this is an unaligned store and the target doesn't support it,
// expand it.
+ EVT MemVT = ST->getMemoryVT();
unsigned AS = ST->getAddressSpace();
unsigned Align = ST->getAlignment();
- if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) {
- Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
- if (Align < ABIAlignment)
- ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
- }
+ const DataLayout &DL = DAG.getDataLayout();
+ if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT, AS, Align))
+ ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
break;
}
case TargetLowering::Custom: {
@@ -842,16 +840,13 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
StVT.getSimpleVT())) {
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal: {
+ EVT MemVT = ST->getMemoryVT();
unsigned AS = ST->getAddressSpace();
unsigned Align = ST->getAlignment();
// If this is an unaligned store and the target doesn't support it,
// expand it.
- if (!TLI.allowsMisalignedMemoryAccesses(ST->getMemoryVT(), AS, Align)) {
- Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = DL.getABITypeAlignment(Ty);
- if (Align < ABIAlignment)
- ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
- }
+ if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT, AS, Align))
+ ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
break;
}
case TargetLowering::Custom: {
@@ -894,17 +889,14 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
switch (TLI.getOperationAction(Node->getOpcode(), VT)) {
default: llvm_unreachable("This action is not supported yet!");
case TargetLowering::Legal: {
+ EVT MemVT = LD->getMemoryVT();
unsigned AS = LD->getAddressSpace();
unsigned Align = LD->getAlignment();
+ const DataLayout &DL = DAG.getDataLayout();
// If this is an unaligned load and the target doesn't support it,
// expand it.
- if (!TLI.allowsMisalignedMemoryAccesses(LD->getMemoryVT(), AS, Align)) {
- Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
- if (Align < ABIAlignment){
- ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain);
- }
- }
+ if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT, AS, Align))
+ ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain);
break;
}
case TargetLowering::Custom: {
@@ -1091,18 +1083,14 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
Chain = Res.getValue(1);
}
} else {
- // If this is an unaligned load and the target doesn't support
- // it, expand it.
+ // If this is an unaligned load and the target doesn't support it,
+ // expand it.
EVT MemVT = LD->getMemoryVT();
unsigned AS = LD->getAddressSpace();
unsigned Align = LD->getAlignment();
- if (!TLI.allowsMisalignedMemoryAccesses(MemVT, AS, Align)) {
- Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
- unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
- if (Align < ABIAlignment){
- ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, Value, Chain);
- }
- }
+ const DataLayout &DL = DAG.getDataLayout();
+ if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT, AS, Align))
+ ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, Value, Chain);
}
break;
}
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index e6d07f5134b..ee50ddd9fa0 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -1530,6 +1530,29 @@ unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
return DL.getABITypeAlignment(Ty);
}
+bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
+ const DataLayout &DL, EVT VT,
+ unsigned AddrSpace,
+ unsigned Alignment,
+ bool *Fast) const {
+ // Check if the specified alignment is sufficient based on the data layout.
+ // TODO: While using the data layout works in practice, a better solution
+ // would be to implement this check directly (make this a virtual function).
+ // For example, the ABI alignment may change based on software platform while
+ // this function should only be affected by hardware implementation.
+ Type *Ty = VT.getTypeForEVT(Context);
+ if (Alignment >= DL.getABITypeAlignment(Ty)) {
+ // Assume that an access that meets the ABI-specified alignment is fast.
+ if (Fast != nullptr)
+ *Fast = true;
+ return true;
+ }
+
+ // This is a misaligned access.
+ return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
+}
+
+
//===----------------------------------------------------------------------===//
// TargetTransformInfo Helpers
//===----------------------------------------------------------------------===//
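[Note: the following is a minimal standalone sketch, not part of the patch, of the decision order in the new TargetLoweringBase::allowsMemoryAccess() added above: accesses at or above ABI alignment are accepted (and assumed fast), everything else defers to the target's misaligned-access hook. FakeTarget and its fields are hypothetical stand-ins for DataLayout::getABITypeAlignment and allowsMisalignedMemoryAccesses.]

#include <cstdio>

struct FakeTarget {
  unsigned ABIAlign;    // ABI alignment the data layout would report for the type
  bool MisalignedOK;    // what allowsMisalignedMemoryAccesses would answer
  bool MisalignedFast;  // whether such a misaligned access is fast
};

static bool allowsMemoryAccessSketch(const FakeTarget &T, unsigned Alignment,
                                     bool *Fast = nullptr) {
  // An access that meets ABI alignment is always allowed and assumed fast.
  if (Alignment >= T.ABIAlign) {
    if (Fast)
      *Fast = true;
    return true;
  }
  // Otherwise this is a misaligned access; ask the target hook.
  if (Fast)
    *Fast = T.MisalignedFast;
  return T.MisalignedOK;
}

int main() {
  FakeTarget T{/*ABIAlign=*/4, /*MisalignedOK=*/true, /*MisalignedFast=*/false};
  bool Fast = false;
  bool Allowed = allowsMemoryAccessSketch(T, /*Alignment=*/1, &Fast);
  std::printf("allowed=%d fast=%d\n", Allowed, Fast);  // allowed=1 fast=0
  return 0;
}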