summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDylan McKay <me@dylanmckay.io>2017-12-09 06:45:36 +0000
committerDylan McKay <me@dylanmckay.io>2017-12-09 06:45:36 +0000
commit80463fe64dec84e92764f9a796870f3be404455d (patch)
tree2d6e20d9dacd0d09b2c39bac665e5eb02c9bd79c
parentaae5b6907988f2ef48f242a3fc6e02761a967fd8 (diff)
downloadbcm5719-llvm-80463fe64dec84e92764f9a796870f3be404455d.tar.gz
bcm5719-llvm-80463fe64dec84e92764f9a796870f3be404455d.zip
Relax unaligned access assertion when type is byte aligned
Summary: This relaxes an assertion inside SelectionDAGBuilder which is overly restrictive on targets which have no concept of alignment (such as AVR). In these architectures, all types are aligned to 8-bits. After this, LLVM will only assert that accesses are aligned on targets which actually require alignment. This patch follows from a discussion on llvm-dev a few months ago http://llvm.1065342.n5.nabble.com/llvm-dev-Unaligned-atomic-load-store-td112815.html Reviewers: bogner, nemanjai, joerg, efriedma Reviewed By: efriedma Subscribers: efriedma, cactus, llvm-commits Differential Revision: https://reviews.llvm.org/D39946 llvm-svn: 320243
-rw-r--r--llvm/include/llvm/CodeGen/TargetLowering.h13
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp3
-rw-r--r--llvm/lib/CodeGen/TargetLoweringBase.cpp1
-rw-r--r--llvm/lib/Target/AVR/AVRISelLowering.cpp1
-rw-r--r--llvm/test/CodeGen/AVR/unaligned-atomic-loads.ll19
5 files changed, 35 insertions, 2 deletions
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index f9a61b8bf1a..e05e03c99c4 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1440,6 +1440,9 @@ public:
/// require a more complex expansion.
unsigned getMinCmpXchgSizeInBits() const { return MinCmpXchgSizeInBits; }
+ /// Whether the target supports unaligned atomic operations.
+ bool supportsUnalignedAtomics() const { return SupportsUnalignedAtomics; }
+
/// Whether AtomicExpandPass should automatically insert fences and reduce
/// ordering for this atomic. This should be true for most architectures with
/// weak memory ordering. Defaults to false.
@@ -1845,11 +1848,16 @@ protected:
MaxAtomicSizeInBitsSupported = SizeInBits;
}
- // Sets the minimum cmpxchg or ll/sc size supported by the backend.
+ /// Sets the minimum cmpxchg or ll/sc size supported by the backend.
void setMinCmpXchgSizeInBits(unsigned SizeInBits) {
MinCmpXchgSizeInBits = SizeInBits;
}
+ /// Sets whether unaligned atomic operations are supported.
+ void setSupportsUnalignedAtomics(bool UnalignedSupported) {
+ SupportsUnalignedAtomics = UnalignedSupported;
+ }
+
public:
//===--------------------------------------------------------------------===//
// Addressing mode description hooks (used by LSR etc).
@@ -2331,6 +2339,9 @@ private:
/// backend supports.
unsigned MinCmpXchgSizeInBits;
+ /// This indicates if the target supports unaligned atomic operations.
+ bool SupportsUnalignedAtomics;
+
/// If set to a physical register, this specifies the register that
/// llvm.savestack/llvm.restorestack should save and restore.
unsigned StackPointerRegisterToSaveRestore;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index f3addf05566..2dc9a728b65 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4138,7 +4138,8 @@ void SelectionDAGBuilder::visitAtomicLoad(const LoadInst &I) {
const TargetLowering &TLI = DAG.getTargetLoweringInfo();
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
- if (I.getAlignment() < VT.getStoreSize())
+ if (!TLI.supportsUnalignedAtomics() &&
+ I.getAlignment() < VT.getStoreSize())
report_fatal_error("Cannot generate unaligned atomic load");
MachineMemOperand *MMO =
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 3c684974df8..22a43ab8ac5 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -520,6 +520,7 @@ TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm) : TM(tm) {
MaxAtomicSizeInBitsSupported = 1024;
MinCmpXchgSizeInBits = 0;
+ SupportsUnalignedAtomics = false;
std::fill(std::begin(LibcallRoutineNames), std::end(LibcallRoutineNames), nullptr);
diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp
index 890379d5639..69d97487878 100644
--- a/llvm/lib/Target/AVR/AVRISelLowering.cpp
+++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp
@@ -44,6 +44,7 @@ AVRTargetLowering::AVRTargetLowering(AVRTargetMachine &tm)
setBooleanVectorContents(ZeroOrOneBooleanContent);
setSchedulingPreference(Sched::RegPressure);
setStackPointerRegisterToSaveRestore(AVR::SP);
+ setSupportsUnalignedAtomics(true);
setOperationAction(ISD::GlobalAddress, MVT::i16, Custom);
setOperationAction(ISD::BlockAddress, MVT::i16, Custom);
diff --git a/llvm/test/CodeGen/AVR/unaligned-atomic-loads.ll b/llvm/test/CodeGen/AVR/unaligned-atomic-loads.ll
new file mode 100644
index 00000000000..db1ab33fa88
--- /dev/null
+++ b/llvm/test/CodeGen/AVR/unaligned-atomic-loads.ll
@@ -0,0 +1,19 @@
+; RUN: llc -mattr=addsubiw < %s -march=avr | FileCheck %s
+
+; This verifies that the middle end can handle an unaligned atomic load.
+;
+; In the past, an assertion inside the SelectionDAGBuilder would always
+; fire for unaligned atomic loads and stores.
+
+%AtomicI16 = type { %CellI16, [0 x i8] }
+%CellI16 = type { i16, [0 x i8] }
+
+; CHECK-LABEL: foo
+; CHECK: ret
+define void @foo(%AtomicI16* %self) {
+start:
+ %a = getelementptr inbounds %AtomicI16, %AtomicI16* %self, i16 0, i32 0, i32 0
+ load atomic i16, i16* %a seq_cst, align 1
+ ret void
+}
+
OpenPOWER on IntegriCloud