Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp |  5
-rw-r--r--  llvm/test/CodeGen/AArch64/arm64-stp.ll                | 32
2 files changed, 35 insertions, 2 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index 186e71a3307..82f77a77ab5 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -623,7 +623,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
// and first alias with the second, we can combine the second into the
// first.
if (!ModifiedRegs[MI->getOperand(0).getReg()] &&
- !UsedRegs[MI->getOperand(0).getReg()] &&
+ !(MI->mayLoad() && UsedRegs[MI->getOperand(0).getReg()]) &&
!mayAlias(MI, MemInsns, TII)) {
MergeForward = false;
return MBBI;
@@ -634,7 +634,8 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
// first and the second alias with the first, we can combine the first
// into the second.
if (!ModifiedRegs[FirstMI->getOperand(0).getReg()] &&
- !UsedRegs[FirstMI->getOperand(0).getReg()] &&
+ !(FirstMI->mayLoad() &&
+ UsedRegs[FirstMI->getOperand(0).getReg()]) &&
!mayAlias(FirstMI, MemInsns, TII)) {
MergeForward = true;
return MBBI;
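
The hunks above relax the hazard check used while scanning for a pairable load/store. Previously, any read of the candidate instruction's operand-0 register between the two memory operations blocked the merge. With the patch, such a read only blocks merging when the candidate is a load (its operand 0 is a definition whose value the intervening instruction consumes); for a store, an intervening read of the source register is only a read-after-read and pairing can still proceed. Below is a minimal standalone sketch of that condition, not the pass itself; the `Insn` struct, the `canMergeSketch` helper, and the bitset parameters are hypothetical stand-ins for `MachineInstr`, the merge test in `findMatchingInsn`, and the `ModifiedRegs`/`UsedRegs` tracking, and the `mayAlias` memory check is omitted.

```cpp
#include <bitset>

// Hypothetical stand-in for the bits of MachineInstr the check looks at.
struct Insn {
  bool IsLoad;   // corresponds to MI->mayLoad()
  unsigned Reg;  // corresponds to MI->getOperand(0).getReg()
};

constexpr unsigned NumRegs = 128;  // arbitrary size for the sketch

// Sketch of the relaxed hazard test: merging is blocked if the register is
// written in between, or if it is read in between *and* the candidate is a
// load. A store whose source register is merely read in between (a
// read-after-read) may still be paired. Alias checks are left out here.
bool canMergeSketch(const Insn &MI,
                    const std::bitset<NumRegs> &ModifiedInBetween,
                    const std::bitset<NumRegs> &UsedInBetween) {
  if (ModifiedInBetween[MI.Reg])
    return false;                        // value changed between the two insns
  if (MI.IsLoad && UsedInBetween[MI.Reg])
    return false;                        // loaded value is consumed in between
  return true;                           // stores tolerate intervening reads
}
```

In the stp tests added below, the add that reads %b (and the loaded value) sits between the two stores, which is exactly the read-after-read case the relaxed check now permits.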
diff --git a/llvm/test/CodeGen/AArch64/arm64-stp.ll b/llvm/test/CodeGen/AArch64/arm64-stp.ll
index 4d76396471a..72561aac6e8 100644
--- a/llvm/test/CodeGen/AArch64/arm64-stp.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-stp.ll
@@ -99,3 +99,35 @@ entry:
store <4 x i32> %p20, <4 x i32>* %p21, align 4
ret void
}
+
+; Read of %b to compute %tmp2 shouldn't prevent formation of stp
+; CHECK-LABEL: stp_int_rar_hazard
+; CHECK: stp w0, w1, [x2]
+; CHECK: ldr [[REG:w[0-9]+]], [x2, #8]
+; CHECK: add w0, [[REG]], w1
+; CHECK: ret
+define i32 @stp_int_rar_hazard(i32 %a, i32 %b, i32* nocapture %p) nounwind {
+ store i32 %a, i32* %p, align 4
+ %ld.ptr = getelementptr inbounds i32, i32* %p, i64 2
+ %tmp = load i32, i32* %ld.ptr, align 4
+ %tmp2 = add i32 %tmp, %b
+ %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
+ store i32 %b, i32* %add.ptr, align 4
+ ret i32 %tmp2
+}
+
+; Read of %b to compute %tmp2 shouldn't prevent formation of stp
+; CHECK-LABEL: stp_int_rar_hazard_after
+; CHECK: ldr [[REG:w[0-9]+]], [x3, #4]
+; CHECK: add w0, [[REG]], w2
+; CHECK: stp w1, w2, [x3]
+; CHECK: ret
+define i32 @stp_int_rar_hazard_after(i32 %w0, i32 %a, i32 %b, i32* nocapture %p) nounwind {
+ store i32 %a, i32* %p, align 4
+ %ld.ptr = getelementptr inbounds i32, i32* %p, i64 1
+ %tmp = load i32, i32* %ld.ptr, align 4
+ %tmp2 = add i32 %tmp, %b
+ %add.ptr = getelementptr inbounds i32, i32* %p, i64 1
+ store i32 %b, i32* %add.ptr, align 4
+ ret i32 %tmp2
+}