summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPhilip Reames <listmail@philipreames.com>2019-02-28 18:17:51 +0000
committerPhilip Reames <listmail@philipreames.com>2019-02-28 18:17:51 +0000
commit9915b1fa4aa520c4e4d73f0707fc743c4dc08933 (patch)
tree789d58aea57655974bdbb3e83e3baf72b5a4969c
parent63a67527a42e1f9adda9ca2d7c404b8d5640d3e9 (diff)
downloadbcm5719-llvm-9915b1fa4aa520c4e4d73f0707fc743c4dc08933.tar.gz
bcm5719-llvm-9915b1fa4aa520c4e4d73f0707fc743c4dc08933.zip
[Tests] Strengthen LICM test corpus to show alignment stripping. (part 2)
This should have been part of r355110, but my brain isn't quite awake yet, despite the coffee. Per the original submit comment... Doing scalar promotion w/o being able to prove the alignment of the hoisted load or sunk store is a bug. Update tests to actually show the alignment so that impact of the patch which fixes this can be seen. llvm-svn: 355111
-rw-r--r--llvm/test/Transforms/LICM/scalar-promote-unwind.ll31
1 files changed, 31 insertions, 0 deletions
diff --git a/llvm/test/Transforms/LICM/scalar-promote-unwind.ll b/llvm/test/Transforms/LICM/scalar-promote-unwind.ll
index bb90a4f821c..697222d4394 100644
--- a/llvm/test/Transforms/LICM/scalar-promote-unwind.ll
+++ b/llvm/test/Transforms/LICM/scalar-promote-unwind.ll
@@ -67,6 +67,8 @@ for.cond.cleanup:
ret void
}
+;; We can promote if the load can be proven safe to speculate, and the
+;; store safe to sink, even if the store *isn't* must execute.
define void @test3(i1 zeroext %y) uwtable {
; CHECK-LABEL: @test3
entry:
@@ -93,6 +95,35 @@ for.cond.cleanup:
ret void
}
+;; Same as test3, but with unordered atomics
+;; FIXME: doing the transform w/o alignment here is wrong since we're
+;; creating an unaligned atomic which we may not be able to lower.
+define void @test3b(i1 zeroext %y) uwtable {
+; CHECK-LABEL: @test3b
+entry:
+; CHECK-LABEL: entry:
+; CHECK-NEXT: %a = alloca i32
+; CHECK-NEXT: %a.promoted = load atomic i32, i32* %a unordered, align 1
+ %a = alloca i32
+ br label %for.body
+
+for.body:
+ %i.03 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+ %0 = load atomic i32, i32* %a unordered, align 4
+ %add = add nsw i32 %0, 1
+ tail call void @f()
+ store atomic i32 %add, i32* %a unordered, align 4
+ %inc = add nuw nsw i32 %i.03, 1
+ %exitcond = icmp eq i32 %inc, 10000
+ br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:
+; CHECK-LABEL: for.cond.cleanup:
+; CHECK: store atomic i32 %add.lcssa, i32* %a unordered, align 1
+; CHECK-NEXT: ret void
+ ret void
+}
+
@_ZTIi = external constant i8*
; In this test, the loop is within a try block. There is an explicit unwind edge out of the loop.
OpenPOWER on IntegriCloud