summary refs log tree commit diff stats
path: root/llvm/test/Transforms/InstCombine/atomic.ll
diff options
context:
space:
mode:
author	Philip Reames <listmail@philipreames.com>	2016-04-22 20:33:48 +0000
committer	Philip Reames <listmail@philipreames.com>	2016-04-22 20:33:48 +0000
commit	eedef73b633496ca52af265b472519f0e385a7d2 (patch)
tree	7bf4e6df908b6b48c02f6549c448f8ff4129c27c	/llvm/test/Transforms/InstCombine/atomic.ll
parent	629d12de70959f49f0b8f78eb9e6e217103a24c7 (diff)
download	bcm5719-llvm-eedef73b633496ca52af265b472519f0e385a7d2.tar.gz
	bcm5719-llvm-eedef73b633496ca52af265b472519f0e385a7d2.zip
[unordered] Extend load/store type canonicalization to handle unordered operations
Extend the type canonicalization logic to work for unordered atomic loads and stores. Note that while this change itself is fairly simple and low risk, there's a reasonable chance this will expose problems in the backends by suddenly generating IR they wouldn't have seen before. Anything of this nature will be an existing bug in the backend (you could write an atomic float load), but this will definitely change the frequency with which such cases are encountered. If you see problems, feel free to revert this change, but please make sure you collect a test case. llvm-svn: 267210
Diffstat (limited to 'llvm/test/Transforms/InstCombine/atomic.ll')
-rw-r--r--	llvm/test/Transforms/InstCombine/atomic.ll	| 39
1 file changed, 39 insertions(+), 0 deletions(-)
diff --git a/llvm/test/Transforms/InstCombine/atomic.ll b/llvm/test/Transforms/InstCombine/atomic.ll
index ac698c8425e..408bc8166e9 100644
--- a/llvm/test/Transforms/InstCombine/atomic.ll
+++ b/llvm/test/Transforms/InstCombine/atomic.ll
@@ -172,3 +172,42 @@ define i32 @test17(i1 %cnd) {
%x = load atomic i32, i32* %addr seq_cst, align 4
ret i32 %x
}
+
+declare void @clobber()
+
+define i32 @test18(float* %p) {
+; CHECK-LABEL: define i32 @test18(
+; CHECK: load atomic i32, i32* [[A:%.*]] unordered, align 4
+; CHECK: store atomic i32 [[B:%.*]], i32* [[C:%.*]] unordered, align 4
+ %x = load atomic float, float* %p unordered, align 4
+ call void @clobber() ;; keep the load around
+ store atomic float %x, float* %p unordered, align 4
+ ret i32 0
+}
+
+; TODO: probably also legal in this case
+define i32 @test19(float* %p) {
+; CHECK-LABEL: define i32 @test19(
+; CHECK: load atomic float, float* %p seq_cst, align 4
+; CHECK: store atomic float %x, float* %p seq_cst, align 4
+ %x = load atomic float, float* %p seq_cst, align 4
+ call void @clobber() ;; keep the load around
+ store atomic float %x, float* %p seq_cst, align 4
+ ret i32 0
+}
+
+define i32 @test20(i32** %p, i8* %v) {
+; CHECK-LABEL: define i32 @test20(
+; CHECK: store atomic i8* %v, i8** [[D:%.*]] unordered, align 4
+ %cast = bitcast i8* %v to i32*
+ store atomic i32* %cast, i32** %p unordered, align 4
+ ret i32 0
+}
+; TODO: probably also legal in this case
+define i32 @test21(i32** %p, i8* %v) {
+; CHECK-LABEL: define i32 @test21(
+; CHECK: store atomic i32* %cast, i32** %p monotonic, align 4
+ %cast = bitcast i8* %v to i32*
+ store atomic i32* %cast, i32** %p monotonic, align 4
+ ret i32 0
+}
OpenPOWER on IntegriCloud