author    Chandler Carruth <chandlerc@gmail.com>  2014-10-20 00:24:14 +0000
committer Chandler Carruth <chandlerc@gmail.com>  2014-10-20 00:24:14 +0000
commit    eeec35ae1cce76d8f0d4fc35d231deebf2fa5f38 (patch)
tree      462f5cd7ee8c5342985ae9ef5f0d08622153bd40 /llvm/test/Transforms
parent    25d50758f3078f56677f50b8f789dff76c90ed4c (diff)
Teach the load analysis driving core instcombine logic, and other bits of logic, to look through pointer casts, making them trivially stronger in the face of loads and stores with intervening pointer casts.

I've included a few test cases that demonstrate the kind of folding instcombine can do without pointer casts, and then variations which obfuscate the logic through bitcasts. Without this patch, the variations all fail to optimize fully.

This is more important now than it has been in the past, as I've started moving the load canonicalization to more closely follow the value type requirements rather than the pointer type requirements, and thus this needs to be prepared for more pointer casts. When I made the same change to stores, several test cases regressed without logic along these lines, so I wanted to systematically improve matters first.

llvm-svn: 220178
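As a minimal sketch of the pattern in question (the function and value names here are illustrative only, not code from the patch), a store and a load separated by pointer casts looks like this:

define i32 @cast_example(i32* %p) {
  store i32 7, i32* %p
  ; The pointer is round-tripped through a differently typed pointer,
  ; which can hide the store from a load analysis that does not look
  ; through pointer casts.
  %f = bitcast i32* %p to float*
  %q = bitcast float* %f to i32*
  %v = load i32* %q
  ret i32 %v
}

With the load analysis looking through the bitcasts, instcombine can forward the stored value to the load, so the body reduces to returning the constant. The new tests below exercise the same idea for loads hanging off a select of pointers.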
Diffstat (limited to 'llvm/test/Transforms')
-rw-r--r--  llvm/test/Transforms/InstCombine/select.ll | 109
1 file changed, 109 insertions, 0 deletions
diff --git a/llvm/test/Transforms/InstCombine/select.ll b/llvm/test/Transforms/InstCombine/select.ll
index 05f65efc033..6cf9f0f6b4d 100644
--- a/llvm/test/Transforms/InstCombine/select.ll
+++ b/llvm/test/Transforms/InstCombine/select.ll
@@ -1276,3 +1276,112 @@ define i32 @test77(i1 %flag, i32* %x) {
%v = load i32* %p
ret i32 %v
}
+
+define i32 @test78(i1 %flag, i32* %x, i32* %y, i32* %z) {
+; Test that we can speculate the loads around the select even when we can't
+; fold the load completely away.
+; CHECK-LABEL: @test78(
+; CHECK: %[[V1:.*]] = load i32* %x
+; CHECK-NEXT: %[[V2:.*]] = load i32* %y
+; CHECK-NEXT: %[[S:.*]] = select i1 %flag, i32 %[[V1]], i32 %[[V2]]
+; CHECK-NEXT: ret i32 %[[S]]
+entry:
+ store i32 0, i32* %x
+ store i32 0, i32* %y
+ ; Block forwarding by storing to %z which could alias either %x or %y.
+ store i32 42, i32* %z
+ %p = select i1 %flag, i32* %x, i32* %y
+ %v = load i32* %p
+ ret i32 %v
+}
+
+define float @test79(i1 %flag, float* %x, i32* %y, i32* %z) {
+; Test that we can speculate the loads around the select even when we can't
+; fold the load completely away.
+; CHECK-LABEL: @test79(
+; CHECK: %[[V1:.*]] = load float* %x
+; CHECK-NEXT: %[[V2:.*]] = load float* %y
+; CHECK-NEXT: %[[S:.*]] = select i1 %flag, float %[[V1]], float %[[V2]]
+; CHECK-NEXT: ret float %[[S]]
+entry:
+ %x1 = bitcast float* %x to i32*
+ %y1 = bitcast i32* %y to float*
+ store i32 0, i32* %x1
+ store i32 0, i32* %y
+ ; Block forwarding by storing to %z which could alias either %x or %y.
+ store i32 42, i32* %z
+ %p = select i1 %flag, float* %x, float* %y1
+ %v = load float* %p
+ ret float %v
+}
+
+define i32 @test80(i1 %flag) {
+; Test that when we speculate the loads around the select they fold through
+; load->load folding and load->store folding.
+; CHECK-LABEL: @test80(
+; CHECK: %[[X:.*]] = alloca i32
+; CHECK-NEXT: %[[Y:.*]] = alloca i32
+; CHECK: %[[V:.*]] = load i32* %[[X]]
+; CHECK-NEXT: store i32 %[[V]], i32* %[[Y]]
+; CHECK-NEXT: ret i32 %[[V]]
+entry:
+ %x = alloca i32
+ %y = alloca i32
+ call void @scribble_on_memory(i32* %x)
+ call void @scribble_on_memory(i32* %y)
+ %tmp = load i32* %x
+ store i32 %tmp, i32* %y
+ %p = select i1 %flag, i32* %x, i32* %y
+ %v = load i32* %p
+ ret i32 %v
+}
+
+define float @test81(i1 %flag) {
+; Test that we can speculate the load around the select even though they use
+; differently typed pointers.
+; CHECK-LABEL: @test81(
+; CHECK: %[[X:.*]] = alloca i32
+; CHECK-NEXT: %[[Y:.*]] = alloca i32
+; CHECK: %[[V:.*]] = load i32* %[[X]]
+; CHECK-NEXT: store i32 %[[V]], i32* %[[Y]]
+; CHECK-NEXT: %[[C:.*]] = bitcast i32 %[[V]] to float
+; CHECK-NEXT: ret float %[[C]]
+entry:
+ %x = alloca float
+ %y = alloca i32
+ %x1 = bitcast float* %x to i32*
+ %y1 = bitcast i32* %y to float*
+ call void @scribble_on_memory(i32* %x1)
+ call void @scribble_on_memory(i32* %y)
+ %tmp = load i32* %x1
+ store i32 %tmp, i32* %y
+ %p = select i1 %flag, float* %x, float* %y1
+ %v = load float* %p
+ ret float %v
+}
+
+define i32 @test82(i1 %flag) {
+; Test that we can speculate the load around the select even though they use
+; differently typed pointers.
+; CHECK-LABEL: @test82(
+; CHECK: %[[X:.*]] = alloca float
+; CHECK-NEXT: %[[Y:.*]] = alloca i32
+; CHECK-NEXT: %[[X1:.*]] = bitcast float* %[[X]] to i32*
+; CHECK-NEXT: %[[Y1:.*]] = bitcast i32* %[[Y]] to float*
+; CHECK: %[[V:.*]] = load float* %[[X]]
+; CHECK-NEXT: store float %[[V]], float* %[[Y1]]
+; CHECK-NEXT: %[[C:.*]] = bitcast float %[[V]] to i32
+; CHECK-NEXT: ret i32 %[[C]]
+entry:
+ %x = alloca float
+ %y = alloca i32
+ %x1 = bitcast float* %x to i32*
+ %y1 = bitcast i32* %y to float*
+ call void @scribble_on_memory(i32* %x1)
+ call void @scribble_on_memory(i32* %y)
+ %tmp = load float* %x
+ store float %tmp, float* %y1
+ %p = select i1 %flag, i32* %x1, i32* %y
+ %v = load i32* %p
+ ret i32 %v
+}
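For context (not part of this patch, and assuming the file's existing header is unchanged), InstCombine regression tests such as select.ll are normally driven by a RUN line at the top of the file, roughly:

; RUN: opt < %s -instcombine -S | FileCheck %s

which is why the CHECK and CHECK-NEXT lines in the new tests describe the IR expected after instcombine has run.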