path: root/llvm/test
author     Chris Lattner <sabre@nondot.org>   2009-07-23 05:32:17 +0000
committer  Chris Lattner <sabre@nondot.org>   2009-07-23 05:32:17 +0000
commit     7d55541e564e800abb14746ec2b1d39c87f6a450 (patch)
tree       f2f9c064aeaf07ba23401e39e79dce9f19a4d6d9 /llvm/test
parent     7c02cf609d65e9fad58844373d524ff779aa2ded (diff)
Make some existing optimizations that would only trigger on scalars
also apply to vectors. This allows us to compile this:

    #include <emmintrin.h>
    __m128i a(__m128 a, __m128 b) { return a==a & b==b; }
    __m128i b(__m128 a, __m128 b) { return a!=a | b!=b; }

to:

    _a:
        cmpordps %xmm1, %xmm0
        ret
    _b:
        cmpunordps %xmm1, %xmm0
        ret

with clang instead of to a ton of horrible code.

llvm-svn: 76863
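In IR terms, the fold exercised by the new tests rewrites a pair of ordered compares against zero, sign-extended and combined with an and, into a single compare of the two original operands: %a is not NaN and %b is not NaN exactly when %a and %b are ordered with respect to each other. A minimal before/after sketch of that pattern; the value names are illustrative, not the exact names InstCombine emits:

    ; before: the pattern clang emits for a==a & b==b on <4 x float>
    %cmp1 = fcmp ord <4 x float> %a, zeroinitializer
    %cmp2 = fcmp ord <4 x float> %b, zeroinitializer
    %s1   = sext <4 x i1> %cmp1 to <4 x i32>
    %s2   = sext <4 x i1> %cmp2 to <4 x i32>
    %and  = and <4 x i32> %s1, %s2

    ; after instcombine: a single ordered compare, then one sext
    %cmp  = fcmp ord <4 x float> %a, %b
    %ext  = sext <4 x i1> %cmp to <4 x i32>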
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/Transforms/InstCombine/vector-casts.ll | 33
1 file changed, 31 insertions, 2 deletions
diff --git a/llvm/test/Transforms/InstCombine/vector-casts.ll b/llvm/test/Transforms/InstCombine/vector-casts.ll
index c6d1eaa1ff0..e6171852061 100644
--- a/llvm/test/Transforms/InstCombine/vector-casts.ll
+++ b/llvm/test/Transforms/InstCombine/vector-casts.ll
@@ -5,7 +5,7 @@ define <2 x i1> @test1(<2 x i64> %a) {
%t = trunc <2 x i64> %a to <2 x i1>
ret <2 x i1> %t
-; CHECK: define <2 x i1> @test1
+; CHECK: @test1
; CHECK: and <2 x i64> %a, <i64 1, i64 1>
; CHECK: icmp ne <2 x i64> %tmp, zeroinitializer
}
@@ -16,7 +16,36 @@ define <2 x i64> @test2(<2 x i64> %a) {
%t = ashr <2 x i64> %b, <i64 1, i64 1>
ret <2 x i64> %t
-; CHECK: define <2 x i64> @test2
+; CHECK: @test2
; CHECK: and <2 x i64> %a, <i64 65535, i64 65535>
; CHECK: lshr <2 x i64> %b, <i64 1, i64 1>
}
+
+
+
+define <2 x i64> @test3(<4 x float> %a, <4 x float> %b) nounwind readnone {
+entry:
+ %cmp = fcmp ord <4 x float> %a, zeroinitializer
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %cmp4 = fcmp ord <4 x float> %b, zeroinitializer
+ %sext5 = sext <4 x i1> %cmp4 to <4 x i32>
+ %and = and <4 x i32> %sext, %sext5
+ %conv = bitcast <4 x i32> %and to <2 x i64>
+ ret <2 x i64> %conv
+
+; CHECK: @test3
+; CHECK: fcmp ord <4 x float> %a, %b
+}
+
+define <2 x i64> @test4(<4 x float> %a, <4 x float> %b) nounwind readnone {
+entry:
+ %cmp = fcmp uno <4 x float> %a, zeroinitializer
+ %sext = sext <4 x i1> %cmp to <4 x i32>
+ %cmp4 = fcmp uno <4 x float> %b, zeroinitializer
+ %sext5 = sext <4 x i1> %cmp4 to <4 x i32>
+ %or = or <4 x i32> %sext, %sext5
+ %conv = bitcast <4 x i32> %or to <2 x i64>
+ ret <2 x i64> %conv
+; CHECK: @test4
+; CHECK: fcmp uno <4 x float> %a, %b
+}
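@test4 checks the dual fold: an or of two "is unordered" compares against zero collapses into a single unordered compare of the operands, which is what the CHECK pattern above matches once the file's RUN line (not shown in this hunk) pipes it through instcombine. A sketch of the folded form, again with illustrative value names:

    %cmp = fcmp uno <4 x float> %a, %b
    %ext = sext <4 x i1> %cmp to <4 x i32>
    %res = bitcast <4 x i32> %ext to <2 x i64>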