author    Simon Pilgrim <llvm-dev@redking.me.uk>  2017-10-24 15:38:16 +0000
committer Simon Pilgrim <llvm-dev@redking.me.uk>  2017-10-24 15:38:16 +0000
commit    0a12c239b62f9d4aec92992937a4ae30a1c5d54c (patch)
tree      a4d57f144243543e439bc422150e811e0930d9aa /llvm/test/CodeGen/X86/bitcast-setcc-512.ll
parent    1216acc3c4df4e8819b490484fe866fce75ae511 (diff)
[X86] truncateVectorCompareWithPACKSS - use PACKSSDW/PACKSSWB instead of just PACKSSWB.
By using the widest type possible for PACKSS truncation we have a better chance of being able to peek through bitcasts, and this improves other combines driven by ComputeNumSignBits.

llvm-svn: 316448
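The tests updated below follow the usual compare-then-bitcast shape; a minimal IR sketch of that pattern (illustrative only, not copied verbatim from the test file) looks like:

    define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
      ; signed greater-than compare yields a <16 x i1> mask
      %x = icmp sgt <16 x i32> %a, %b
      ; bitcasting the mask to i16 is what drives the PACKSS-based truncation during lowering
      %res = bitcast <16 x i1> %x to i16
      ret i16 %res
    }

With this change the 32-bit (and 64-bit) compare results are first narrowed with packssdw rather than packsswb, so the intermediate values keep the sign information that ComputeNumSignBits relies on before the final packsswb.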
Diffstat (limited to 'llvm/test/CodeGen/X86/bitcast-setcc-512.ll')
-rw-r--r--  llvm/test/CodeGen/X86/bitcast-setcc-512.ll  |  40
1 file changed, 20 insertions(+), 20 deletions(-)
diff --git a/llvm/test/CodeGen/X86/bitcast-setcc-512.ll b/llvm/test/CodeGen/X86/bitcast-setcc-512.ll
index bcd05660727..2b73c6e16bd 100644
--- a/llvm/test/CodeGen/X86/bitcast-setcc-512.ll
+++ b/llvm/test/CodeGen/X86/bitcast-setcc-512.ll
@@ -222,10 +222,10 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
; SSE: # BB#0:
; SSE-NEXT: pcmpgtd %xmm7, %xmm3
; SSE-NEXT: pcmpgtd %xmm6, %xmm2
-; SSE-NEXT: packsswb %xmm3, %xmm2
+; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: pcmpgtd %xmm5, %xmm1
; SSE-NEXT: pcmpgtd %xmm4, %xmm0
-; SSE-NEXT: packsswb %xmm1, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
@@ -237,12 +237,12 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b) {
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtd %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpacksswb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpackssdw %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
@@ -287,10 +287,10 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
; SSE: # BB#0:
; SSE-NEXT: cmpltps %xmm3, %xmm7
; SSE-NEXT: cmpltps %xmm2, %xmm6
-; SSE-NEXT: packsswb %xmm7, %xmm6
+; SSE-NEXT: packssdw %xmm7, %xmm6
; SSE-NEXT: cmpltps %xmm1, %xmm5
; SSE-NEXT: cmpltps %xmm0, %xmm4
-; SSE-NEXT: packsswb %xmm5, %xmm4
+; SSE-NEXT: packssdw %xmm5, %xmm4
; SSE-NEXT: packsswb %xmm6, %xmm4
; SSE-NEXT: pmovmskb %xmm4, %eax
; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
@@ -300,10 +300,10 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b) {
; AVX1: # BB#0:
; AVX1-NEXT: vcmpltps %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcmpltps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
@@ -1040,11 +1040,11 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
; SSE: # BB#0:
; SSE-NEXT: pcmpgtq %xmm7, %xmm3
; SSE-NEXT: pcmpgtq %xmm6, %xmm2
-; SSE-NEXT: packsswb %xmm3, %xmm2
+; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: pcmpgtq %xmm5, %xmm1
; SSE-NEXT: pcmpgtq %xmm4, %xmm0
-; SSE-NEXT: packsswb %xmm1, %xmm0
-; SSE-NEXT: packsswb %xmm2, %xmm0
+; SSE-NEXT: packssdw %xmm1, %xmm0
+; SSE-NEXT: packssdw %xmm2, %xmm0
; SSE-NEXT: packsswb %xmm0, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
@@ -1056,12 +1056,12 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpacksswb %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpackssdw %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpacksswb %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
@@ -1072,7 +1072,7 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b) {
; AVX2: # BB#0:
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
@@ -1104,11 +1104,11 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
; SSE: # BB#0:
; SSE-NEXT: cmpltpd %xmm3, %xmm7
; SSE-NEXT: cmpltpd %xmm2, %xmm6
-; SSE-NEXT: packsswb %xmm7, %xmm6
+; SSE-NEXT: packssdw %xmm7, %xmm6
; SSE-NEXT: cmpltpd %xmm1, %xmm5
; SSE-NEXT: cmpltpd %xmm0, %xmm4
-; SSE-NEXT: packsswb %xmm5, %xmm4
-; SSE-NEXT: packsswb %xmm6, %xmm4
+; SSE-NEXT: packssdw %xmm5, %xmm4
+; SSE-NEXT: packssdw %xmm6, %xmm4
; SSE-NEXT: packsswb %xmm0, %xmm4
; SSE-NEXT: pmovmskb %xmm4, %eax
; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
@@ -1118,10 +1118,10 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
; AVX1: # BB#0:
; AVX1-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpacksswb %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vpackssdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovmskps %ymm0, %eax
; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
@@ -1132,7 +1132,7 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b) {
; AVX2: # BB#0:
; AVX2-NEXT: vcmpltpd %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vcmpltpd %ymm0, %ymm2, %ymm0
-; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovmskps %ymm0, %eax
; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>