author    Craig Topper <craig.topper@intel.com>  2019-01-05 18:48:11 +0000
committer Craig Topper <craig.topper@intel.com>  2019-01-05 18:48:11 +0000
commit    3f48dbf72e2267b85bab1d5924f264569c4db09f (patch)
tree      e90f2d8d1c96cc9e0e81ef6efe8e69791c9f0c5c /llvm/test/CodeGen/X86
parent    da32d7f1479d3b8016af0da55a15c13cd1369d94 (diff)
download  bcm5719-llvm-3f48dbf72e2267b85bab1d5924f264569c4db09f.tar.gz
          bcm5719-llvm-3f48dbf72e2267b85bab1d5924f264569c4db09f.zip
[X86] Allow LowerTRUNCATE to use PACKUS/PACKSS for v16i16->v16i8 truncate when -mprefer-vector-width=256 is in effect and BWI is not available.
llvm-svn: 350473
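
For context, a minimal .ll sketch (assumed, not taken from this patch; the function name and RUN line are illustrative guesses at matching flags) of the pattern the new lowering handles: a v16i16->v16i8 truncate on a target with AVX512VL but without AVX512BW, with 256-bit vectors preferred. Without BWI there is no vpmovwb, so LowerTRUNCATE can now emit a single PACKUS/PACKSS of the two 128-bit halves instead of a longer sequence.

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,-avx512bw,+prefer-256-bit
define <16 x i8> @trunc_v16i16_v16i8(<16 x i16> %x) {
  %t = trunc <16 x i16> %x to <16 x i8>
  ret <16 x i8> %t
}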
Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r--  llvm/test/CodeGen/X86/prefer-avx256-mask-extend.ll   | 4 +---
-rw-r--r--  llvm/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll  | 8 ++------
2 files changed, 3 insertions(+), 9 deletions(-)
diff --git a/llvm/test/CodeGen/X86/prefer-avx256-mask-extend.ll b/llvm/test/CodeGen/X86/prefer-avx256-mask-extend.ll
index b4f8e5b0b6c..b4d452f2d3e 100644
--- a/llvm/test/CodeGen/X86/prefer-avx256-mask-extend.ll
+++ b/llvm/test/CodeGen/X86/prefer-avx256-mask-extend.ll
@@ -48,11 +48,9 @@ define <16 x i8> @testv16i1_sext_v16i8(<8 x i32>* %p, <8 x i32>* %q) {
; AVX256-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
; AVX256-NEXT: vmovdqa32 %ymm0, %ymm1 {%k2} {z}
; AVX256-NEXT: vpmovdw %ymm1, %xmm1
-; AVX256-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX256-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; AVX256-NEXT: vpmovdw %ymm0, %xmm0
-; AVX256-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX256-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX256-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX256-NEXT: vzeroupper
; AVX256-NEXT: retq
;
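
The same substitution appears in every hunk below. The reasoning, inferred here rather than stated in the commit message, comes down to the saturation semantics of the two pack instructions: vpackuswb saturates signed word inputs to the unsigned byte range, so an all-ones word (-1) would clamp to 0x00, which is why the old sequence first shifted each word right by 8; vpacksswb saturates to the signed byte range and maps -1 straight to 0xFF. Since each word produced by the masked vpmovdw is known to be 0x0000 or 0xFFFF, the signed pack preserves the mask bytes directly and both vpsrlw $8 shifts disappear:

; Each word lane entering the pack is a mask value, 0x0000 or 0xFFFF.
; Old: vpsrlw $8  maps 0xFFFF -> 0x00FF (255) and 0x0000 -> 0x0000 (0);
;      vpackuswb  then saturates 255 -> 0xFF and 0 -> 0x00.
; New: vpacksswb  saturates 0xFFFF (-1) -> 0xFF and 0x0000 -> 0x00 directly.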
diff --git a/llvm/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll b/llvm/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
index 92f6e271126..7f4480ceb63 100644
--- a/llvm/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
+++ b/llvm/test/CodeGen/X86/prefer-avx256-mask-shuffle.ll
@@ -34,11 +34,9 @@ define <16 x i1> @shuf16i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0(<8 x i32>* %a, <8
; AVX256VL-NEXT: kshiftrw $8, %k0, %k2
; AVX256VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k2} {z}
; AVX256VL-NEXT: vpmovdw %ymm1, %xmm1
-; AVX256VL-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX256VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; AVX256VL-NEXT: vpmovdw %ymm0, %xmm0
-; AVX256VL-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX256VL-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX256VL-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX256VL-NEXT: vzeroupper
; AVX256VL-NEXT: retq
;
@@ -169,11 +167,9 @@ define <32 x i1> @shuf32i1_3_6_22_12_3_7_7_0_3_6_1_13_3_21_7_0_3_6_22_12_3_7_7_0
; AVX256VL-NEXT: kshiftrw $8, %k0, %k2
; AVX256VL-NEXT: vmovdqa32 %ymm0, %ymm1 {%k2} {z}
; AVX256VL-NEXT: vpmovdw %ymm1, %xmm1
-; AVX256VL-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX256VL-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
; AVX256VL-NEXT: vpmovdw %ymm0, %xmm0
-; AVX256VL-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX256VL-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX256VL-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX256VL-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm0
; AVX256VL-NEXT: retq
;