path: root/llvm/test/CodeGen/X86/vector-bitreverse.ll
author    Craig Topper <craig.topper@gmail.com>    2017-02-11 05:32:57 +0000
committer Craig Topper <craig.topper@gmail.com>    2017-02-11 05:32:57 +0000
commit    464b8cb24427478bdcee5d8bb338fb219d0751ed (patch)
tree      f50e506887b572293f740032089bcb968c4dcd30 /llvm/test/CodeGen/X86/vector-bitreverse.ll
parent    a67cf0001f5d24305f6c443da05266781c94de2e (diff)
[X86] Don't base domain decisions on VEXTRACTF128/VINSERTF128 if only AVX1 is available.
It seems the execution dependency pass likes to pick FP instructions for a register produced by a vextractf128 instruction, even when most of the consuming code is integer. Without AVX2 the corresponding integer instruction is not available.

This patch suppresses the domain of these instructions to GenericDomain when AVX2 is not supported, so that they are ignored by domain fixing. If AVX2 is supported, we report the correct domain and allow them to switch between integer and FP.

Overall I think this produces better results in the modified test cases.

llvm-svn: 294824
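For illustration, here is a minimal, self-contained C++ sketch of the decision described above: report GenericDomain for the 128-bit lane extract/insert instructions when AVX2 is unavailable so the domain-fixing pass leaves them alone, and report a real domain only when the integer counterparts exist. The enum and function names are hypothetical simplifications for exposition, not the actual X86InstrInfo interface.

    #include <cstdint>
    #include <cstdio>

    // Illustrative execution-domain categories (names are hypothetical, not LLVM's).
    enum class ExecDomain : uint8_t {
      Generic,      // domain fixing ignores the instruction
      PackedSingle, // FP domain: vextractf128 / vinsertf128
      PackedInt     // integer domain: vextracti128 / vinserti128 (AVX2 only)
    };

    enum class Opcode { VEXTRACTF128, VINSERTF128, Other };

    // Without AVX2 there is no integer form to switch to, so report Generic and
    // keep the instruction out of domain fixing; with AVX2, report the FP domain
    // so the pass may convert it when the surrounding code is integer.
    static ExecDomain laneOpDomain(Opcode Op, bool HasAVX2) {
      if (Op != Opcode::VEXTRACTF128 && Op != Opcode::VINSERTF128)
        return ExecDomain::Generic;
      return HasAVX2 ? ExecDomain::PackedSingle : ExecDomain::Generic;
    }

    int main() {
      std::printf("vextractf128 domain, AVX1-only: %d, with AVX2: %d\n",
                  static_cast<int>(laneOpDomain(Opcode::VEXTRACTF128, false)),
                  static_cast<int>(laneOpDomain(Opcode::VEXTRACTF128, true)));
      return 0;
    }

Because the extract/insert is reported as GenericDomain under AVX1, the surrounding integer code stays in the integer domain (vpand/vmovdqa) instead of being pulled into the FP domain (vandps/vmovaps), which is what the test diff below shows.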
Diffstat (limited to 'llvm/test/CodeGen/X86/vector-bitreverse.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-bitreverse.ll  |  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-bitreverse.ll b/llvm/test/CodeGen/X86/vector-bitreverse.ll
index beec58bdaf7..226c0adbaf3 100644
--- a/llvm/test/CodeGen/X86/vector-bitreverse.ll
+++ b/llvm/test/CodeGen/X86/vector-bitreverse.ll
@@ -613,8 +613,8 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: test_bitreverse_v32i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm2, %xmm1, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
@@ -622,7 +622,7 @@ define <32 x i8> @test_bitreverse_v32i8(<32 x i8> %a) nounwind {
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX1-NEXT: vpshufb %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vandps %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm3
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
@@ -1361,8 +1361,8 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; AVX1-LABEL: test_bitreverse_v64i8:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovaps {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [0,128,64,192,32,160,96,224,16,144,80,208,48,176,112,240]
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm2
@@ -1370,7 +1370,7 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm0, %xmm4
+; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm4
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
@@ -1378,13 +1378,13 @@ define <64 x i8> @test_bitreverse_v64i8(<64 x i8> %a) nounwind {
; AVX1-NEXT: vpor %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm2, %xmm4
+; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm2
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vpor %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vandps %xmm3, %xmm1, %xmm4
+; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm4
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1