commit 2c570eaa009e00ecb42cc8e59975efd4f2a303f1 (patch)
Author:    Craig Topper <craig.topper@intel.com>  2018-01-27 09:10:58 +0000
Committer: Craig Topper <craig.topper@intel.com>  2018-01-27 09:10:58 +0000
Tree:      b72b90189ec5922383ae6e7b4c93df58d7751676 /llvm/test
Parent:    37304c475a47ff24e56ef48754fba3b2325e39e5 (diff)
[TargetLowering] Teach TargetLowering::SimplifySetCC to simplify setcc of vXi1 vectors into logic ops.
This transform was already being done for setcc of scalar i1. This extends it
to vectors.

llvm-svn: 323585
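
To illustrate the pattern (a hypothetical reduced sketch, not copied verbatim
from the tests below): the combine fires on a setcc whose operands are
themselves vXi1 compare results. For i1 elements, with true sign-extended to
-1, seteq is xnor, setne is xor, and setgt reduces to and(not LHS, RHS). That
is why the AVX-512 checks below fold the kandnw into a masked compare, and the
AVX1/AVX2 checks replace a vpcmpeqb with a vpxor plus an xor against an
all-ones constant.

; Hypothetical reduced example of the vXi1 setcc shape this patch targets.
define <4 x i32> @setcc_of_masks(<4 x i64> %x, <4 x i64> %y,
                                 <4 x i64> %x1, <4 x i64> %y1) {
  %m0 = icmp sgt <4 x i64> %x, %y     ; first <4 x i1> mask
  %m1 = icmp sgt <4 x i64> %x1, %y1   ; second <4 x i1> mask
  ; setcc of two i1 vectors: sgt here is equivalent to and(not %m0, %m1),
  ; so SimplifySetCC can emit logic ops instead of a third compare.
  %res = icmp sgt <4 x i1> %m0, %m1
  %ext = sext <4 x i1> %res to <4 x i32>
  ret <4 x i32> %ext
}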
Diffstat (limited to 'llvm/test')
 llvm/test/CodeGen/X86/avx512-mask-op.ll         |  32
 llvm/test/CodeGen/X86/avx512-schedule.ll        |  16
 llvm/test/CodeGen/X86/x86-interleaved-access.ll | 108
 3 files changed, 74 insertions(+), 82 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll
index 45a84fbe304..fcbd4e07460 100644
--- a/llvm/test/CodeGen/X86/avx512-mask-op.ll
+++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll
@@ -502,9 +502,8 @@ define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1
; KNL-NEXT: ## kill: def %ymm2 killed %ymm2 def %zmm2
; KNL-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
; KNL-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
-; KNL-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; KNL-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
-; KNL-NEXT: kandnw %k0, %k1, %k1
+; KNL-NEXT: vpcmpleq %zmm1, %zmm0, %k1 {%k1}
; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; KNL-NEXT: vzeroupper
@@ -512,9 +511,8 @@ define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1
;
; SKX-LABEL: test4:
; SKX: ## %bb.0:
-; SKX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0
; SKX-NEXT: vpcmpgtq %ymm3, %ymm2, %k1
-; SKX-NEXT: kandnw %k0, %k1, %k0
+; SKX-NEXT: vpcmpleq %ymm1, %ymm0, %k0 {%k1}
; SKX-NEXT: vpmovm2d %k0, %xmm0
; SKX-NEXT: vzeroupper
; SKX-NEXT: retq
@@ -525,9 +523,8 @@ define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1
; AVX512BW-NEXT: ## kill: def %ymm2 killed %ymm2 def %zmm2
; AVX512BW-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
; AVX512BW-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
-; AVX512BW-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
-; AVX512BW-NEXT: kandnw %k0, %k1, %k1
+; AVX512BW-NEXT: vpcmpleq %zmm1, %zmm0, %k1 {%k1}
; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
@@ -539,9 +536,8 @@ define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1
; AVX512DQ-NEXT: ## kill: def %ymm2 killed %ymm2 def %zmm2
; AVX512DQ-NEXT: ## kill: def %ymm1 killed %ymm1 def %zmm1
; AVX512DQ-NEXT: ## kill: def %ymm0 killed %ymm0 def %zmm0
-; AVX512DQ-NEXT: vpcmpgtq %zmm1, %zmm0, %k0
; AVX512DQ-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
-; AVX512DQ-NEXT: kandnw %k0, %k1, %k0
+; AVX512DQ-NEXT: vpcmpleq %zmm1, %zmm0, %k0 {%k1}
; AVX512DQ-NEXT: vpmovm2d %k0, %zmm0
; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
@@ -560,9 +556,8 @@ define <2 x i64> @test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1
; KNL-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
; KNL-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
-; KNL-NEXT: vpcmpgtq %zmm0, %zmm1, %k0
-; KNL-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
-; KNL-NEXT: kandnw %k1, %k0, %k1
+; KNL-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
+; KNL-NEXT: vpcmpleq %zmm3, %zmm2, %k1 {%k1}
; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; KNL-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; KNL-NEXT: vzeroupper
@@ -570,9 +565,8 @@ define <2 x i64> @test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1
;
; SKX-LABEL: test5:
; SKX: ## %bb.0:
-; SKX-NEXT: vpcmpgtq %xmm0, %xmm1, %k0
-; SKX-NEXT: vpcmpgtq %xmm3, %xmm2, %k1
-; SKX-NEXT: kandnw %k1, %k0, %k0
+; SKX-NEXT: vpcmpgtq %xmm0, %xmm1, %k1
+; SKX-NEXT: vpcmpleq %xmm3, %xmm2, %k0 {%k1}
; SKX-NEXT: vpmovm2q %k0, %xmm0
; SKX-NEXT: retq
;
@@ -582,9 +576,8 @@ define <2 x i64> @test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1
; AVX512BW-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
; AVX512BW-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
-; AVX512BW-NEXT: vpcmpgtq %zmm0, %zmm1, %k0
-; AVX512BW-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
-; AVX512BW-NEXT: kandnw %k1, %k0, %k1
+; AVX512BW-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
+; AVX512BW-NEXT: vpcmpleq %zmm3, %zmm2, %k1 {%k1}
; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512BW-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512BW-NEXT: vzeroupper
@@ -596,9 +589,8 @@ define <2 x i64> @test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1
; AVX512DQ-NEXT: ## kill: def %xmm2 killed %xmm2 def %zmm2
; AVX512DQ-NEXT: ## kill: def %xmm1 killed %xmm1 def %zmm1
; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 def %zmm0
-; AVX512DQ-NEXT: vpcmpgtq %zmm0, %zmm1, %k0
-; AVX512DQ-NEXT: vpcmpgtq %zmm3, %zmm2, %k1
-; AVX512DQ-NEXT: kandnw %k1, %k0, %k0
+; AVX512DQ-NEXT: vpcmpgtq %zmm0, %zmm1, %k1
+; AVX512DQ-NEXT: vpcmpleq %zmm3, %zmm2, %k0 {%k1}
; AVX512DQ-NEXT: vpmovm2q %k0, %zmm0
; AVX512DQ-NEXT: ## kill: def %xmm0 killed %xmm0 killed %zmm0
; AVX512DQ-NEXT: vzeroupper
diff --git a/llvm/test/CodeGen/X86/avx512-schedule.ll b/llvm/test/CodeGen/X86/avx512-schedule.ll
index 59fb05c06b7..7a1bd5d33dd 100755
--- a/llvm/test/CodeGen/X86/avx512-schedule.ll
+++ b/llvm/test/CodeGen/X86/avx512-schedule.ll
@@ -7031,18 +7031,16 @@ entry:
define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1) {
; GENERIC-LABEL: test4:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 # sched: [3:1.00]
; GENERIC-NEXT: vpcmpgtq %ymm3, %ymm2, %k1 # sched: [3:1.00]
-; GENERIC-NEXT: kandnw %k0, %k1, %k0 # sched: [1:1.00]
+; GENERIC-NEXT: vpcmpleq %ymm1, %ymm0, %k0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: vpmovm2d %k0, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: vzeroupper # sched: [100:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: test4:
; SKX: # %bb.0:
-; SKX-NEXT: vpcmpgtq %ymm1, %ymm0, %k0 # sched: [3:1.00]
; SKX-NEXT: vpcmpgtq %ymm3, %ymm2, %k1 # sched: [3:1.00]
-; SKX-NEXT: kandnw %k0, %k1, %k0 # sched: [1:1.00]
+; SKX-NEXT: vpcmpleq %ymm1, %ymm0, %k0 {%k1} # sched: [3:1.00]
; SKX-NEXT: vpmovm2d %k0, %xmm0 # sched: [1:0.25]
; SKX-NEXT: vzeroupper # sched: [4:1.00]
; SKX-NEXT: retq # sched: [7:1.00]
@@ -7056,17 +7054,15 @@ define <4 x i32> @test4(<4 x i64> %x, <4 x i64> %y, <4 x i64> %x1, <4 x i64> %y1
define <2 x i64> @vcmp_test5(<2 x i64> %x, <2 x i64> %y, <2 x i64> %x1, <2 x i64> %y1) {
; GENERIC-LABEL: vcmp_test5:
; GENERIC: # %bb.0:
-; GENERIC-NEXT: vpcmpgtq %xmm0, %xmm1, %k0 # sched: [3:1.00]
-; GENERIC-NEXT: vpcmpgtq %xmm3, %xmm2, %k1 # sched: [3:1.00]
-; GENERIC-NEXT: kandnw %k1, %k0, %k0 # sched: [1:1.00]
+; GENERIC-NEXT: vpcmpgtq %xmm0, %xmm1, %k1 # sched: [3:1.00]
+; GENERIC-NEXT: vpcmpleq %xmm3, %xmm2, %k0 {%k1} # sched: [3:1.00]
; GENERIC-NEXT: vpmovm2q %k0, %xmm0 # sched: [1:0.33]
; GENERIC-NEXT: retq # sched: [1:1.00]
;
; SKX-LABEL: vcmp_test5:
; SKX: # %bb.0:
-; SKX-NEXT: vpcmpgtq %xmm0, %xmm1, %k0 # sched: [3:1.00]
-; SKX-NEXT: vpcmpgtq %xmm3, %xmm2, %k1 # sched: [3:1.00]
-; SKX-NEXT: kandnw %k1, %k0, %k0 # sched: [1:1.00]
+; SKX-NEXT: vpcmpgtq %xmm0, %xmm1, %k1 # sched: [3:1.00]
+; SKX-NEXT: vpcmpleq %xmm3, %xmm2, %k0 {%k1} # sched: [3:1.00]
; SKX-NEXT: vpmovm2q %k0, %xmm0 # sched: [1:0.25]
; SKX-NEXT: retq # sched: [7:1.00]
%x_gt_y = icmp slt <2 x i64> %x, %y
diff --git a/llvm/test/CodeGen/X86/x86-interleaved-access.ll b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
index 946c9b8afa2..c1ea4129b33 100644
--- a/llvm/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/llvm/test/CodeGen/X86/x86-interleaved-access.ll
@@ -543,7 +543,8 @@ define <16 x i1> @interleaved_load_vf16_i8_stride4(<64 x i8>* %ptr) {
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpcmpeqb %xmm0, %xmm5, %xmm0
-; AVX1-NEXT: vpcmpeqb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpxor %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -591,7 +592,8 @@ define <16 x i1> @interleaved_load_vf16_i8_stride4(<64 x i8>* %ptr) {
; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT: vpcmpeqb %xmm0, %xmm4, %xmm0
-; AVX2-NEXT: vpcmpeqb %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpxor %xmm0, %xmm2, %xmm0
+; AVX2-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -660,97 +662,98 @@ define <16 x i1> @interleaved_load_vf16_i8_stride4(<64 x i8>* %ptr) {
define <32 x i1> @interleaved_load_vf32_i8_stride4(<128 x i8>* %ptr) {
; AVX1-LABEL: interleaved_load_vf32_i8_stride4:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa (%rdi), %ymm10
-; AVX1-NEXT: vmovdqa 32(%rdi), %ymm13
+; AVX1-NEXT: vmovdqa (%rdi), %ymm11
+; AVX1-NEXT: vmovdqa 32(%rdi), %ymm14
; AVX1-NEXT: vmovdqa 64(%rdi), %ymm2
; AVX1-NEXT: vmovdqa 96(%rdi), %ymm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = <u,u,u,u,0,4,8,12,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm11
-; AVX1-NEXT: vpshufb %xmm6, %xmm11, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm12
+; AVX1-NEXT: vpshufb %xmm6, %xmm12, %xmm5
; AVX1-NEXT: vpshufb %xmm6, %xmm3, %xmm7
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm12
-; AVX1-NEXT: vpshufb %xmm0, %xmm12, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm13
+; AVX1-NEXT: vpshufb %xmm0, %xmm13, %xmm4
; AVX1-NEXT: vpshufb %xmm0, %xmm2, %xmm5
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm7[4,5,6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm8
-; AVX1-NEXT: vextractf128 $1, %ymm13, %xmm14
-; AVX1-NEXT: vpshufb %xmm6, %xmm14, %xmm5
-; AVX1-NEXT: vpshufb %xmm6, %xmm13, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm14, %xmm15
+; AVX1-NEXT: vpshufb %xmm6, %xmm15, %xmm5
+; AVX1-NEXT: vpshufb %xmm6, %xmm14, %xmm6
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm6[0],xmm5[0],xmm6[1],xmm5[1]
-; AVX1-NEXT: vextractf128 $1, %ymm10, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm11, %xmm6
; AVX1-NEXT: vpshufb %xmm0, %xmm6, %xmm4
-; AVX1-NEXT: vpshufb %xmm0, %xmm10, %xmm0
+; AVX1-NEXT: vpshufb %xmm0, %xmm11, %xmm0
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm5[4,5,6,7]
; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm0[0,1,2,3],ymm8[4,5,6,7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm0, %xmm11, %xmm4
+; AVX1-NEXT: vpshufb %xmm0, %xmm12, %xmm4
; AVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm5
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm5, %xmm12, %xmm1
+; AVX1-NEXT: vpshufb %xmm5, %xmm13, %xmm1
; AVX1-NEXT: vpshufb %xmm5, %xmm2, %xmm7
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm4[4,5,6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT: vpshufb %xmm0, %xmm14, %xmm4
-; AVX1-NEXT: vpshufb %xmm0, %xmm13, %xmm0
+; AVX1-NEXT: vpshufb %xmm0, %xmm15, %xmm4
+; AVX1-NEXT: vpshufb %xmm0, %xmm14, %xmm0
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; AVX1-NEXT: vpshufb %xmm5, %xmm6, %xmm4
-; AVX1-NEXT: vpshufb %xmm5, %xmm10, %xmm5
+; AVX1-NEXT: vpshufb %xmm5, %xmm11, %xmm5
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm0, %xmm11, %xmm1
+; AVX1-NEXT: vpshufb %xmm0, %xmm12, %xmm1
; AVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm4
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm4, %xmm12, %xmm5
+; AVX1-NEXT: vpshufb %xmm4, %xmm13, %xmm5
; AVX1-NEXT: vpshufb %xmm4, %xmm2, %xmm7
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm7[0],xmm5[0],xmm7[1],xmm5[1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm5[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX1-NEXT: vpshufb %xmm0, %xmm14, %xmm5
-; AVX1-NEXT: vpshufb %xmm0, %xmm13, %xmm0
+; AVX1-NEXT: vpshufb %xmm0, %xmm15, %xmm5
+; AVX1-NEXT: vpshufb %xmm0, %xmm14, %xmm0
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
; AVX1-NEXT: vpshufb %xmm4, %xmm6, %xmm5
-; AVX1-NEXT: vpshufb %xmm4, %xmm10, %xmm4
+; AVX1-NEXT: vpshufb %xmm4, %xmm11, %xmm4
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm4[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm1[4,5,6,7]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm0, %xmm12, %xmm1
+; AVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm3
+; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX1-NEXT: vpshufb %xmm3, %xmm13, %xmm4
+; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
+; AVX1-NEXT: vpshufb %xmm0, %xmm15, %xmm2
+; AVX1-NEXT: vpshufb %xmm0, %xmm14, %xmm0
+; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; AVX1-NEXT: vpshufb %xmm3, %xmm6, %xmm2
+; AVX1-NEXT: vpshufb %xmm3, %xmm11, %xmm3
+; AVX1-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm1, %xmm11, %xmm4
-; AVX1-NEXT: vpshufb %xmm1, %xmm3, %xmm3
-; AVX1-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX1-NEXT: vpshufb %xmm4, %xmm12, %xmm5
-; AVX1-NEXT: vpshufb %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm3[4,5,6,7]
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
-; AVX1-NEXT: vpshufb %xmm1, %xmm14, %xmm3
-; AVX1-NEXT: vpshufb %xmm1, %xmm13, %xmm1
-; AVX1-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
-; AVX1-NEXT: vpshufb %xmm4, %xmm6, %xmm3
-; AVX1-NEXT: vpshufb %xmm4, %xmm10, %xmm4
-; AVX1-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0,1,2,3],xmm1[4,5,6,7]
-; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
+; AVX1-NEXT: vextractf128 $1, %ymm9, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm8, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpcmpeqb %xmm9, %xmm8, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm9, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm8, %xmm4
-; AVX1-NEXT: vpcmpeqb %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm4
-; AVX1-NEXT: vpcmpeqb %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
-; AVX1-NEXT: vpcmpeqb %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpcmpeqb %xmm0, %xmm3, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
+; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vextractf128 $1, %ymm10, %xmm3
+; AVX1-NEXT: vpcmpeqb %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpeqb %xmm0, %xmm10, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: vxorps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: vxorps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_load_vf32_i8_stride4:
@@ -845,7 +848,8 @@ define <32 x i1> @interleaved_load_vf32_i8_stride4(<128 x i8>* %ptr) {
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3,4,5],ymm2[6,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vpcmpeqb %ymm1, %ymm0, %ymm0
-; AVX2-NEXT: vpcmpeqb %ymm0, %ymm8, %ymm0
+; AVX2-NEXT: vpxor %ymm0, %ymm8, %ymm0
+; AVX2-NEXT: vpxor {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: interleaved_load_vf32_i8_stride4: