| | | |
|---|---|---|
| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-03-28 16:40:38 +0000 |
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2017-03-28 16:40:38 +0000 |
| commit | 3e2aa7f40ed5901abe7117c7462a7550c1b043c8 | (patch) |
| tree | 577e6436476d22f11cc68e55e65e3a037c15ddbf | /llvm/test/CodeGen/X86 |
| parent | 058f2f6d724f9f866049ceae59d42c5625f3967a | (diff) |
[X86][AVX2] Add support for combining v16i16 shuffles to VPBLENDW
llvm-svn: 298929
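
For reference, a minimal sketch of the kind of pattern this combine targets (the function name and mask below are illustrative, not taken from this commit's tests): a v16i16 shuffle that blends a value with zero at alternating word positions, repeating the same pattern in each 128-bit lane, can now be matched to a single VPBLENDW against a zeroed register.

```llvm
; Hypothetical reduced example (not part of this commit's test files).
; On an AVX2 target, a per-lane-repeating v16i16 blend with zero like this
; can now lower to vpxor + vpblendw instead of a masked AND.
define <16 x i16> @blendw_v16i16(<16 x i16> %a) {
  ; Indices 0-15 select from %a, 16-31 select from the zero vector;
  ; the odd word positions in both 128-bit lanes come from zero.
  %r = shufflevector <16 x i16> %a, <16 x i16> zeroinitializer,
       <16 x i32> <i32 0, i32 17, i32 2, i32 19, i32 4, i32 21, i32 6, i32 23,
                   i32 8, i32 25, i32 10, i32 27, i32 12, i32 29, i32 14, i32 31>
  ret <16 x i16> %r
}
```

VPBLENDW's 8-bit immediate is replicated across both 128-bit lanes of a ymm register, which is why the blend pattern must repeat per lane. As the test diffs below show, the new vpxor/vpblendw sequence also avoids the constant-pool load (vbroadcastss/vpbroadcastd from %rip) that the previous vandps/vpand lowering required.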
Diffstat (limited to 'llvm/test/CodeGen/X86')
| | | |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll | 5 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll | 7 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll | 12 |
3 files changed, 12 insertions, 12 deletions
```diff
diff --git a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
index eb51657ea61..c425e3a92d1 100644
--- a/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
+++ b/llvm/test/CodeGen/X86/clear_upper_vector_element_bits.ll
@@ -103,7 +103,6 @@ define <4 x i32> @_clearupper4xi32a(<4 x i32>) nounwind {
   ret <4 x i32> %v3
 }
 
-; FIXME: Missed vpblendw on AVX2 target
 define <8 x i32> @_clearupper8xi32a(<8 x i32>) nounwind {
 ; SSE-LABEL: _clearupper8xi32a:
 ; SSE:       # BB#0:
@@ -119,8 +118,8 @@ define <8 x i32> @_clearupper8xi32a(<8 x i32>) nounwind {
 ;
 ; AVX2-LABEL: _clearupper8xi32a:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vbroadcastss {{.*}}(%rip), %ymm1
-; AVX2-NEXT:    vandps %ymm1, %ymm0, %ymm0
+; AVX2-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7],ymm0[8],ymm1[9],ymm0[10],ymm1[11],ymm0[12],ymm1[13],ymm0[14],ymm1[15]
 ; AVX2-NEXT:    retq
   %x0 = extractelement <8 x i32> %0, i32 0
   %x1 = extractelement <8 x i32> %0, i32 1
diff --git a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
index d17db7d9779..7df3c307042 100644
--- a/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
+++ b/llvm/test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
@@ -103,9 +103,6 @@ define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
 ; AVX2: [[FPMASKCSTADDR_v8:.LCPI[0-9_]+]]:
 ; AVX2-NEXT: .long 1199570944 # float 65536
 
-; AVX2: [[MASKCSTADDR_v8:.LCPI[0-9_]+]]:
-; AVX2-NEXT: .long 65535 # 0xffff
-
 define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
 ; SSE2-LABEL: test_uitofp_v8i32_to_v8f32:
 ; SSE2: # BB#0:
@@ -166,8 +163,8 @@ define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
 ; AVX2-NEXT:    vcvtdq2ps %ymm1, %ymm1
 ; AVX2-NEXT:    vbroadcastss [[FPMASKCSTADDR_v8]](%rip), %ymm2
 ; AVX2-NEXT:    vmulps %ymm2, %ymm1, %ymm1
-; AVX2-NEXT:    vpbroadcastd [[MASKCSTADDR_v8]](%rip), %ymm2
-; AVX2-NEXT:    vpand %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vxorps %ymm2, %ymm2, %ymm2
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7],ymm0[8],ymm2[9],ymm0[10],ymm2[11],ymm0[12],ymm2[13],ymm0[14],ymm2[15]
 ; AVX2-NEXT:    vcvtdq2ps %ymm0, %ymm0
 ; AVX2-NEXT:    vaddps %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT:    retq
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index 03609f6d22c..1385929ab8c 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -74,12 +74,14 @@ define <32 x i8> @combine_pshufb_vpermps(<8 x float> %a) {
 define <32 x i8> @combine_and_pshufb(<32 x i8> %a0) {
 ; X32-LABEL: combine_and_pshufb:
 ; X32:       # BB#0:
-; X32-NEXT:    vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; X32-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: combine_and_pshufb:
 ; X64:       # BB#0:
-; X64-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; X64-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
 ; X64-NEXT:    retq
   %1 = shufflevector <32 x i8> %a0, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 32, i32 32, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
   %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
@@ -89,12 +91,14 @@ define <32 x i8> @combine_and_pshufb(<32 x i8> %a0) {
 define <32 x i8> @combine_pshufb_and(<32 x i8> %a0) {
 ; X32-LABEL: combine_pshufb_and:
 ; X32:       # BB#0:
-; X32-NEXT:    vandps {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; X32-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: combine_pshufb_and:
 ; X64:       # BB#0:
-; X64-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT:    vpxor %ymm1, %ymm1, %ymm1
+; X64-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3],ymm0[4],ymm1[5,6,7],ymm0[8],ymm1[9,10,11],ymm0[12],ymm1[13,14,15]
 ; X64-NEXT:    retq
   %1 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> <i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 0, i8 1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 8, i8 9, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
   %2 = shufflevector <32 x i8> %1, <32 x i8> zeroinitializer, <32 x i32> <i32 0, i32 1, i32 32, i32 32, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
```

