-rw-r--r--   llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll  | 26 ++++++++++++++++++++++++++
-rw-r--r--   llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll | 12 ++++++++++++
2 files changed, 38 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
index 351c91e7acf..2dc50cfe416 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx.ll
@@ -123,6 +123,32 @@ define <8 x float> @combine_vpermilvar_vperm2f128_zero_8f32(<8 x float> %a0) {
   ret <8 x float> %3
 }
 
+define <4 x double> @combine_vperm2f128_vpermilvar_as_vpblendpd(<4 x double> %a0) {
+; AVX1-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; AVX1-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; AVX1-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3]
+; AVX1-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: combine_vperm2f128_vpermilvar_as_vpblendpd:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; AVX512F-NEXT:    retq
+  %1 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %a0, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
+  %2 = shufflevector <4 x double> %1, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+  %3 = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %2, <4 x i64> <i64 2, i64 0, i64 2, i64 0>)
+  ret <4 x double> %3
+}
+
 define <8 x float> @combine_vpermilvar_8f32_movddup(<8 x float> %a0) {
 ; ALL-LABEL: combine_vpermilvar_8f32_movddup:
 ; ALL:       # BB#0:
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index b087fc43ae3..97492dddd3d 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -62,6 +62,18 @@ define <4 x i64> @combine_permq_pshufb_as_vperm2i128(<4 x i64> %a0) {
   ret <4 x i64> %5
 }
 
+define <32 x i8> @combine_permq_pshufb_as_vpblendd(<4 x i64> %a0) {
+; CHECK-LABEL: combine_permq_pshufb_as_vpblendd:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; CHECK-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; CHECK-NEXT:    retq
+  %1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+  %2 = bitcast <4 x i64> %1 to <32 x i8>
+  %3 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %2, <32 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255, i8 255>)
+  ret <32 x i8> %3
+}
+
 define <16 x i8> @combine_pshufb_as_vpbroadcastb128(<16 x i8> %a) {
 ; CHECK-LABEL: combine_pshufb_as_vpbroadcastb128:
 ; CHECK:       # BB#0:
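For reference, here is a minimal IR sketch of what both new tests reduce to; the function names below are hypothetical and are not part of the patch. In each case the back-to-back lane shuffles cancel out, leaving a single blend of the source vector with zero, which is the vpblendpd/vpblendd codegen the test names anticipate.

; Equivalent forms of the two new tests (hypothetical names, for reference only).

define <4 x double> @vperm2f128_vpermilvar_equivalent(<4 x double> %a0) {
  ; The two vpermilvar.pd.256 calls with control <2,0,2,0> each swap the
  ; doubles within every 128-bit lane, so they are self-inverse and only the
  ; middle shuffle survives: keep elements 0-1 of %a0, zero elements 2-3.
  %r = shufflevector <4 x double> %a0, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  ret <4 x double> %r
}

define <32 x i8> @permq_pshufb_equivalent(<4 x i64> %a0) {
  ; The pshufb's low-lane control bytes (8..15 then 0..7) undo the qword swap
  ; from the first shufflevector, and its <i8 255> bytes (high bit set) zero
  ; the upper 128-bit lane, so the net result keeps i64 elements 0-1 of %a0
  ; and zeroes elements 2-3.
  %r = shufflevector <4 x i64> %a0, <4 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  %b = bitcast <4 x i64> %r to <32 x i8>
  ret <32 x i8> %b
}

Note that the committed CHECK lines still show vpshufb sequences (and, for AVX1, a vblendpd bracketed by two redundant vpermilpd swaps), so these tests appear to capture the codegen before the shuffle combiner is taught to fold such chains directly into blends with zero.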