author     Simon Pilgrim <llvm-dev@redking.me.uk>  2017-02-21 16:05:35 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>  2017-02-21 16:05:35 +0000
commit     4cc6dd0cf6a73b86155842d6fa9cb32b98aaba47 (patch)
tree       6af859dc3a4d4c85c17622259e72b7fc1c495af8 /llvm/test
parent     df827a7165d325d85eeaeb1969ad1b23364f5dc0 (diff)
[X86][AVX] Add tests showing missed VPBROADCASTQ folding on 32-bit targets.
As i64 isn't a value type on 32-bit targets, we fail to fold the VZEXT_LOAD into VPBROADCASTQ. Also shows that we're not decoding VPERMIV3 shuffles very well.

llvm-svn: 295729
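For context, the fold these tests target would collapse the scalar load plus register broadcast currently emitted on 32-bit targets into VPBROADCASTQ's 64-bit memory form. Below is a minimal sketch of the pattern in plain LLVM IR; the folded X32 output in the comments is hypothetical (what the fold would produce), not current codegen, which is the two-instruction vmovq + vpbroadcastq sequence seen in the diff:

; Broadcast of a scalar i64 argument, written as a splat shufflevector.
; Hypothetical ideal X32 codegen once the VZEXT_LOAD fold lands:
;   vpbroadcastq {{[0-9]+}}(%esp), %xmm0
;   retl
define <2 x i64> @broadcast_i64_arg(i64 %a0) {
  %v = insertelement <2 x i64> undef, i64 %a0, i32 0
  %b = shufflevector <2 x i64> %v, <2 x i64> undef, <2 x i32> zeroinitializer
  ret <2 x i64> %b
}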
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll     | 36
-rw-r--r--  llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll | 18
2 files changed, 54 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index 41c01d1809d..9223498faee 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -774,6 +774,42 @@ define <32 x i8> @combine_unpack_unpack_pshufb(<32 x i8> %a0) {
ret <32 x i8> %6
}
+define <16 x i8> @combine_broadcast_pshufb_insertion_v2i64(i64 %a0) {
+; X32-LABEL: combine_broadcast_pshufb_insertion_v2i64:
+; X32: # BB#0:
+; X32-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: vpbroadcastq %xmm0, %xmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_broadcast_pshufb_insertion_v2i64:
+; X64: # BB#0:
+; X64-NEXT: vmovq %rdi, %xmm0
+; X64-NEXT: vpbroadcastq %xmm0, %xmm0
+; X64-NEXT: retq
+ %1 = insertelement <2 x i64> undef, i64 %a0, i32 0
+ %2 = bitcast <2 x i64> %1 to <16 x i8>
+ %3 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)
+ ret <16 x i8> %3
+}
+
+define <8 x i32> @combine_broadcast_permd_insertion_v4i64(i64 %a0) {
+; X32-LABEL: combine_broadcast_permd_insertion_v4i64:
+; X32: # BB#0:
+; X32-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-NEXT: vbroadcastsd %xmm0, %ymm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_broadcast_permd_insertion_v4i64:
+; X64: # BB#0:
+; X64-NEXT: vmovq %rdi, %xmm0
+; X64-NEXT: vpbroadcastq %xmm0, %ymm0
+; X64-NEXT: retq
+ %1 = insertelement <4 x i64> undef, i64 %a0, i32 0
+ %2 = bitcast <4 x i64> %1 to <8 x i32>
+ %3 = tail call <8 x i32> @llvm.x86.avx2.permd(<8 x i32> %2, <8 x i32> <i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1>)
+ ret <8 x i32> %3
+}
+
define <8 x i32> @constant_fold_permd() {
; X32-LABEL: constant_fold_permd:
; X32: # BB#0:
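The avx512bw diff below illustrates the VPERMIV3 decode gap called out in the commit message: the vpermi2q test passes an all-zero index operand, so every output lane selects element 0 of the first data operand and the whole shuffle is just a broadcast. An equivalent form in plain LLVM IR, with the single-instruction lowering it could decode to shown as a hypothetical comment (AVX-512's VPBROADCASTQ accepts a GPR source):

; Splat of lane 0 across a 512-bit vector; equivalent to the
; vpermi2q-with-zero-indices test in the next diff.
; Hypothetical ideal X64 codegen: vpbroadcastq %rdi, %zmm0
define <8 x i64> @broadcast_i64_v8i64(i64 %a0) {
  %v = insertelement <8 x i64> undef, i64 %a0, i32 0
  %b = shufflevector <8 x i64> %v, <8 x i64> undef, <8 x i32> zeroinitializer
  ret <8 x i64> %b
}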
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
index 687098f9abf..9d675f64f86 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-combining-avx512bw.ll
@@ -1133,3 +1133,21 @@ define <16 x float> @combine_vpermi2var_vpermvar_16f32_as_vperm2_zero(<16 x floa
ret <16 x float> %res1
}
+define <8 x i64> @combine_broadcast_vpermvar_insertion_v8i64(i64 %a0) {
+; X32-LABEL: combine_broadcast_vpermvar_insertion_v8i64:
+; X32: # BB#0:
+; X32-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; X32-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; X32-NEXT: vpermi2q %zmm0, %zmm1, %zmm0
+; X32-NEXT: retl
+;
+; X64-LABEL: combine_broadcast_vpermvar_insertion_v8i64:
+; X64: # BB#0:
+; X64-NEXT: vmovq %rdi, %xmm1
+; X64-NEXT: vpxord %zmm0, %zmm0, %zmm0
+; X64-NEXT: vpermi2q %zmm0, %zmm1, %zmm0
+; X64-NEXT: retq
+ %1 = insertelement <8 x i64> undef, i64 %a0, i32 0
+ %2 = tail call <8 x i64> @llvm.x86.avx512.mask.vpermi2var.q.512(<8 x i64> %1, <8 x i64> zeroinitializer, <8 x i64> undef, i8 -1)
+ ret <8 x i64> %2
+}
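For reference, the X32/X64 CHECK prefixes above correspond to RUN lines at the top of each test file. A sketch for the AVX2 file, assuming the usual llc/FileCheck pairing for these prefixes (the exact triples and attribute flags here are an assumption, not copied from the file):

; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64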