author     Benjamin Kramer <benny.kra@googlemail.com>  2018-01-17 13:01:06 +0000
committer  Benjamin Kramer <benny.kra@googlemail.com>  2018-01-17 13:01:06 +0000
commit     8d073a2c2d65f5a0ea945a74aff510f01b1181c3 (patch)
tree       a5e21a58fb261dddc3716f7ce62953e8a19a8226 /llvm/test
parent     05dc3527de4255c28a149dee410edd0f7a3dc0d1 (diff)
[X86] Don't mutate shuffle arguments after early-out for AVX512
The match* functions have the annoying behavior of modifying their inputs. Save and restore the inputs, just in case the early-out for AVX512 is hit. This is still not great, and it's only a matter of time before this kind of bug happens again, but I couldn't come up with a better pattern without rewriting significant chunks of this code.

Fixes PR35977.

llvm-svn: 322644
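The shape of the fix is simple to sketch: snapshot the operands before calling a match* helper, and put the snapshot back if the AVX512 early-out fires, so later lowering code never sees the helper's in-place rewrites. The C++ below is a minimal sketch of that pattern only; the function names and the plain std::vector mask are illustrative stand-ins, not the actual code in X86ISelLowering.cpp.

#include <vector>

// Stand-in for the match* helpers: reports whether a pattern matched, but
// rewrites Mask in place as a side effect, even if the caller discards it.
static bool matchShuffleAsSomething(std::vector<int> &Mask) {
  for (int &M : Mask)
    if (M < 0)
      M = 0; // canonicalize undef lanes -- mutates the caller's data
  return true;
}

static bool lowerShuffle(std::vector<int> &Mask, bool HasAVX512) {
  // Save the input before calling a helper that may modify it.
  std::vector<int> SavedMask = Mask;

  if (matchShuffleAsSomething(Mask) && HasAVX512) {
    // Early-out for AVX512: restore the input, otherwise the rest of the
    // lowering would run on a mask the helper already rewrote (PR35977).
    Mask = SavedMask;
    return false;
  }
  return true; // non-AVX512 path keeps the rewritten mask
}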
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll | 40
1 file changed, 40 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
index 6abe609e26f..84ecf47fee7 100644
--- a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
+++ b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll
@@ -4788,3 +4788,43 @@ define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mem_mask1(<8 x doub
ret <2 x double> %res
}
+; PR35977
+define void @test_zext_v8i8_to_v8i16(<8 x i8>* %arg, <8 x i16>* %arg1) {
+; CHECK-LABEL: test_zext_v8i8_to_v8i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; CHECK-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; CHECK-NEXT: vmovdqa %xmm0, (%rsi)
+; CHECK-NEXT: retq
+ %tmp = getelementptr <8 x i8>, <8 x i8>* %arg, i32 0
+ %tmp2 = load <8 x i8>, <8 x i8>* %tmp
+ %tmp3 = extractelement <8 x i8> %tmp2, i32 0
+ %tmp4 = zext i8 %tmp3 to i16
+ %tmp5 = insertelement <8 x i16> undef, i16 %tmp4, i32 0
+ %tmp6 = extractelement <8 x i8> %tmp2, i32 1
+ %tmp7 = zext i8 %tmp6 to i16
+ %tmp8 = insertelement <8 x i16> %tmp5, i16 %tmp7, i32 1
+ %tmp9 = extractelement <8 x i8> %tmp2, i32 2
+ %tmp10 = zext i8 %tmp9 to i16
+ %tmp11 = insertelement <8 x i16> %tmp8, i16 %tmp10, i32 2
+ %tmp12 = extractelement <8 x i8> %tmp2, i32 3
+ %tmp13 = zext i8 %tmp12 to i16
+ %tmp14 = insertelement <8 x i16> %tmp11, i16 %tmp13, i32 3
+ %tmp15 = extractelement <8 x i8> %tmp2, i32 4
+ %tmp16 = zext i8 %tmp15 to i16
+ %tmp17 = insertelement <8 x i16> %tmp14, i16 %tmp16, i32 4
+ %tmp18 = extractelement <8 x i8> %tmp2, i32 5
+ %tmp19 = zext i8 %tmp18 to i16
+ %tmp20 = insertelement <8 x i16> %tmp17, i16 %tmp19, i32 5
+ %tmp21 = extractelement <8 x i8> %tmp2, i32 6
+ %tmp22 = zext i8 %tmp21 to i16
+ %tmp23 = insertelement <8 x i16> %tmp20, i16 %tmp22, i32 6
+ %tmp24 = extractelement <8 x i8> %tmp2, i32 7
+ %tmp25 = zext i8 %tmp24 to i16
+ %tmp26 = insertelement <8 x i16> %tmp23, i16 %tmp25, i32 7
+ %tmp27 = shl <8 x i16> %tmp26, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
+ %tmp28 = getelementptr <8 x i16>, <8 x i16>* %arg1, i32 0
+ store <8 x i16> %tmp27, <8 x i16>* %tmp28
+ ret void
+}
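For reference, tests in this file are driven by llc and checked with FileCheck against the CHECK lines above. The actual RUN line sits at the top of partial_permute.ll and is not part of this hunk; a typical RUN line for an AVX512 CodeGen test has roughly this shape (the exact -mattr feature list here is an assumption):

; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f,+avx512vl,+avx512bw | FileCheck %s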