From 55cf8809002dd7ab04943ac66e19358e3fe07b4b Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Thu, 28 Dec 2017 19:46:11 +0000
Subject: [X86] When lowering extending loads from v2i1/v4i1, if we have VLX,
 use a narrower extend.

Previously we used an extend from v8i1 to v8i32/v8i64 and then extracted
to the final width. But if we have VLX we should extract first, so we
don't end up with an overly large extend.

This allows us to use vcmpeq to make all ones for the sign extend when
DQI isn't available. Otherwise we get a VPTERNLOG.

If we make v2i1/v4i1 legal as proposed in D41560, we could always do this
and rely on the lowering of the extend to widen when necessary.

llvm-svn: 321538
---
 llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

(limited to 'llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll')

diff --git a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
index 6d9f832d861..45a48fae146 100644
--- a/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-int-to-vector-bool.ll
@@ -46,9 +46,8 @@ define <2 x i1> @bitcast_i2_2i1(i2 zeroext %a0) {
 ; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
 ; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
 ; AVX512-NEXT: kmovd %eax, %k1
-; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %zmm0
-; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; AVX512-NEXT: retq
   %1 = bitcast i2 %a0 to <2 x i1>
   ret <2 x i1> %1
@@ -90,10 +89,8 @@ define <4 x i1> @bitcast_i4_4i1(i4 zeroext %a0) {
 ; AVX512-NEXT: movb %dil, -{{[0-9]+}}(%rsp)
 ; AVX512-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax
 ; AVX512-NEXT: kmovd %eax, %k1
-; AVX512-NEXT: vpcmpeqd %ymm0, %ymm0, %ymm0
-; AVX512-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
-; AVX512-NEXT: # kill: def %xmm0 killed %xmm0 killed %ymm0
-; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; AVX512-NEXT: retq
   %1 = bitcast i4 %a0 to <4 x i1>
   ret <4 x i1> %1
--
cgit v1.2.3
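
For reference, a minimal .ll sketch of the sign-extending load from v2i1 that the commit message describes. The function name, file name, and llc invocation below are illustrative assumptions and are not taken from this commit or from bitcast-int-to-vector-bool.ll; with VLX enabled, this kind of pattern should get the narrower xmm-sized all-ones expansion (vpcmpeq + masked vmovdqa) rather than a zmm-wide VPTERNLOG.

; Illustrative sketch only, not part of the patch above.
; Assumed invocation: llc -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl sextload_v2i1.ll -o -
define <2 x i64> @sextload_2i1_2i64(<2 x i1>* %p) {
  ; load + sext combine into a sign-extending load from v2i1
  %m = load <2 x i1>, <2 x i1>* %p
  %r = sext <2 x i1> %m to <2 x i64>
  ret <2 x i64> %r
}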