Diffstat (limited to 'llvm/test/CodeGen/X86/vselect-avx.ll')
| -rw-r--r-- | llvm/test/CodeGen/X86/vselect-avx.ll | 58 |
1 file changed, 58 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/vselect-avx.ll b/llvm/test/CodeGen/X86/vselect-avx.ll
index 7926b0c71f1..0c0f4bbf992 100644
--- a/llvm/test/CodeGen/X86/vselect-avx.ll
+++ b/llvm/test/CodeGen/X86/vselect-avx.ll
@@ -25,3 +25,61 @@ body:
   store <4 x i16> %predphi42, <4 x i16>* %b, align 8
   ret void
 }
+
+; Improve code coverage.
+;
+; When shrinking the condition used by the select to match a blend, this
+; test case exercises the path where the modified node is not the root
+; of the condition.
+;
+; CHECK-LABEL: test2:
+; CHECK: vpslld $31, %xmm0, %xmm0
+; CHECK-NEXT: vpmovsxdq %xmm0, %xmm1
+; CHECK-NEXT: vpshufd $78, %xmm0, %xmm0 ## xmm0 = xmm0[2,3,0,1]
+; CHECK-NEXT: vpmovsxdq %xmm0, %xmm0
+; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, [[MASK:%ymm[0-9]+]]
+; CHECK: vblendvpd [[MASK]]
+; CHECK: retq
+define void @test2(double** %call1559, i64 %indvars.iv4198, <4 x i1> %tmp1895) {
+bb:
+  %arrayidx1928 = getelementptr inbounds double** %call1559, i64 %indvars.iv4198
+  %tmp1888 = load double** %arrayidx1928, align 8
+  %predphi.v.v = select <4 x i1> %tmp1895, <4 x double> <double -5.000000e-01, double -5.000000e-01, double -5.000000e-01, double -5.000000e-01>, <4 x double> <double 5.000000e-01, double 5.000000e-01, double 5.000000e-01, double 5.000000e-01>
+  %tmp1900 = bitcast double* %tmp1888 to <4 x double>*
+  store <4 x double> %predphi.v.v, <4 x double>* %tmp1900, align 8
+  ret void
+}
+
+; For this test, we used to optimize the conditional mask for the blend, i.e.,
+; we shrunk some of its bits.
+; However, this same mask was used in another select (%predphi31) that turned out
+; to be optimized into an and. In that case, the conditional mask was wrong.
+;
+; Make sure that the and is fed by the original mask.
+;
+; <rdar://problem/18819506>
+
+; Note: For now, hard code the ORIG_MASK and SHRUNK_MASK registers, because we
+; cannot express that SHRUNK_MASK must not be equal to ORIG_MASK. Otherwise,
+; even a faulty pattern would pass!
+;
+; CHECK-LABEL: test3:
+; Compute the original mask.
+; CHECK: vpcmpeqd {{%xmm[0-9]+}}, {{%xmm[0-9]+}}, [[ORIG_MASK:%xmm0]]
+; Shrink the bits of the mask.
+; CHECK-NEXT: vpslld $31, [[ORIG_MASK]], [[SHRUNK_MASK:%xmm3]]
+; Use the shrunk mask in the blend.
+; CHECK-NEXT: vblendvps [[SHRUNK_MASK]], %xmm{{[0-9]+}}, %xmm{{[0-9]+}}, %xmm{{[0-9]+}}
+; Use the original mask in the and.
+; CHECK-NEXT: vpand LCPI2_2(%rip), [[ORIG_MASK]], {{%xmm[0-9]+}}
+; CHECK: retq
+define void @test3(<4 x i32> %induction30, <4 x i16>* %tmp16, <4 x i16>* %tmp17, <4 x i16> %tmp3, <4 x i16> %tmp12) {
+  %tmp6 = srem <4 x i32> %induction30, <i32 3, i32 3, i32 3, i32 3>
+  %tmp7 = icmp eq <4 x i32> %tmp6, zeroinitializer
+  %predphi = select <4 x i1> %tmp7, <4 x i16> %tmp3, <4 x i16> %tmp12
+  %predphi31 = select <4 x i1> %tmp7, <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>, <4 x i16> zeroinitializer
+
+  store <4 x i16> %predphi31, <4 x i16>* %tmp16, align 8
+  store <4 x i16> %predphi, <4 x i16>* %tmp17, align 8
+  ret void
+}
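
For context, the sketch below is illustrative only and not part of this patch; the function name and RUN invocation are assumptions. It shows the kind of vselect on a <4 x i1> condition that these tests target: only the low bit of each condition lane is defined, so the AVX lowering is expected to shift that bit into the sign position (vpslld $31) before using the value as a variable-blend mask.

; Illustrative sketch (hypothetical test, not part of this patch).
; RUN: llc %s -o - -mattr=+avx
define <4 x float> @blend_sketch(<4 x i1> %cond, <4 x float> %a, <4 x float> %b) {
  ; Only bit 0 of each lane of %cond is meaningful, so the backend has to
  ; materialize a full mask (e.g. with vpslld $31) before it can emit vblendvps.
  %sel = select <4 x i1> %cond, <4 x float> %a, <4 x float> %b
  ret <4 x float> %sel
}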

