diff options
Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r--  llvm/test/CodeGen/X86/vselect-avx.ll        | 11
-rw-r--r--  llvm/test/CodeGen/X86/vselect-constants.ll  | 65
-rw-r--r--  llvm/test/CodeGen/X86/widen_compare-1.ll    |  4
3 files changed, 33 insertions, 47 deletions
diff --git a/llvm/test/CodeGen/X86/vselect-avx.ll b/llvm/test/CodeGen/X86/vselect-avx.ll
index 5825a56b6f9..603437e35ea 100644
--- a/llvm/test/CodeGen/X86/vselect-avx.ll
+++ b/llvm/test/CodeGen/X86/vselect-avx.ll
@@ -151,21 +151,22 @@ define <32 x i8> @PR22706(<32 x i1> %x) {
 ; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
 ; AVX1-NEXT: vpcmpgtb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
+; AVX1-NEXT: vpaddb %xmm4, %xmm1, %xmm1
 ; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
 ; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpcmpgtb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vandnps {{.*}}(%rip), %ymm0, %ymm1
-; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: PR22706:
 ; AVX2: ## BB#0:
 ; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
 ; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
-; AVX2-NEXT: vpblendvb %ymm0, {{.*}}(%rip), %ymm1, %ymm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
 ; AVX2-NEXT: retq
 %tmp = select <32 x i1> %x, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <32 x i8> <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
 ret <32 x i8> %tmp
diff --git a/llvm/test/CodeGen/X86/vselect-constants.ll b/llvm/test/CodeGen/X86/vselect-constants.ll
index 838c03500c6..4ce2ecfa739 100644
--- a/llvm/test/CodeGen/X86/vselect-constants.ll
+++ b/llvm/test/CodeGen/X86/vselect-constants.ll
@@ -8,6 +8,11 @@
 ; Each minimal select test is repeated with a more typical pattern that includes a compare to
 ; generate the condition value.
+; TODO: If we don't have blendv, this can definitely be improved. There's also a selection of
+; chips where it makes sense to transform the general case blendv to 2 bit-ops. That should be
+; a uarch-specific transform. At some point (Ryzen?), the implementation should catch up to the
+; architecture, so blendv is as fast as a single bit-op.
+
 define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_C1_or_C2_vec:
 ; SSE: # BB#0:
@@ -53,19 +58,14 @@ define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_Cplus1_or_C_vec:
 ; SSE: # BB#0:
-; SSE-NEXT: pslld $31, %xmm0
-; SSE-NEXT: psrad $31, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
 ; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: sel_Cplus1_or_C_vec:
 ; AVX: # BB#0:
-; AVX-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
-; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
 %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
 ret <4 x i32> %add
@@ -75,17 +75,16 @@ define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: cmp_sel_Cplus1_or_C_vec:
 ; SSE: # BB#0:
 ; SSE-NEXT: pcmpeqd %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
+; SSE-NEXT: psubd %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: cmp_sel_Cplus1_or_C_vec:
 ; AVX: # BB#0:
 ; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
-; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
+; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: retq
 %cond = icmp eq <4 x i32> %x, %y
 %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
@@ -97,17 +96,14 @@ define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) {
 ; SSE: # BB#0:
 ; SSE-NEXT: pslld $31, %xmm0
 ; SSE-NEXT: psrad $31, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: sel_Cminus1_or_C_vec:
 ; AVX: # BB#0:
 ; AVX-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [44,2,0,1]
-; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
 %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
 ret <4 x i32> %add
@@ -117,17 +113,13 @@ define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
 ; SSE-LABEL: cmp_sel_Cminus1_or_C_vec:
 ; SSE: # BB#0:
 ; SSE-NEXT: pcmpeqd %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: cmp_sel_Cminus1_or_C_vec:
 ; AVX: # BB#0:
 ; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [44,2,0,1]
-; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
 %cond = icmp eq <4 x i32> %x, %y
 %add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
@@ -168,18 +160,16 @@ define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_0_or_minus1_vec:
 ; SSE: # BB#0:
-; SSE-NEXT: pslld $31, %xmm0
-; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
 ; SSE-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: paddd %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: sel_0_or_minus1_vec:
 ; AVX: # BB#0:
-; AVX-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq
 %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
 ret <4 x i32> %add
@@ -238,17 +228,12 @@ define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
 define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
 ; SSE-LABEL: sel_0_or_1_vec:
 ; SSE: # BB#0:
-; SSE-NEXT: pslld $31, %xmm0
-; SSE-NEXT: psrad $31, %xmm0
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm0
+; SSE-NEXT: andnps {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
 ; AVX-LABEL: sel_0_or_1_vec:
 ; AVX: # BB#0:
-; AVX-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [1,1,1,1]
-; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: vandnps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX-NEXT: retq
 %add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
 ret <4 x i32> %add
diff --git a/llvm/test/CodeGen/X86/widen_compare-1.ll b/llvm/test/CodeGen/X86/widen_compare-1.ll
index 8ea0db53a39..e8d993d2280 100644
--- a/llvm/test/CodeGen/X86/widen_compare-1.ll
+++ b/llvm/test/CodeGen/X86/widen_compare-1.ll
@@ -7,12 +7,12 @@ define <2 x i16> @compare_v2i64_to_v2i16(<2 x i16>* %src) nounwind {
 ; X86-LABEL: compare_v2i64_to_v2i16:
 ; X86: # BB#0:
-; X86-NEXT: movaps {{.*#+}} xmm0 = [65535,0,65535,0]
+; X86-NEXT: pcmpeqd %xmm0, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: compare_v2i64_to_v2i16:
 ; X64: # BB#0:
-; X64-NEXT: movaps {{.*#+}} xmm0 = [65535,65535]
+; X64-NEXT: pcmpeqd %xmm0, %xmm0
 ; X64-NEXT: retq
 %val = load <2 x i16>, <2 x i16>* %src, align 4
 %cmp = icmp uge <2 x i16> %val, %val

