author     Sanjay Patel <spatel@rotateright.com>    2017-08-24 23:24:43 +0000
committer  Sanjay Patel <spatel@rotateright.com>    2017-08-24 23:24:43 +0000
commit     e404cbff6630ef8f14fc7ba435d957136ff71681 (patch)
tree       beb46f86b6d2c0627f6dd18036ec04e041687d9f /llvm/test
parent     872f689d0a5507742cb6b8ed35cad4981ceb2d85 (diff)
[DAG] convert vector select-of-constants to logic/math
This goes back to a discussion about IR canonicalization. We'd like to preserve and convert more IR to 'select' than we currently do because that's likely the best choice in IR:
http://lists.llvm.org/pipermail/llvm-dev/2016-September/105335.html
...but that's often not true for codegen, so we need to account for this pattern coming into the backend and transform it to better DAG ops.

Steps in this patch:

1. Add an EVT param to the existing convertSelectOfConstantsToMath() TLI hook to more finely enable this transform. Other targets will probably want that anyway to distinguish scalars from vectors. We're using that here to exclude AVX512 targets, but it may not be necessary.

2. Convert a vselect to ext+add. This eliminates a constant load/materialization, and the vector ext is often free.

Implementing a more general fold using xor+and can be a follow-up for targets that don't have a legal vselect. It's also possible that we can remove the TLI hook for the special case fold implemented here because we're eliminating a constant, but it needs to be tested on other targets.

Differential Revision: https://reviews.llvm.org/D36840

llvm-svn: 311731
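For illustration only (not part of the patch), here is a minimal IR-level sketch of the select-of-constants-to-math idea, using the constants from the test functions updated below. The function names are hypothetical, and the actual transform operates on SelectionDAG nodes gated by the convertSelectOfConstantsToMath() hook, not on IR:

define <4 x i32> @sel_Cplus1_or_C(<4 x i1> %cond) {
  ; select between two constant vectors that differ by exactly 1 per lane
  %sel = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
  ret <4 x i32> %sel
}

; Roughly what the ext+add form looks like, expressed as IR: extend the i1
; condition to 0/1 and add the smaller constant, so only one constant vector
; has to be materialized.
define <4 x i32> @sel_Cplus1_or_C_math(<4 x i1> %cond) {
  %ext = zext <4 x i1> %cond to <4 x i32>
  %add = add <4 x i32> %ext, <i32 42, i32 0, i32 -2, i32 -1>
  ret <4 x i32> %add
}

When the condition is already a sign-extended compare mask (0 or -1 per lane), the same idea shows up as a subtract of that mask from the constant, as in the updated cmp_* checks below (vsubuwm/psubd).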
Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/PowerPC/vselect-constants.ll | 80
-rw-r--r--  llvm/test/CodeGen/X86/vselect-avx.ll           | 11
-rw-r--r--  llvm/test/CodeGen/X86/vselect-constants.ll     | 65
-rw-r--r--  llvm/test/CodeGen/X86/widen_compare-1.ll       |  4
4 files changed, 53 insertions, 107 deletions
diff --git a/llvm/test/CodeGen/PowerPC/vselect-constants.ll b/llvm/test/CodeGen/PowerPC/vselect-constants.ll
index 2dbe12e882d..077eb2defc0 100644
--- a/llvm/test/CodeGen/PowerPC/vselect-constants.ll
+++ b/llvm/test/CodeGen/PowerPC/vselect-constants.ll
@@ -47,18 +47,12 @@ define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_Cplus1_or_C_vec:
; CHECK: # BB#0:
-; CHECK-NEXT: vspltisw 3, -16
-; CHECK-NEXT: vspltisw 4, 15
+; CHECK-NEXT: vspltisw 3, 1
; CHECK-NEXT: addis 3, 2, .LCPI2_0@toc@ha
-; CHECK-NEXT: addis 4, 2, .LCPI2_1@toc@ha
; CHECK-NEXT: addi 3, 3, .LCPI2_0@toc@l
-; CHECK-NEXT: addi 4, 4, .LCPI2_1@toc@l
-; CHECK-NEXT: lvx 18, 0, 3
-; CHECK-NEXT: lvx 19, 0, 4
-; CHECK-NEXT: vsubuwm 3, 4, 3
-; CHECK-NEXT: vslw 2, 2, 3
-; CHECK-NEXT: vsraw 2, 2, 3
-; CHECK-NEXT: xxsel 34, 51, 50, 34
+; CHECK-NEXT: lvx 19, 0, 3
+; CHECK-NEXT: xxland 34, 34, 35
+; CHECK-NEXT: vadduwm 2, 2, 19
; CHECK-NEXT: blr
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
ret <4 x i32> %add
@@ -69,12 +63,9 @@ define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK: # BB#0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: addis 3, 2, .LCPI3_0@toc@ha
-; CHECK-NEXT: addis 4, 2, .LCPI3_1@toc@ha
; CHECK-NEXT: addi 3, 3, .LCPI3_0@toc@l
-; CHECK-NEXT: addi 4, 4, .LCPI3_1@toc@l
; CHECK-NEXT: lvx 19, 0, 3
-; CHECK-NEXT: lvx 4, 0, 4
-; CHECK-NEXT: xxsel 34, 36, 51, 34
+; CHECK-NEXT: vsubuwm 2, 19, 2
; CHECK-NEXT: blr
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
@@ -87,15 +78,12 @@ define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) {
; CHECK-NEXT: vspltisw 3, -16
; CHECK-NEXT: vspltisw 4, 15
; CHECK-NEXT: addis 3, 2, .LCPI4_0@toc@ha
-; CHECK-NEXT: addis 4, 2, .LCPI4_1@toc@ha
; CHECK-NEXT: addi 3, 3, .LCPI4_0@toc@l
-; CHECK-NEXT: addi 4, 4, .LCPI4_1@toc@l
-; CHECK-NEXT: lvx 18, 0, 3
-; CHECK-NEXT: lvx 19, 0, 4
+; CHECK-NEXT: lvx 19, 0, 3
; CHECK-NEXT: vsubuwm 3, 4, 3
; CHECK-NEXT: vslw 2, 2, 3
; CHECK-NEXT: vsraw 2, 2, 3
-; CHECK-NEXT: xxsel 34, 51, 50, 34
+; CHECK-NEXT: vadduwm 2, 2, 19
; CHECK-NEXT: blr
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
ret <4 x i32> %add
@@ -106,12 +94,9 @@ define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK: # BB#0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: addis 3, 2, .LCPI5_0@toc@ha
-; CHECK-NEXT: addis 4, 2, .LCPI5_1@toc@ha
; CHECK-NEXT: addi 3, 3, .LCPI5_0@toc@l
-; CHECK-NEXT: addi 4, 4, .LCPI5_1@toc@l
; CHECK-NEXT: lvx 19, 0, 3
-; CHECK-NEXT: lvx 4, 0, 4
-; CHECK-NEXT: xxsel 34, 36, 51, 34
+; CHECK-NEXT: vadduwm 2, 2, 19
; CHECK-NEXT: blr
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
@@ -123,12 +108,9 @@ define <4 x i32> @sel_minus1_or_0_vec(<4 x i1> %cond) {
; CHECK: # BB#0:
; CHECK-NEXT: vspltisw 3, -16
; CHECK-NEXT: vspltisw 4, 15
-; CHECK-NEXT: vspltisb 19, -1
-; CHECK-NEXT: xxlxor 0, 0, 0
; CHECK-NEXT: vsubuwm 3, 4, 3
; CHECK-NEXT: vslw 2, 2, 3
; CHECK-NEXT: vsraw 2, 2, 3
-; CHECK-NEXT: xxsel 34, 0, 51, 34
; CHECK-NEXT: blr
%add = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
ret <4 x i32> %add
@@ -138,9 +120,6 @@ define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmp_sel_minus1_or_0_vec:
; CHECK: # BB#0:
; CHECK-NEXT: vcmpequw 2, 2, 3
-; CHECK-NEXT: vspltisb 19, -1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: xxsel 34, 0, 51, 34
; CHECK-NEXT: blr
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@@ -150,14 +129,10 @@ define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_0_or_minus1_vec:
; CHECK: # BB#0:
-; CHECK-NEXT: vspltisw 3, -16
-; CHECK-NEXT: vspltisw 4, 15
-; CHECK-NEXT: vspltisb 19, -1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: vsubuwm 3, 4, 3
-; CHECK-NEXT: vslw 2, 2, 3
-; CHECK-NEXT: vsraw 2, 2, 3
-; CHECK-NEXT: xxsel 34, 51, 0, 34
+; CHECK-NEXT: vspltisw 3, 1
+; CHECK-NEXT: vspltisb 4, -1
+; CHECK-NEXT: xxland 34, 34, 35
+; CHECK-NEXT: vadduwm 2, 2, 4
; CHECK-NEXT: blr
%add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
ret <4 x i32> %add
@@ -167,9 +142,7 @@ define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK-LABEL: cmp_sel_0_or_minus1_vec:
; CHECK: # BB#0:
; CHECK-NEXT: vcmpequw 2, 2, 3
-; CHECK-NEXT: vspltisb 19, -1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: xxsel 34, 51, 0, 34
+; CHECK-NEXT: xxlnor 34, 34, 34
; CHECK-NEXT: blr
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
@@ -179,14 +152,8 @@ define <4 x i32> @cmp_sel_0_or_minus1_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_1_or_0_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_1_or_0_vec:
; CHECK: # BB#0:
-; CHECK-NEXT: vspltisw 3, -16
-; CHECK-NEXT: vspltisw 4, 15
-; CHECK-NEXT: vspltisw 19, 1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: vsubuwm 3, 4, 3
-; CHECK-NEXT: vslw 2, 2, 3
-; CHECK-NEXT: vsraw 2, 2, 3
-; CHECK-NEXT: xxsel 34, 0, 51, 34
+; CHECK-NEXT: vspltisw 3, 1
+; CHECK-NEXT: xxland 34, 34, 35
; CHECK-NEXT: blr
%add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
ret <4 x i32> %add
@@ -197,8 +164,7 @@ define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK: # BB#0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: vspltisw 19, 1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: xxsel 34, 0, 51, 34
+; CHECK-NEXT: xxland 34, 34, 51
; CHECK-NEXT: blr
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 1, i32 1, i32 1, i32 1>, <4 x i32> <i32 0, i32 0, i32 0, i32 0>
@@ -208,14 +174,8 @@ define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
; CHECK-LABEL: sel_0_or_1_vec:
; CHECK: # BB#0:
-; CHECK-NEXT: vspltisw 3, -16
-; CHECK-NEXT: vspltisw 4, 15
-; CHECK-NEXT: vspltisw 19, 1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: vsubuwm 3, 4, 3
-; CHECK-NEXT: vslw 2, 2, 3
-; CHECK-NEXT: vsraw 2, 2, 3
-; CHECK-NEXT: xxsel 34, 51, 0, 34
+; CHECK-NEXT: vspltisw 3, 1
+; CHECK-NEXT: xxlandc 34, 35, 34
; CHECK-NEXT: blr
%add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
ret <4 x i32> %add
@@ -226,8 +186,8 @@ define <4 x i32> @cmp_sel_0_or_1_vec(<4 x i32> %x, <4 x i32> %y) {
; CHECK: # BB#0:
; CHECK-NEXT: vcmpequw 2, 2, 3
; CHECK-NEXT: vspltisw 19, 1
-; CHECK-NEXT: xxlxor 0, 0, 0
-; CHECK-NEXT: xxsel 34, 51, 0, 34
+; CHECK-NEXT: xxlnor 0, 34, 34
+; CHECK-NEXT: xxland 34, 0, 51
; CHECK-NEXT: blr
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
diff --git a/llvm/test/CodeGen/X86/vselect-avx.ll b/llvm/test/CodeGen/X86/vselect-avx.ll
index 5825a56b6f9..603437e35ea 100644
--- a/llvm/test/CodeGen/X86/vselect-avx.ll
+++ b/llvm/test/CodeGen/X86/vselect-avx.ll
@@ -151,21 +151,22 @@ define <32 x i8> @PR22706(<32 x i1> %x) {
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
+; AVX1-NEXT: vpaddb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: vandnps {{.*}}(%rip), %ymm0, %ymm1
-; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR22706:
; AVX2: ## BB#0:
; AVX2-NEXT: vpsllw $7, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
-; AVX2-NEXT: vpblendvb %ymm0, {{.*}}(%rip), %ymm1, %ymm0
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpcmpgtb %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpaddb {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
%tmp = select <32 x i1> %x, <32 x i8> <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>, <32 x i8> <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
ret <32 x i8> %tmp
diff --git a/llvm/test/CodeGen/X86/vselect-constants.ll b/llvm/test/CodeGen/X86/vselect-constants.ll
index 838c03500c6..4ce2ecfa739 100644
--- a/llvm/test/CodeGen/X86/vselect-constants.ll
+++ b/llvm/test/CodeGen/X86/vselect-constants.ll
@@ -8,6 +8,11 @@
; Each minimal select test is repeated with a more typical pattern that includes a compare to
; generate the condition value.
+; TODO: If we don't have blendv, this can definitely be improved. There's also a selection of
+; chips where it makes sense to transform the general case blendv to 2 bit-ops. That should be
+; a uarch-specific transform. At some point (Ryzen?), the implementation should catch up to the
+; architecture, so blendv is as fast as a single bit-op.
+
define <4 x i32> @sel_C1_or_C2_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_C1_or_C2_vec:
; SSE: # BB#0:
@@ -53,19 +58,14 @@ define <4 x i32> @cmp_sel_C1_or_C2_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_Cplus1_or_C_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_Cplus1_or_C_vec:
; SSE: # BB#0:
-; SSE-NEXT: pslld $31, %xmm0
-; SSE-NEXT: psrad $31, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_Cplus1_or_C_vec:
; AVX: # BB#0:
-; AVX-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
-; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
ret <4 x i32> %add
@@ -75,17 +75,16 @@ define <4 x i32> @cmp_sel_Cplus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_Cplus1_or_C_vec:
; SSE: # BB#0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
+; SSE-NEXT: psubd %xmm0, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmp_sel_Cplus1_or_C_vec:
; AVX: # BB#0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
-; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [42,0,4294967294,4294967295]
+; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 42, i32 0, i32 -2, i32 -1>
@@ -97,17 +96,14 @@ define <4 x i32> @sel_Cminus1_or_C_vec(<4 x i1> %cond) {
; SSE: # BB#0:
; SSE-NEXT: pslld $31, %xmm0
; SSE-NEXT: psrad $31, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_Cminus1_or_C_vec:
; AVX: # BB#0:
; AVX-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [44,2,0,1]
-; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
+; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
ret <4 x i32> %add
@@ -117,17 +113,13 @@ define <4 x i32> @cmp_sel_Cminus1_or_C_vec(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: cmp_sel_Cminus1_or_C_vec:
; SSE: # BB#0:
; SSE-NEXT: pcmpeqd %xmm1, %xmm0
-; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm1
-; SSE-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: paddd {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: cmp_sel_Cminus1_or_C_vec:
; AVX: # BB#0:
; AVX-NEXT: vpcmpeqd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [44,2,0,1]
-; AVX-NEXT: vblendvps %xmm0, {{.*}}(%rip), %xmm1, %xmm0
+; AVX-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%cond = icmp eq <4 x i32> %x, %y
%add = select <4 x i1> %cond, <4 x i32> <i32 43, i32 1, i32 -1, i32 0>, <4 x i32> <i32 44, i32 2, i32 0, i32 1>
@@ -168,18 +160,16 @@ define <4 x i32> @cmp_sel_minus1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_0_or_minus1_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_0_or_minus1_vec:
; SSE: # BB#0:
-; SSE-NEXT: pslld $31, %xmm0
-; SSE-NEXT: psrad $31, %xmm0
+; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE-NEXT: pxor %xmm1, %xmm0
+; SSE-NEXT: paddd %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_0_or_minus1_vec:
; AVX: # BB#0:
-; AVX-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpcmpeqd %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
ret <4 x i32> %add
@@ -238,17 +228,12 @@ define <4 x i32> @cmp_sel_1_or_0_vec(<4 x i32> %x, <4 x i32> %y) {
define <4 x i32> @sel_0_or_1_vec(<4 x i1> %cond) {
; SSE-LABEL: sel_0_or_1_vec:
; SSE: # BB#0:
-; SSE-NEXT: pslld $31, %xmm0
-; SSE-NEXT: psrad $31, %xmm0
-; SSE-NEXT: pandn {{.*}}(%rip), %xmm0
+; SSE-NEXT: andnps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sel_0_or_1_vec:
; AVX: # BB#0:
-; AVX-NEXT: vpslld $31, %xmm0, %xmm0
-; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
-; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [1,1,1,1]
-; AVX-NEXT: vblendvps %xmm0, %xmm1, %xmm2, %xmm0
+; AVX-NEXT: vandnps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
%add = select <4 x i1> %cond, <4 x i32> <i32 0, i32 0, i32 0, i32 0>, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
ret <4 x i32> %add
diff --git a/llvm/test/CodeGen/X86/widen_compare-1.ll b/llvm/test/CodeGen/X86/widen_compare-1.ll
index 8ea0db53a39..e8d993d2280 100644
--- a/llvm/test/CodeGen/X86/widen_compare-1.ll
+++ b/llvm/test/CodeGen/X86/widen_compare-1.ll
@@ -7,12 +7,12 @@
define <2 x i16> @compare_v2i64_to_v2i16(<2 x i16>* %src) nounwind {
; X86-LABEL: compare_v2i64_to_v2i16:
; X86: # BB#0:
-; X86-NEXT: movaps {{.*#+}} xmm0 = [65535,0,65535,0]
+; X86-NEXT: pcmpeqd %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: compare_v2i64_to_v2i16:
; X64: # BB#0:
-; X64-NEXT: movaps {{.*#+}} xmm0 = [65535,65535]
+; X64-NEXT: pcmpeqd %xmm0, %xmm0
; X64-NEXT: retq
%val = load <2 x i16>, <2 x i16>* %src, align 4
%cmp = icmp uge <2 x i16> %val, %val