path: root/llvm/test/CodeGen/X86/vector-trunc-packus.ll
author    Sanjay Patel <spatel@rotateright.com>  2019-06-07 13:17:46 +0000
committer Sanjay Patel <spatel@rotateright.com>  2019-06-07 13:17:46 +0000
commit    6880bceda2df17f68e319c86a78642125086e0b8 (patch)
tree      936eca4e86abcdbd7788df22c7cbd579b61b21c0 /llvm/test/CodeGen/X86/vector-trunc-packus.ll
parent    0723c659f5838a5f67cd6ef5133f7d0e9464b122 (diff)
[x86] narrow extract subvector of vector select
This is a potentially large perf win for AVX1 targets because of the way we auto-vectorize to 256-bit but then expect the backend to legalize/optimize for the half-implemented AVX1 ISA.

On the motivating example from PR37428 (even though this patch doesn't solve the vector shift issue):
https://bugs.llvm.org/show_bug.cgi?id=37428
...there's a 16% speedup when compiling with "-mavx" (perf tested on Haswell) because we eliminate the remaining 256-bit vblendv ops.

I added comments on a couple of tests that require further work. If we have 256-bit logic ops separating the vselect and extract, we should probably narrow everything to 128-bit, but that requires a larger pattern match.

Differential Revision: https://reviews.llvm.org/D62969

llvm-svn: 362797
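The diff below only updates autogenerated CHECK lines, so the transform itself is not visible in this file. As a rough illustration of the kind of pattern the combine targets, here is a hand-written IR sketch (the function name, value names, and constants are assumptions for illustration, not taken from this commit): a 256-bit vselect whose result is only consumed through a 128-bit extract, which on AVX1 previously forced a 256-bit vblendv before the narrowing.

; Hypothetical sketch, not from this commit: a 256-bit select (clamp to 255)
; followed by an extract of the low 128-bit half.
define <2 x i64> @extract_of_select_sketch(<4 x i64> %x) {
  %cmp = icmp slt <4 x i64> %x, <i64 255, i64 255, i64 255, i64 255>
  %sel = select <4 x i1> %cmp, <4 x i64> %x, <4 x i64> <i64 255, i64 255, i64 255, i64 255>
  ; Only the low half of the select is used; narrowing the extract through
  ; the select lets AVX1 codegen use a 128-bit blend instead of a 256-bit one.
  %lo = shufflevector <4 x i64> %sel, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
  ret <2 x i64> %lo
}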
Diffstat (limited to 'llvm/test/CodeGen/X86/vector-trunc-packus.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-trunc-packus.ll | 285
1 file changed, 133 insertions(+), 152 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-trunc-packus.ll b/llvm/test/CodeGen/X86/vector-trunc-packus.ll
index 9017e1915c0..18917cf96ce 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-packus.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-packus.ll
@@ -181,19 +181,17 @@ define <4 x i32> @trunc_packus_v4i64_v4i32(<4 x i64> %a0) {
;
; AVX1-LABEL: trunc_packus_v4i64_v4i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovapd {{.*#+}} ymm1 = [4294967295,4294967295,4294967295,4294967295]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [4294967295,4294967295]
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm3
-; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
-; AVX1-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
-; AVX1-NEXT: vxorpd %xmm1, %xmm1, %xmm1
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [4294967295,4294967295]
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm2, %xmm4
+; AVX1-NEXT: vblendvpd %xmm4, %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm5
+; AVX1-NEXT: vblendvpd %xmm3, %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm1, %xmm2
+; AVX1-NEXT: vpand %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpand %xmm0, %xmm5, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -583,30 +581,27 @@ define <8 x i32> @trunc_packus_v8i64_v8i32(<8 x i64> %a0) {
;
; AVX1-LABEL: trunc_packus_v8i64_v8i32:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [4294967295,4294967295]
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm1, %xmm6
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [4294967295,4294967295]
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT: vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT: vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT: vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT: vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpand %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
-; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm2
-; AVX1-NEXT: vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpand %xmm6, %xmm7, %xmm2
+; AVX1-NEXT: vpand %xmm0, %xmm9, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
@@ -996,30 +991,27 @@ define <8 x i16> @trunc_packus_v8i64_v8i16(<8 x i64> %a0) {
;
; AVX1-LABEL: trunc_packus_v8i64_v8i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [65535,65535,65535,65535]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [65535,65535]
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm1, %xmm6
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [65535,65535]
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT: vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT: vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT: vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT: vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpand %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm2
-; AVX1-NEXT: vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpand %xmm6, %xmm7, %xmm2
+; AVX1-NEXT: vpand %xmm0, %xmm9, %xmm0
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
@@ -1628,30 +1620,27 @@ define <8 x i8> @trunc_packus_v8i64_v8i8(<8 x i64> %a0) {
;
; AVX1-LABEL: trunc_packus_v8i64_v8i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [255,255,255,255]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255]
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm1, %xmm6
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255]
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT: vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT: vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT: vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT: vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpand %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm2
-; AVX1-NEXT: vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpand %xmm6, %xmm7, %xmm2
+; AVX1-NEXT: vpand %xmm0, %xmm9, %xmm0
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
@@ -2011,30 +2000,27 @@ define void @trunc_packus_v8i64_v8i8_store(<8 x i64> %a0, <8 x i8> *%p1) {
;
; AVX1-LABEL: trunc_packus_v8i64_v8i8_store:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [255,255,255,255]
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255]
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm5
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm4
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vxorpd %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm0, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm1, %xmm6
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm7, %xmm2
-; AVX1-NEXT: vpand %xmm7, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm1, %xmm6, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255]
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm8
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm5
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm3, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm3, %xmm4
+; AVX1-NEXT: vblendvpd %xmm4, %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm9
+; AVX1-NEXT: vblendvpd %xmm7, %xmm6, %xmm3, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm7
+; AVX1-NEXT: vblendvpd %xmm5, %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm1, %xmm5
+; AVX1-NEXT: vblendvpd %xmm8, %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm2, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpand %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm2
-; AVX1-NEXT: vpand %xmm0, %xmm3, %xmm0
+; AVX1-NEXT: vpand %xmm6, %xmm7, %xmm2
+; AVX1-NEXT: vpand %xmm0, %xmm9, %xmm0
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
@@ -2689,56 +2675,51 @@ define <16 x i8> @trunc_packus_v16i64_v16i8(<16 x i64> %a0) {
;
; AVX1-LABEL: trunc_packus_v16i64_v16i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovapd {{.*#+}} ymm4 = [255,255,255,255]
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm5
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [255,255]
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm6, %xmm7
-; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT: vblendvpd %ymm5, %ymm3, %ymm4, %ymm15
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm2, %xmm6, %xmm7
-; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT: vblendvpd %ymm5, %ymm2, %ymm4, %ymm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm6, %xmm7
-; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
-; AVX1-NEXT: vblendvpd %ymm5, %ymm1, %ymm4, %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm0, %xmm6, %xmm6
-; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
-; AVX1-NEXT: vblendvpd %ymm5, %ymm0, %ymm4, %ymm0
-; AVX1-NEXT: vxorpd %xmm4, %xmm4, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm0, %xmm8
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm14
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm14, %xmm9
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm1, %xmm10
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm8
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [255,255]
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm9
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm7, %xmm11
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm2, %xmm12
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm13
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm15, %xmm6
-; AVX1-NEXT: vextractf128 $1, %ymm15, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm3, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm0, %xmm5, %xmm6
+; AVX1-NEXT: vblendvpd %xmm6, %xmm0, %xmm5, %xmm10
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm6
+; AVX1-NEXT: vblendvpd %xmm6, %xmm4, %xmm5, %xmm11
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm5, %xmm6
+; AVX1-NEXT: vblendvpd %xmm6, %xmm1, %xmm5, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm6
+; AVX1-NEXT: vblendvpd %xmm6, %xmm7, %xmm5, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm5, %xmm7
+; AVX1-NEXT: vblendvpd %xmm7, %xmm2, %xmm5, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm9, %xmm5, %xmm7
+; AVX1-NEXT: vblendvpd %xmm7, %xmm9, %xmm5, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm0
+; AVX1-NEXT: vblendvpd %xmm0, %xmm3, %xmm5, %xmm0
+; AVX1-NEXT: vpcmpgtq %xmm8, %xmm5, %xmm3
+; AVX1-NEXT: vblendvpd %xmm3, %xmm8, %xmm5, %xmm3
+; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm4
; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpand %xmm15, %xmm6, %xmm4
-; AVX1-NEXT: vpackusdw %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpand %xmm5, %xmm13, %xmm4
-; AVX1-NEXT: vpand %xmm2, %xmm12, %xmm2
-; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpand %xmm7, %xmm11, %xmm3
-; AVX1-NEXT: vpand %xmm1, %xmm10, %xmm1
-; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpand %xmm14, %xmm9, %xmm3
-; AVX1-NEXT: vpand %xmm0, %xmm8, %xmm0
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm0, %xmm4
+; AVX1-NEXT: vpand %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm7, %xmm3
+; AVX1-NEXT: vpand %xmm7, %xmm3, %xmm3
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm2, %xmm4
+; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm2
+; AVX1-NEXT: vpand %xmm6, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm1, %xmm3
+; AVX1-NEXT: vpand %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm11, %xmm2
+; AVX1-NEXT: vpand %xmm11, %xmm2, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm10, %xmm3
+; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
+; AVX1-NEXT: vpackusdw %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;