Diffstat (limited to 'llvm/test/CodeGen/X86/vector-trunc-usat.ll')
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-trunc-usat.ll | 237 |
1 file changed, 115 insertions, 122 deletions
diff --git a/llvm/test/CodeGen/X86/vector-trunc-usat.ll b/llvm/test/CodeGen/X86/vector-trunc-usat.ll
index dcec70ea6d7..23cb25df8e0 100644
--- a/llvm/test/CodeGen/X86/vector-trunc-usat.ll
+++ b/llvm/test/CodeGen/X86/vector-trunc-usat.ll
@@ -112,17 +112,17 @@ define <4 x i32> @trunc_usat_v4i64_v4i32(<4 x i64> %a0) {
 ;
 ; AVX1-LABEL: trunc_usat_v4i64_v4i32:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm1, %xmm0, %xmm2
 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [9223372041149743103,9223372041149743103]
-; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1
-; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm2
 ; AVX1-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
-; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,429496729]
-; AVX1-NEXT: vblendvpd %ymm1, %ymm0, %ymm2, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; AVX1-NEXT: vpxor %xmm1, %xmm4, %xmm1
+; AVX1-NEXT: vpcmpgtq %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vmovapd {{.*#+}} xmm3 = [4294967295,429496729]
+; AVX1-NEXT: vblendvpd %xmm1, %xmm4, %xmm3, %xmm1
+; AVX1-NEXT: vmovapd {{.*#+}} xmm3 = [4294967295,4294967295]
+; AVX1-NEXT: vblendvpd %xmm2, %xmm0, %xmm3, %xmm0
 ; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
@@ -380,26 +380,24 @@ define <8 x i32> @trunc_usat_v8i64_v8i32(<8 x i64> %a0) {
 ;
 ; AVX1-LABEL: trunc_usat_v8i64_v8i32:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372041149743103,9223372041149743103]
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm6
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372041149743103,9223372041149743103]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpxor %xmm2, %xmm5, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm6
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm7, %xmm4, %xmm7
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vmovapd {{.*#+}} xmm4 = [4294967295,4294967295]
+; AVX1-NEXT: vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT: vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vblendvpd %xmm6, %xmm5, %xmm4, %xmm2
+; AVX1-NEXT: vblendvpd %xmm8, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
@@ -648,26 +646,24 @@ define <8 x i16> @trunc_usat_v8i64_v8i16(<8 x i64> %a0) {
 ;
 ; AVX1-LABEL: trunc_usat_v8i64_v8i16:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [65535,65535,65535,65535]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854841343,9223372036854841343]
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm6
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854841343,9223372036854841343]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpxor %xmm2, %xmm5, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm6
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm7, %xmm4, %xmm7
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vmovapd {{.*#+}} xmm4 = [65535,65535]
+; AVX1-NEXT: vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT: vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vblendvpd %xmm6, %xmm5, %xmm4, %xmm2
+; AVX1-NEXT: vblendvpd %xmm8, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vzeroupper
@@ -1129,26 +1125,24 @@ define <8 x i8> @trunc_usat_v8i64_v8i8(<8 x i64> %a0) {
 ;
 ; AVX1-LABEL: trunc_usat_v8i64_v8i8:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [255,255,255,255]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854776063,9223372036854776063]
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm6
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpxor %xmm2, %xmm5, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm6
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm7, %xmm4, %xmm7
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vmovapd {{.*#+}} xmm4 = [255,255]
+; AVX1-NEXT: vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT: vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vblendvpd %xmm6, %xmm5, %xmm4, %xmm2
+; AVX1-NEXT: vblendvpd %xmm8, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vzeroupper
@@ -1368,26 +1362,24 @@ define void @trunc_usat_v8i64_v8i8_store(<8 x i64> %a0, <8 x i8> *%p1) {
 ;
 ; AVX1-LABEL: trunc_usat_v8i64_v8i8_store:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vmovapd {{.*#+}} ymm2 = [255,255,255,255]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854776063,9223372036854776063]
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm6
-; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm6
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm2, %xmm0, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm8
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
+; AVX1-NEXT: vpxor %xmm2, %xmm5, %xmm6
+; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm6
+; AVX1-NEXT: vpxor %xmm2, %xmm1, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm7, %xmm4, %xmm7
 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
-; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
-; AVX1-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; AVX1-NEXT: vpxor %xmm2, %xmm3, %xmm2
+; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm2
+; AVX1-NEXT: vmovapd {{.*#+}} xmm4 = [255,255]
+; AVX1-NEXT: vblendvpd %xmm2, %xmm3, %xmm4, %xmm2
+; AVX1-NEXT: vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vblendvpd %xmm6, %xmm5, %xmm4, %xmm2
+; AVX1-NEXT: vblendvpd %xmm8, %xmm0, %xmm4, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
@@ -1770,48 +1762,49 @@ define <16 x i8> @trunc_usat_v16i64_v16i8(<16 x i64> %a0) {
 ;
 ; AVX1-LABEL: trunc_usat_v16i64_v16i8:
 ; AVX1: # %bb.0:
-; AVX1-NEXT: vmovapd {{.*#+}} ymm8 = [255,255,255,255]
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [9223372036854775808,9223372036854775808]
-; AVX1-NEXT: vpxor %xmm6, %xmm5, %xmm5
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [9223372036854776063,9223372036854776063]
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT: vpxor %xmm6, %xmm0, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm7, %xmm4
-; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm4, %ymm4
-; AVX1-NEXT: vblendvpd %ymm4, %ymm0, %ymm8, %ymm0
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
-; AVX1-NEXT: vpxor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm7, %xmm4
-; AVX1-NEXT: vpxor %xmm6, %xmm1, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT: vblendvpd %ymm4, %ymm1, %ymm8, %ymm1
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
-; AVX1-NEXT: vpxor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm7, %xmm4
-; AVX1-NEXT: vpxor %xmm6, %xmm2, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT: vblendvpd %ymm4, %ymm2, %ymm8, %ymm2
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vpxor %xmm6, %xmm4, %xmm4
-; AVX1-NEXT: vpcmpgtq %xmm4, %xmm7, %xmm4
-; AVX1-NEXT: vpxor %xmm6, %xmm3, %xmm5
-; AVX1-NEXT: vpcmpgtq %xmm5, %xmm7, %xmm5
-; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
-; AVX1-NEXT: vblendvpd %ymm4, %ymm3, %ymm8, %ymm3
-; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm4
-; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; AVX1-NEXT: vmovdqa %ymm0, %ymm8
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372036854775808,9223372036854775808]
+; AVX1-NEXT: vpxor %xmm5, %xmm8, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [9223372036854776063,9223372036854776063]
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vextractf128 $1, %ymm8, %xmm11
+; AVX1-NEXT: vpxor %xmm5, %xmm11, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm0
+; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; AVX1-NEXT: vpxor %xmm5, %xmm1, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm10
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm14
+; AVX1-NEXT: vpxor %xmm5, %xmm14, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm7, %xmm6, %xmm12
+; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm7
+; AVX1-NEXT: vpcmpgtq %xmm7, %xmm6, %xmm13
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm7
+; AVX1-NEXT: vpxor %xmm5, %xmm7, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm15
+; AVX1-NEXT: vpxor %xmm5, %xmm3, %xmm4
+; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm0
+; AVX1-NEXT: vpxor %xmm5, %xmm0, %xmm5
+; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
+; AVX1-NEXT: vmovapd {{.*#+}} xmm6 = [255,255]
+; AVX1-NEXT: vblendvpd %xmm5, %xmm0, %xmm6, %xmm9
+; AVX1-NEXT: vblendvpd %xmm4, %xmm3, %xmm6, %xmm3
+; AVX1-NEXT: vblendvpd %xmm15, %xmm7, %xmm6, %xmm4
+; AVX1-NEXT: vblendvpd %xmm13, %xmm2, %xmm6, %xmm2
+; AVX1-NEXT: vblendvpd %xmm12, %xmm14, %xmm6, %xmm5
+; AVX1-NEXT: vblendvpd %xmm10, %xmm1, %xmm6, %xmm1
+; AVX1-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: vblendvpd %xmm0, %xmm11, %xmm6, %xmm7
+; AVX1-NEXT: vmovapd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; AVX1-NEXT: vblendvpd %xmm0, %xmm8, %xmm6, %xmm6
+; AVX1-NEXT: vpackusdw %xmm9, %xmm3, %xmm0
 ; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpackusdw %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
-; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
-; AVX1-NEXT: vpackusdw %xmm3, %xmm0, %xmm0
-; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
-; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vpackusdw %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpackusdw %xmm7, %xmm6, %xmm2
+; AVX1-NEXT: vpackusdw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vpackuswb %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT: vzeroupper
 ; AVX1-NEXT: retq
 ;
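For context, each @trunc_usat_* function checked above exercises an unsigned saturating truncation: every input lane is clamped to the destination type's unsigned maximum and then truncated. A minimal sketch of that IR pattern, assuming the usual icmp/select/trunc formulation these tests use (the exact bodies live in vector-trunc-usat.ll; the _sketch name is illustrative only):

define <4 x i32> @trunc_usat_v4i64_v4i32_sketch(<4 x i64> %a0) {
  ; Clamp each i64 lane to UINT32_MAX (4294967295), then truncate to i32.
  %c = icmp ult <4 x i64> %a0, <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %m = select <4 x i1> %c, <4 x i64> %a0, <4 x i64> <i64 4294967295, i64 4294967295, i64 4294967295, i64 4294967295>
  %t = trunc <4 x i64> %m to <4 x i32>
  ret <4 x i32> %t
}

The AVX1 CHECK lines above track how this clamp is lowered per 128-bit half: the sign bit is flipped with vpxor (constant 9223372036854775808), a signed vpcmpgtq against the biased limit (e.g. 9223372041149743103 = sign bit + 4294967295) produces the mask, and vblendvpd selects between the input and the unsigned-max constant before the final shuffle/pack.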

