Diffstat (limited to 'llvm/test/CodeGen/X86/vector-reduce-umin-widen.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vector-reduce-umin-widen.ll  42
1 file changed, 21 insertions(+), 21 deletions(-)
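This diff updates autogenerated FileCheck assertions in an AVX2 unsigned-min reduction test: once the upper half of a ymm vector has been extracted, only the low 128 bits remain live, so the remaining xor/compare/blend steps can use the 128-bit xmm instruction forms instead of full 256-bit ymm ones. For reference, a minimal sketch of the kind of IR this file tests, based on the test_v4i64 signature in the hunk header below; the exact intrinsic mangling varies across LLVM revisions, so the name here is an assumption:

declare i64 @llvm.experimental.vector.reduce.umin.v4i64(<4 x i64>)

define i64 @test_v4i64(<4 x i64> %a0) {
  %r = call i64 @llvm.experimental.vector.reduce.umin.v4i64(<4 x i64> %a0)
  ret i64 %r
}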
diff --git a/llvm/test/CodeGen/X86/vector-reduce-umin-widen.ll b/llvm/test/CodeGen/X86/vector-reduce-umin-widen.ll
index 8cdf00c22a6..9dbc6338d31 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-umin-widen.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-umin-widen.ll
@@ -169,14 +169,14 @@ define i64 @test_v4i64(<4 x i64> %a0) {
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
-; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm3
-; AVX2-NEXT: vpxor %ymm2, %ymm1, %ymm4
-; AVX2-NEXT: vpcmpgtq %ymm3, %ymm4, %ymm3
-; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vpxor %xmm2, %xmm0, %xmm3
+; AVX2-NEXT: vpxor %xmm2, %xmm1, %xmm4
+; AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX2-NEXT: vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vxorpd %ymm2, %ymm0, %ymm3
-; AVX2-NEXT: vxorpd %ymm2, %ymm1, %ymm2
-; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vxorpd %xmm2, %xmm0, %xmm3
+; AVX2-NEXT: vxorpd %xmm2, %xmm1, %xmm2
+; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vzeroupper
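The checked sequence works around AVX2's lack of an unsigned 64-bit vector compare: a <u b holds exactly when (a xor 2^63) <s (b xor 2^63), so each operand is xored with the sign-bit constant (9223372036854775808 = 0x8000000000000000), compared with the signed vpcmpgtq, and the resulting mask drives vblendvpd to keep the smaller element. A minimal IR sketch of one 128-bit umin step that lowers to this vpxor/vpcmpgtq/vblendvpd pattern (function and value names are illustrative):

define <2 x i64> @umin_step(<2 x i64> %a, <2 x i64> %b) {
  ; flip the sign bits so a signed compare orders the values as unsigned
  %ab = xor <2 x i64> %a, <i64 -9223372036854775808, i64 -9223372036854775808>
  %bb = xor <2 x i64> %b, <i64 -9223372036854775808, i64 -9223372036854775808>
  ; %bb >s %ab  <=>  %b >u %a, i.e. %a is the unsigned minimum
  %gt = icmp sgt <2 x i64> %bb, %ab
  %min = select <2 x i1> %gt, <2 x i64> %a, <2 x i64> %b
  ret <2 x i64> %min
}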
@@ -362,14 +362,14 @@ define i64 @test_v8i64(<8 x i64> %a0) {
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm4, %ymm3
; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vxorpd %ymm2, %ymm0, %ymm3
-; AVX2-NEXT: vxorpd %ymm2, %ymm1, %ymm4
-; AVX2-NEXT: vpcmpgtq %ymm3, %ymm4, %ymm3
-; AVX2-NEXT: vblendvpd %ymm3, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vxorpd %xmm2, %xmm0, %xmm3
+; AVX2-NEXT: vxorpd %xmm2, %xmm1, %xmm4
+; AVX2-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm3
+; AVX2-NEXT: vblendvpd %xmm3, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vxorpd %ymm2, %ymm0, %ymm3
-; AVX2-NEXT: vxorpd %ymm2, %ymm1, %ymm2
-; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vxorpd %xmm2, %xmm0, %xmm3
+; AVX2-NEXT: vxorpd %xmm2, %xmm1, %xmm2
+; AVX2-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vzeroupper
@@ -692,14 +692,14 @@ define i64 @test_v16i64(<16 x i64> %a0) {
; AVX2-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX2-NEXT: vxorpd %ymm4, %ymm0, %ymm2
-; AVX2-NEXT: vxorpd %ymm4, %ymm1, %ymm3
-; AVX2-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm2
-; AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; AVX2-NEXT: vxorpd %xmm4, %xmm0, %xmm2
+; AVX2-NEXT: vxorpd %xmm4, %xmm1, %xmm3
+; AVX2-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; AVX2-NEXT: vxorpd %ymm4, %ymm0, %ymm2
-; AVX2-NEXT: vxorpd %ymm4, %ymm1, %ymm3
-; AVX2-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm2
+; AVX2-NEXT: vxorpd %xmm4, %xmm0, %xmm2
+; AVX2-NEXT: vxorpd %xmm4, %xmm1, %xmm3
+; AVX2-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
; AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; AVX2-NEXT: vmovq %xmm0, %rax
; AVX2-NEXT: vzeroupper
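The CHECK lines in this file are autogenerated, so changes like the ymm-to-xmm narrowing above are produced by rerunning the update script rather than by hand-editing the assertions. Assuming a standard LLVM checkout with llc built, the refresh looks roughly like:

llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/vector-reduce-umin-widen.ll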