author    Simon Pilgrim <llvm-dev@redking.me.uk>    2019-06-26 14:34:41 +0000
committer Simon Pilgrim <llvm-dev@redking.me.uk>    2019-06-26 14:34:41 +0000
commit    b13c6f1a9d4bda827bacf21e0a5cc3f37f5edda3 (patch)
tree      6147cf43d268e994d3953f15bff4acff8878cc37 /llvm/test/CodeGen/X86/horizontal-reduce-smax.ll
parent    78edfc4bf06c0e738430c7ea398df9c59fe1edaf (diff)
download  bcm5719-llvm-b13c6f1a9d4bda827bacf21e0a5cc3f37f5edda3.tar.gz
          bcm5719-llvm-b13c6f1a9d4bda827bacf21e0a5cc3f37f5edda3.zip
[X86][SSE] X86TargetLowering::isBinOp - add PCMPGT

Allows narrowInsertExtractVectorBinOp to reduce vector size

llvm-svn: 364431
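The C++ side of the patch is not shown on this page (only the updated test is). As a rough sketch, assuming the isBinOp override already existed and this commit simply adds the PCMPGT case, the hook that gates the combine plausibly looks like this; the surrounding structure is an assumption, only the PCMPGT entry is confirmed by the commit title:

// In llvm/lib/Target/X86/X86ISelLowering.cpp (sketch, not the verbatim patch).
// isBinOp tells generic DAG combines which target-specific nodes behave like
// ordinary two-operand ops, so narrowInsertExtractVectorBinOp may split them.
bool X86TargetLowering::isBinOp(unsigned Opcode) const {
  switch (Opcode) {
  case X86ISD::PCMPGT: // Added by this commit: signed greater-than compare.
    return true;
  }
  return TargetLoweringBase::isBinOp(Opcode);
}

With PCMPGT treated as a binop, extracting a 128-bit subvector of a 256-bit compare lets the compare itself be narrowed, which is exactly the ymm-to-xmm change visible in the hunks below.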
Diffstat (limited to 'llvm/test/CodeGen/X86/horizontal-reduce-smax.ll')
-rw-r--r-- llvm/test/CodeGen/X86/horizontal-reduce-smax.ll | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
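For reference, the hunks below all come from the AVX2 paths of test_reduce_v4i64 and test_reduce_v8i64. The IR inputs are not reproduced on this page; a minimal sketch of the v4i64 case, assuming the shuffle/icmp/select smax-reduction pattern these horizontal-reduce tests conventionally use:

define i64 @test_reduce_v4i64(<4 x i64> %a0) {
  ; Compare the high half against the low half, keep the larger lanes.
  %1 = shufflevector <4 x i64> %a0, <4 x i64> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  %2 = icmp sgt <4 x i64> %a0, %1
  %3 = select <4 x i1> %2, <4 x i64> %a0, <4 x i64> %1
  ; Repeat with the two surviving candidates, then take lane 0.
  %4 = shufflevector <4 x i64> %3, <4 x i64> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  %5 = icmp sgt <4 x i64> %3, %4
  %6 = select <4 x i1> %5, <4 x i64> %3, <4 x i64> %4
  %7 = extractelement <4 x i64> %6, i32 0
  ret i64 %7
}

After the first vextracti128 only 128 bits of the value are still live, so the remaining vpcmpgtq/vblendvpd steps can run on xmm instead of ymm registers; that narrowing is the entire content of the diff.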
diff --git a/llvm/test/CodeGen/X86/horizontal-reduce-smax.ll b/llvm/test/CodeGen/X86/horizontal-reduce-smax.ll
index 9ce634bfaea..c08f08f383b 100644
--- a/llvm/test/CodeGen/X86/horizontal-reduce-smax.ll
+++ b/llvm/test/CodeGen/X86/horizontal-reduce-smax.ll
@@ -468,10 +468,10 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X86-AVX2-LABEL: test_reduce_v4i64:
; X86-AVX2: ## %bb.0:
; X86-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; X86-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
-; X86-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
+; X86-AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; X86-AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X86-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
+; X86-AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; X86-AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
; X86-AVX2-NEXT: vpextrd $1, %xmm0, %edx
@@ -541,10 +541,10 @@ define i64 @test_reduce_v4i64(<4 x i64> %a0) {
; X64-AVX2-LABEL: test_reduce_v4i64:
; X64-AVX2: ## %bb.0:
; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
-; X64-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
-; X64-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; X64-AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X64-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
+; X64-AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; X64-AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; X64-AVX2-NEXT: vmovq %xmm0, %rax
; X64-AVX2-NEXT: vzeroupper
@@ -1135,10 +1135,10 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X86-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; X86-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; X86-AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X86-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
-; X86-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X86-AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
+; X86-AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; X86-AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X86-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
+; X86-AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; X86-AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; X86-AVX2-NEXT: vmovd %xmm0, %eax
; X86-AVX2-NEXT: vpextrd $1, %xmm0, %edx
@@ -1252,10 +1252,10 @@ define i64 @test_reduce_v8i64(<8 x i64> %a0) {
; X64-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
; X64-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
; X64-AVX2-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X64-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
-; X64-AVX2-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
+; X64-AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
+; X64-AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; X64-AVX2-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[2,3,0,1]
-; X64-AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm2
+; X64-AVX2-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm2
; X64-AVX2-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
; X64-AVX2-NEXT: vmovq %xmm0, %rax
; X64-AVX2-NEXT: vzeroupper