author    Sanjay Patel <spatel@rotateright.com>  2019-01-03 22:42:32 +0000
committer Sanjay Patel <spatel@rotateright.com>  2019-01-03 22:42:32 +0000
commit    ac23c468830d26c9b313f1173f3abcd0254c9106 (patch)
tree      6d3f22f351821660daa114eb6ad99bffe0e8ad0d
parent    c78931003d5dfbab0fdc8610368c4f568469a283 (diff)
[x86] add AVX512 runs for horizontal ops; NFC
llvm-svn: 350362
-rw-r--r--  llvm/test/CodeGen/X86/haddsub-undef.ll   | 47
-rw-r--r--  llvm/test/CodeGen/X86/phaddsub-undef.ll  | 23
2 files changed, 55 insertions(+), 15 deletions(-)
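The new RUN lines below re-run the same tests with AVX512 feature sets. As a rough standalone sketch of what lit executes for the first new line in haddsub-undef.ll (assuming an LLVM checkout with the test at the path above, and with lit's %s substitution done by hand):

    llc < llvm/test/CodeGen/X86/haddsub-undef.ll -mtriple=x86_64-unknown -mattr=+avx512f \
      | FileCheck llvm/test/CodeGen/X86/haddsub-undef.ll --check-prefixes=AVX,AVX-SLOW,AVX512-SLOW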
diff --git a/llvm/test/CodeGen/X86/haddsub-undef.ll b/llvm/test/CodeGen/X86/haddsub-undef.ll
index df965cd780c..56e01cb8abe 100644
--- a/llvm/test/CodeGen/X86/haddsub-undef.ll
+++ b/llvm/test/CodeGen/X86/haddsub-undef.ll
@@ -1,8 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3 | FileCheck %s --check-prefixes=SSE,SSE-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse3,fast-hops | FileCheck %s --check-prefixes=SSE,SSE-FAST
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX-SLOW
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX1-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX1-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX512-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512-FAST
; Verify that we correctly fold horizontal binop even in the presence of UNDEFs.
@@ -484,17 +486,24 @@ define <4 x float> @add_ps_007_2(<4 x float> %x) {
; SSE-FAST-NEXT: haddps %xmm0, %xmm0
; SSE-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: add_ps_007_2:
-; AVX-SLOW: # %bb.0:
-; AVX-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
-; AVX-SLOW-NEXT: retq
+; AVX1-SLOW-LABEL: add_ps_007_2:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX1-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: retq
;
; AVX-FAST-LABEL: add_ps_007_2:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: retq
+;
+; AVX512-SLOW-LABEL: add_ps_007_2:
+; AVX512-SLOW: # %bb.0:
+; AVX512-SLOW-NEXT: vbroadcastss %xmm0, %xmm1
+; AVX512-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX512-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX512-SLOW-NEXT: retq
%l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
%r = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 undef>
%add = fadd <4 x float> %l, %r
@@ -576,19 +585,27 @@ define <4 x float> @add_ps_018(<4 x float> %x) {
; SSE-FAST-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-FAST-NEXT: retq
;
-; AVX-SLOW-LABEL: add_ps_018:
-; AVX-SLOW: # %bb.0:
-; AVX-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
-; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
-; AVX-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
-; AVX-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
-; AVX-SLOW-NEXT: retq
+; AVX1-SLOW-LABEL: add_ps_018:
+; AVX1-SLOW: # %bb.0:
+; AVX1-SLOW-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX1-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX1-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX1-SLOW-NEXT: retq
;
; AVX-FAST-LABEL: add_ps_018:
; AVX-FAST: # %bb.0:
; AVX-FAST-NEXT: vhaddps %xmm0, %xmm0, %xmm0
; AVX-FAST-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-FAST-NEXT: retq
+;
+; AVX512-SLOW-LABEL: add_ps_018:
+; AVX512-SLOW: # %bb.0:
+; AVX512-SLOW-NEXT: vbroadcastss %xmm0, %xmm1
+; AVX512-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,1,3]
+; AVX512-SLOW-NEXT: vaddps %xmm0, %xmm1, %xmm0
+; AVX512-SLOW-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; AVX512-SLOW-NEXT: retq
%l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 0, i32 undef>
%r = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 undef>
%add = fadd <4 x float> %l, %r
diff --git a/llvm/test/CodeGen/X86/phaddsub-undef.ll b/llvm/test/CodeGen/X86/phaddsub-undef.ll
index 161d057af1d..740db28bb08 100644
--- a/llvm/test/CodeGen/X86/phaddsub-undef.ll
+++ b/llvm/test/CodeGen/X86/phaddsub-undef.ll
@@ -5,6 +5,8 @@
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX1,AVX1-FAST
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX2,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX2,AVX2-FAST
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX-SLOW,AVX512,AVX512-SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl,fast-hops | FileCheck %s --check-prefixes=AVX,AVX-FAST,AVX512,AVX512-FAST
; Verify that we correctly fold horizontal binop even in the presence of UNDEFs.
@@ -23,6 +25,11 @@ define <8 x i32> @test14_undef(<8 x i32> %a, <8 x i32> %b) {
; AVX2: # %bb.0:
; AVX2-NEXT: vphaddd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test14_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vphaddd %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: retq
%vecext = extractelement <8 x i32> %a, i32 0
%vecext1 = extractelement <8 x i32> %a, i32 1
%add = add i32 %vecext, %vecext1
@@ -70,6 +77,11 @@ define <8 x i32> @test15_undef(<8 x i32> %a, <8 x i32> %b) {
; AVX2: # %bb.0:
; AVX2-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test15_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vphaddd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: retq
%vecext = extractelement <8 x i32> %a, i32 0
%vecext1 = extractelement <8 x i32> %a, i32 1
%add = add i32 %vecext, %vecext1
@@ -96,6 +108,11 @@ define <8 x i32> @test16_undef(<8 x i32> %a, <8 x i32> %b) {
; AVX2: # %bb.0:
; AVX2-NEXT: vphaddd %ymm0, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test16_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vphaddd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: retq
%vecext = extractelement <8 x i32> %a, i32 0
%vecext1 = extractelement <8 x i32> %a, i32 1
%add = add i32 %vecext, %vecext1
@@ -124,6 +141,12 @@ define <8 x i32> @test17_undef(<8 x i32> %a, <8 x i32> %b) {
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vphaddd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
+;
+; AVX512-LABEL: test17_undef:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX512-NEXT: vphaddd %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
%vecext = extractelement <8 x i32> %a, i32 0
%vecext1 = extractelement <8 x i32> %a, i32 1
%add1 = add i32 %vecext, %vecext1
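For reference, a minimal sketch (not part of this commit; the function name is made up for illustration) of the shuffle+fadd idiom these files exercise. With fast-hops enabled, llc is expected to fold this pattern into a single haddps, since the left operand gathers even lanes and the right operand gathers odd lanes:

    define <4 x float> @hadd_sketch(<4 x float> %x) {
      %l = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 0, i32 2, i32 undef, i32 undef>
      %r = shufflevector <4 x float> %x, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 undef, i32 undef>
      %add = fadd <4 x float> %l, %r
      ret <4 x float> %add
    }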