| author | Craig Topper <craig.topper@gmail.com> | 2016-12-05 04:51:28 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@gmail.com> | 2016-12-05 04:51:28 +0000 |
| commit | 227d4279a8e176ee196b6b6aa5e778e4986a1f6e (patch) | |
| tree | 4d964562ea814e8352dec6734738cea91b03cc29 /llvm/test/CodeGen | |
| parent | 81707549191eae0aa7b571165ebd030dd82a50f3 (diff) | |
| download | bcm5719-llvm-227d4279a8e176ee196b6b6aa5e778e4986a1f6e.tar.gz bcm5719-llvm-227d4279a8e176ee196b6b6aa5e778e4986a1f6e.zip | |
[AVX-512] Add avx512f command lines to fast isel SSE select test.
Currently the fast-isel code emits an AVX1 instruction sequence even with AVX-512. This is different from normal isel. A follow-up commit will fix this.
llvm-svn: 288635
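As a quick illustration of the difference described above, here is a minimal sketch distilled from the new RUN lines and the select_fcmp_oeq_f32 case in the diff below; it is not a separate file in the tree, and the CHECK lines simply restate the expected output recorded there. With -mattr=avx512f, normal isel compares into a mask register and performs a masked scalar move, while fast-isel still produces the AVX1-style compare plus variable blend.

; Minimal reproduction sketch (assembled from the test below, not part of the commit).
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=AVX512SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1 -mattr=avx512f | FileCheck %s --check-prefix=AVX512FAST

define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
; Normal isel: compare into a mask register, then do a masked scalar move.
; AVX512SLOW: vcmpeqss %xmm1, %xmm0, %k1
; AVX512SLOW: vmovss %xmm2, %xmm0, %xmm3 {%k1}
; Fast-isel (for now): AVX1-style compare into an xmm register plus a variable blend.
; AVX512FAST: vcmpeqss %xmm1, %xmm0, %xmm0
; AVX512FAST: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp oeq float %a, %b
  %2 = select i1 %1, float %c, float %d
  ret float %2
}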
Diffstat (limited to 'llvm/test/CodeGen')
| -rw-r--r-- | llvm/test/CodeGen/X86/fast-isel-select-sse.ll | 314 |
1 file changed, 314 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/fast-isel-select-sse.ll b/llvm/test/CodeGen/X86/fast-isel-select-sse.ll
index 026732d8ce5..9c24118e41d 100644
--- a/llvm/test/CodeGen/X86/fast-isel-select-sse.ll
+++ b/llvm/test/CodeGen/X86/fast-isel-select-sse.ll
@@ -3,6 +3,8 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1 | FileCheck %s --check-prefix=SSE
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1 -mattr=avx | FileCheck %s --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512SLOW
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -fast-isel -fast-isel-abort=1 -mattr=avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512FAST
 
 ; Test all cmp predicates that can be used with SSE.
@@ -21,6 +23,19 @@ define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_oeq_f32:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpeqss %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_oeq_f32:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpeqss %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp oeq float %a, %b
 %2 = select i1 %1, float %c, float %d
 ret float %2
@@ -41,6 +56,19 @@ define double @select_fcmp_oeq_f64(double %a, double %b, double %c, double %d) {
 ; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_oeq_f64:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpeqsd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_oeq_f64:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpeqsd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp oeq double %a, %b
 %2 = select i1 %1, double %c, double %d
 ret double %2
@@ -62,6 +90,19 @@ define float @select_fcmp_ogt_f32(float %a, float %b, float %c, float %d) {
 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ogt_f32:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpltss %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_ogt_f32:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpltss %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp ogt float %a, %b
 %2 = select i1 %1, float %c, float %d
 ret float %2
@@ -83,6 +124,19 @@ define double @select_fcmp_ogt_f64(double %a, double %b, double %c, double %d) {
 ; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ogt_f64:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpltsd %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_ogt_f64:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpltsd %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp ogt double %a, %b
 %2 = select i1 %1, double %c, double %d
 ret double %2
@@ -104,6 +158,19 @@ define float @select_fcmp_oge_f32(float %a, float %b, float %c, float %d) {
 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_oge_f32:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpless %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_oge_f32:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpless %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp oge float %a, %b
 %2 = select i1 %1, float %c, float %d
 ret float %2
@@ -125,6 +192,19 @@ define double @select_fcmp_oge_f64(double %a, double %b, double %c, double %d) {
 ; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_oge_f64:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmplesd %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_oge_f64:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmplesd %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp oge double %a, %b
 %2 = select i1 %1, double %c, double %d
 ret double %2
@@ -145,6 +225,19 @@ define float @select_fcmp_olt_f32(float %a, float %b, float %c, float %d) {
 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_olt_f32:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpltss %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_olt_f32:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpltss %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp olt float %a, %b
 %2 = select i1 %1, float %c, float %d
 ret float %2
@@ -165,6 +258,19 @@ define double @select_fcmp_olt_f64(double %a, double %b, double %c, double %d) {
 ; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_olt_f64:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpltsd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_olt_f64:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpltsd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp olt double %a, %b
 %2 = select i1 %1, double %c, double %d
 ret double %2
@@ -185,6 +291,19 @@ define float @select_fcmp_ole_f32(float %a, float %b, float %c, float %d) {
 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ole_f32:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpless %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_ole_f32:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpless %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp ole float %a, %b
 %2 = select i1 %1, float %c, float %d
 ret float %2
@@ -205,6 +324,19 @@ define double @select_fcmp_ole_f64(double %a, double %b, double %c, double %d) {
 ; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ole_f64:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmplesd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_ole_f64:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmplesd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp ole double %a, %b
 %2 = select i1 %1, double %c, double %d
 ret double %2
@@ -225,6 +357,19 @@ define float @select_fcmp_ord_f32(float %a, float %b, float %c, float %d) {
 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ord_f32:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpordss %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_ord_f32:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpordss %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp ord float %a, %b
 %2 = select i1 %1, float %c, float %d
 ret float %2
@@ -245,6 +390,19 @@ define double @select_fcmp_ord_f64(double %a, double %b, double %c, double %d) {
 ; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ord_f64:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpordsd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_ord_f64:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpordsd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp ord double %a, %b
 %2 = select i1 %1, double %c, double %d
 ret double %2
@@ -265,6 +423,19 @@ define float @select_fcmp_uno_f32(float %a, float %b, float %c, float %d) {
 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_uno_f32:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpunordss %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_uno_f32:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpunordss %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp uno float %a, %b
 %2 = select i1 %1, float %c, float %d
 ret float %2
@@ -285,6 +456,19 @@ define double @select_fcmp_uno_f64(double %a, double %b, double %c, double %d) {
 ; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_uno_f64:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpunordsd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_uno_f64:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpunordsd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp uno double %a, %b
 %2 = select i1 %1, double %c, double %d
 ret double %2
@@ -305,6 +489,19 @@ define float @select_fcmp_ugt_f32(float %a, float %b, float %c, float %d) {
 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ugt_f32:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpnless %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_ugt_f32:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpnless %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp ugt float %a, %b
 %2 = select i1 %1, float %c, float %d
 ret float %2
@@ -325,6 +522,19 @@ define double @select_fcmp_ugt_f64(double %a, double %b, double %c, double %d) {
 ; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ugt_f64:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpnlesd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_ugt_f64:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpnlesd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp ugt double %a, %b
 %2 = select i1 %1, double %c, double %d
 ret double %2
@@ -345,6 +555,19 @@ define float @select_fcmp_uge_f32(float %a, float %b, float %c, float %d) {
 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_uge_f32:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpnltss %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_uge_f32:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpnltss %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp uge float %a, %b
 %2 = select i1 %1, float %c, float %d
 ret float %2
@@ -365,6 +588,19 @@ define double @select_fcmp_uge_f64(double %a, double %b, double %c, double %d) {
 ; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_uge_f64:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpnltsd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_uge_f64:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpnltsd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp uge double %a, %b
 %2 = select i1 %1, double %c, double %d
 ret double %2
@@ -386,6 +622,19 @@ define float @select_fcmp_ult_f32(float %a, float %b, float %c, float %d) {
 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ult_f32:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpnless %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_ult_f32:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpnless %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp ult float %a, %b
 %2 = select i1 %1, float %c, float %d
 ret float %2
@@ -407,6 +656,19 @@ define double @select_fcmp_ult_f64(double %a, double %b, double %c, double %d) {
 ; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ult_f64:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpnlesd %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_ult_f64:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpnlesd %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp ult double %a, %b
 %2 = select i1 %1, double %c, double %d
 ret double %2
@@ -428,6 +690,19 @@ define float @select_fcmp_ule_f32(float %a, float %b, float %c, float %d) {
 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ule_f32:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpnltss %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_ule_f32:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpnltss %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp ule float %a, %b
 %2 = select i1 %1, float %c, float %d
 ret float %2
@@ -449,6 +724,19 @@ define double @select_fcmp_ule_f64(double %a, double %b, double %c, double %d) {
 ; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_ule_f64:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpnltsd %xmm0, %xmm1, %k1
+; AVX512SLOW-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_ule_f64:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpnltsd %xmm0, %xmm1, %xmm0
+; AVX512FAST-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp ule double %a, %b
 %2 = select i1 %1, double %c, double %d
 ret double %2
@@ -469,6 +757,19 @@ define float @select_fcmp_une_f32(float %a, float %b, float %c, float %d) {
 ; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_une_f32:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpneqss %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovss %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovaps %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_une_f32:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpneqss %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp une float %a, %b
 %2 = select i1 %1, float %c, float %d
 ret float %2
@@ -489,6 +790,19 @@ define double @select_fcmp_une_f64(double %a, double %b, double %c, double %d) {
 ; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
 ; AVX-NEXT: retq
 ;
+; AVX512SLOW-LABEL: select_fcmp_une_f64:
+; AVX512SLOW: # BB#0:
+; AVX512SLOW-NEXT: vcmpneqsd %xmm1, %xmm0, %k1
+; AVX512SLOW-NEXT: vmovsd %xmm2, %xmm0, %xmm3 {%k1}
+; AVX512SLOW-NEXT: vmovapd %xmm3, %xmm0
+; AVX512SLOW-NEXT: retq
+;
+; AVX512FAST-LABEL: select_fcmp_une_f64:
+; AVX512FAST: # BB#0:
+; AVX512FAST-NEXT: vcmpneqsd %xmm1, %xmm0, %xmm0
+; AVX512FAST-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
+; AVX512FAST-NEXT: retq
+;
 %1 = fcmp une double %a, %b
 %2 = select i1 %1, double %c, double %d
 ret double %2

