path: root/llvm/test/CodeGen/X86/machine-combiner.ll
author    Craig Topper <craig.topper@intel.com>  2019-06-02 00:07:48 +0000
committer Craig Topper <craig.topper@intel.com>  2019-06-02 00:07:48 +0000
commit    eeaecc63e933335c305263702eb1e3f9caab1f6c (patch)
tree      a2f257985da8aeb809a2ed1263e12ef233a9d8fd /llvm/test/CodeGen/X86/machine-combiner.ll
parent    5a2a054028c27d0716faf9b513f3fb5c8723400f (diff)
[X86] Add avx512 command lines and test cases to machine-combiner.ll
llvm-svn: 362307
Diffstat (limited to 'llvm/test/CodeGen/X86/machine-combiner.ll')
-rw-r--r-- llvm/test/CodeGen/X86/machine-combiner.ll | 591
1 file changed, 543 insertions(+), 48 deletions(-)
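
The added test cases follow the pattern already used throughout this file: an independent fmul/fadd feeding a chain of dependent fadd/fmul/min/max ops, which the machine combiner is expected to reassociate so the chain's critical path is shortened. As a minimal illustrative sketch of that pattern (a restatement for orientation, not part of the patch; the function name is hypothetical), the new 512-bit add case has this shape in IR:

; Without reassociation, %t0 -> %t1 -> %t2 is a serial dependency chain.
; After reassociation, (%x3 + %x2) can be computed in parallel with %t0.
define <16 x float> @sketch_reassoc_adds(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, <16 x float> %x3) {
  %t0 = fmul <16 x float> %x0, %x1
  %t1 = fadd <16 x float> %x2, %t0
  %t2 = fadd <16 x float> %x3, %t1
  ret <16 x float> %t2
}

On the new AVX512 RUN lines the checks below also show the multiply and the first add being emitted as a single vfmadd213ps/vfmadd213pd before the remaining add, since FMA is available on those targets.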
diff --git a/llvm/test/CodeGen/X86/machine-combiner.ll b/llvm/test/CodeGen/X86/machine-combiner.ll
index a1b2fba1e49..162cda8c867 100644
--- a/llvm/test/CodeGen/X86/machine-combiner.ll
+++ b/llvm/test/CodeGen/X86/machine-combiner.ll
@@ -1,10 +1,13 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse -enable-unsafe-fp-math -machine-combiner-verify-pattern-order=true < %s | FileCheck %s --check-prefix=SSE
-; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx -enable-unsafe-fp-math -machine-combiner-verify-pattern-order=true < %s | FileCheck %s --check-prefix=AVX
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx -enable-unsafe-fp-math -machine-combiner-verify-pattern-order=true < %s | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx512vl -enable-unsafe-fp-math -machine-combiner-verify-pattern-order=true < %s | FileCheck %s --check-prefixes=AVX,AVX512
; Incremental updates of the instruction depths should be enough for this test
; case.
; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse -enable-unsafe-fp-math -machine-combiner-inc-threshold=0 < %s | FileCheck %s --check-prefix=SSE
-; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx -enable-unsafe-fp-math -machine-combiner-inc-threshold=0 < %s | FileCheck %s --check-prefix=AVX
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx -enable-unsafe-fp-math -machine-combiner-inc-threshold=0 < %s | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx512vl -enable-unsafe-fp-math -machine-combiner-inc-threshold=0 < %s | FileCheck %s --check-prefixes=AVX,AVX512
; Verify that the first two adds are independent regardless of how the inputs are
; commuted. The destination registers are used as source registers for the third add.
@@ -225,12 +228,18 @@ define <4 x float> @reassociate_adds_v4f32(<4 x float> %x0, <4 x float> %x1, <4
; SSE-NEXT: addps %xmm2, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: reassociate_adds_v4f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vaddps %xmm3, %xmm2, %xmm1
-; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: reassociate_adds_v4f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vaddps %xmm3, %xmm2, %xmm1
+; AVX1-NEXT: vaddps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: reassociate_adds_v4f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; AVX512-NEXT: vaddps %xmm0, %xmm3, %xmm0
+; AVX512-NEXT: retq
%t0 = fmul <4 x float> %x0, %x1
%t1 = fadd <4 x float> %x2, %t0
%t2 = fadd <4 x float> %x3, %t1
@@ -247,12 +256,18 @@ define <2 x double> @reassociate_adds_v2f64(<2 x double> %x0, <2 x double> %x1,
; SSE-NEXT: addpd %xmm2, %xmm0
; SSE-NEXT: retq
;
-; AVX-LABEL: reassociate_adds_v2f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vmulpd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vaddpd %xmm3, %xmm2, %xmm1
-; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: reassociate_adds_v2f64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmulpd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vaddpd %xmm3, %xmm2, %xmm1
+; AVX1-NEXT: vaddpd %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: reassociate_adds_v2f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; AVX512-NEXT: vaddpd %xmm0, %xmm3, %xmm0
+; AVX512-NEXT: retq
%t0 = fmul <2 x double> %x0, %x1
%t1 = fadd <2 x double> %x2, %t0
%t2 = fadd <2 x double> %x3, %t1
@@ -306,12 +321,28 @@ define <2 x double> @reassociate_muls_v2f64(<2 x double> %x0, <2 x double> %x1,
; Verify that AVX 256-bit vector single-precision adds are reassociated.
define <8 x float> @reassociate_adds_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
-; AVX-LABEL: reassociate_adds_v8f32:
-; AVX: # %bb.0:
-; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vaddps %ymm3, %ymm2, %ymm1
-; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
-; AVX-NEXT: retq
+; SSE-LABEL: reassociate_adds_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: mulps %xmm2, %xmm0
+; SSE-NEXT: mulps %xmm3, %xmm1
+; SSE-NEXT: addps %xmm6, %xmm4
+; SSE-NEXT: addps %xmm4, %xmm0
+; SSE-NEXT: addps %xmm7, %xmm5
+; SSE-NEXT: addps %xmm5, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: reassociate_adds_v8f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vaddps %ymm3, %ymm2, %ymm1
+; AVX1-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: reassociate_adds_v8f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
+; AVX512-NEXT: vaddps %ymm0, %ymm3, %ymm0
+; AVX512-NEXT: retq
%t0 = fmul <8 x float> %x0, %x1
%t1 = fadd <8 x float> %x2, %t0
%t2 = fadd <8 x float> %x3, %t1
@@ -321,12 +352,28 @@ define <8 x float> @reassociate_adds_v8f32(<8 x float> %x0, <8 x float> %x1, <8
; Verify that AVX 256-bit vector double-precision adds are reassociated.
define <4 x double> @reassociate_adds_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
-; AVX-LABEL: reassociate_adds_v4f64:
-; AVX: # %bb.0:
-; AVX-NEXT: vmulpd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vaddpd %ymm3, %ymm2, %ymm1
-; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
-; AVX-NEXT: retq
+; SSE-LABEL: reassociate_adds_v4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: mulpd %xmm2, %xmm0
+; SSE-NEXT: mulpd %xmm3, %xmm1
+; SSE-NEXT: addpd %xmm6, %xmm4
+; SSE-NEXT: addpd %xmm4, %xmm0
+; SSE-NEXT: addpd %xmm7, %xmm5
+; SSE-NEXT: addpd %xmm5, %xmm1
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: reassociate_adds_v4f64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vaddpd %ymm3, %ymm2, %ymm1
+; AVX1-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: reassociate_adds_v4f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
+; AVX512-NEXT: vaddpd %ymm0, %ymm3, %ymm0
+; AVX512-NEXT: retq
%t0 = fmul <4 x double> %x0, %x1
%t1 = fadd <4 x double> %x2, %t0
%t2 = fadd <4 x double> %x3, %t1
@@ -336,6 +383,16 @@ define <4 x double> @reassociate_adds_v4f64(<4 x double> %x0, <4 x double> %x1,
; Verify that AVX 256-bit vector single-precision multiplies are reassociated.
define <8 x float> @reassociate_muls_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
+; SSE-LABEL: reassociate_muls_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: addps %xmm2, %xmm0
+; SSE-NEXT: addps %xmm3, %xmm1
+; SSE-NEXT: mulps %xmm6, %xmm4
+; SSE-NEXT: mulps %xmm4, %xmm0
+; SSE-NEXT: mulps %xmm7, %xmm5
+; SSE-NEXT: mulps %xmm5, %xmm1
+; SSE-NEXT: retq
+;
; AVX-LABEL: reassociate_muls_v8f32:
; AVX: # %bb.0:
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
@@ -351,6 +408,16 @@ define <8 x float> @reassociate_muls_v8f32(<8 x float> %x0, <8 x float> %x1, <8
; Verify that AVX 256-bit vector double-precision multiplies are reassociated.
define <4 x double> @reassociate_muls_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
+; SSE-LABEL: reassociate_muls_v4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: addpd %xmm2, %xmm0
+; SSE-NEXT: addpd %xmm3, %xmm1
+; SSE-NEXT: mulpd %xmm6, %xmm4
+; SSE-NEXT: mulpd %xmm4, %xmm0
+; SSE-NEXT: mulpd %xmm7, %xmm5
+; SSE-NEXT: mulpd %xmm5, %xmm1
+; SSE-NEXT: retq
+;
; AVX-LABEL: reassociate_muls_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -363,6 +430,168 @@ define <4 x double> @reassociate_muls_v4f64(<4 x double> %x0, <4 x double> %x1,
ret <4 x double> %t2
}
+; Verify that AVX512 512-bit vector single-precision adds are reassociated.
+
+define <16 x float> @reassociate_adds_v16f32(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, <16 x float> %x3) {
+; SSE-LABEL: reassociate_adds_v16f32:
+; SSE: # %bb.0:
+; SSE-NEXT: mulps %xmm4, %xmm0
+; SSE-NEXT: mulps %xmm5, %xmm1
+; SSE-NEXT: mulps %xmm6, %xmm2
+; SSE-NEXT: mulps %xmm7, %xmm3
+; SSE-NEXT: addps {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: addps {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: addps {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: addps {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: addps {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: addps {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: addps {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: addps {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: reassociate_adds_v16f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vmulps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vaddps %ymm6, %ymm4, %ymm2
+; AVX1-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vaddps %ymm7, %ymm5, %ymm2
+; AVX1-NEXT: vaddps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: reassociate_adds_v16f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
+; AVX512-NEXT: vaddps %zmm0, %zmm3, %zmm0
+; AVX512-NEXT: retq
+ %t0 = fmul <16 x float> %x0, %x1
+ %t1 = fadd <16 x float> %x2, %t0
+ %t2 = fadd <16 x float> %x3, %t1
+ ret <16 x float> %t2
+}
+
+; Verify that AVX512 512-bit vector double-precision adds are reassociated.
+
+define <8 x double> @reassociate_adds_v8f64(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, <8 x double> %x3) {
+; SSE-LABEL: reassociate_adds_v8f64:
+; SSE: # %bb.0:
+; SSE-NEXT: mulpd %xmm4, %xmm0
+; SSE-NEXT: mulpd %xmm5, %xmm1
+; SSE-NEXT: mulpd %xmm6, %xmm2
+; SSE-NEXT: mulpd %xmm7, %xmm3
+; SSE-NEXT: addpd {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: addpd {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: addpd {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: addpd {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: addpd {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: addpd {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: addpd {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: addpd {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: reassociate_adds_v8f64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmulpd %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vmulpd %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vaddpd %ymm6, %ymm4, %ymm2
+; AVX1-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vaddpd %ymm7, %ymm5, %ymm2
+; AVX1-NEXT: vaddpd %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: reassociate_adds_v8f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vfmadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
+; AVX512-NEXT: vaddpd %zmm0, %zmm3, %zmm0
+; AVX512-NEXT: retq
+ %t0 = fmul <8 x double> %x0, %x1
+ %t1 = fadd <8 x double> %x2, %t0
+ %t2 = fadd <8 x double> %x3, %t1
+ ret <8 x double> %t2
+}
+
+; Verify that AVX512 512-bit vector single-precision multiplies are reassociated.
+
+define <16 x float> @reassociate_muls_v16f32(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, <16 x float> %x3) {
+; SSE-LABEL: reassociate_muls_v16f32:
+; SSE: # %bb.0:
+; SSE-NEXT: addps %xmm4, %xmm0
+; SSE-NEXT: addps %xmm5, %xmm1
+; SSE-NEXT: addps %xmm6, %xmm2
+; SSE-NEXT: addps %xmm7, %xmm3
+; SSE-NEXT: mulps {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: mulps {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: mulps {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: mulps {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: mulps {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: mulps {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: mulps {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: mulps {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: reassociate_muls_v16f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vaddps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vmulps %ymm6, %ymm4, %ymm2
+; AVX1-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vmulps %ymm7, %ymm5, %ymm2
+; AVX1-NEXT: vmulps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: reassociate_muls_v16f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vmulps %zmm3, %zmm2, %zmm1
+; AVX512-NEXT: vmulps %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %t0 = fadd <16 x float> %x0, %x1
+ %t1 = fmul <16 x float> %x2, %t0
+ %t2 = fmul <16 x float> %x3, %t1
+ ret <16 x float> %t2
+}
+
+; Verify that AVX512 512-bit vector double-precision multiplies are reassociated.
+
+define <8 x double> @reassociate_muls_v8f64(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, <8 x double> %x3) {
+; SSE-LABEL: reassociate_muls_v8f64:
+; SSE: # %bb.0:
+; SSE-NEXT: addpd %xmm4, %xmm0
+; SSE-NEXT: addpd %xmm5, %xmm1
+; SSE-NEXT: addpd %xmm6, %xmm2
+; SSE-NEXT: addpd %xmm7, %xmm3
+; SSE-NEXT: mulpd {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: mulpd {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: mulpd {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: mulpd {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: mulpd {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: mulpd {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: mulpd {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: mulpd {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: reassociate_muls_v8f64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vaddpd %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vmulpd %ymm6, %ymm4, %ymm2
+; AVX1-NEXT: vmulpd %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vmulpd %ymm7, %ymm5, %ymm2
+; AVX1-NEXT: vmulpd %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: reassociate_muls_v8f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vmulpd %zmm3, %zmm2, %zmm1
+; AVX512-NEXT: vmulpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %t0 = fadd <8 x double> %x0, %x1
+ %t1 = fmul <8 x double> %x2, %t0
+ %t2 = fmul <8 x double> %x3, %t1
+ ret <8 x double> %t2
+}
+
; Verify that SSE and AVX scalar single-precision minimum ops are reassociated.
define float @reassociate_mins_single(float %x0, float %x1, float %x2, float %x3) {
@@ -558,6 +787,16 @@ define <2 x double> @reassociate_maxs_v2f64(<2 x double> %x0, <2 x double> %x1,
; Verify that AVX 256-bit vector single-precision minimum ops are reassociated.
define <8 x float> @reassociate_mins_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
+; SSE-LABEL: reassociate_mins_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: addps %xmm2, %xmm0
+; SSE-NEXT: addps %xmm3, %xmm1
+; SSE-NEXT: minps %xmm6, %xmm4
+; SSE-NEXT: minps %xmm4, %xmm0
+; SSE-NEXT: minps %xmm7, %xmm5
+; SSE-NEXT: minps %xmm5, %xmm1
+; SSE-NEXT: retq
+;
; AVX-LABEL: reassociate_mins_v8f32:
; AVX: # %bb.0:
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
@@ -575,6 +814,16 @@ define <8 x float> @reassociate_mins_v8f32(<8 x float> %x0, <8 x float> %x1, <8
; Verify that AVX 256-bit vector single-precision maximum ops are reassociated.
define <8 x float> @reassociate_maxs_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
+; SSE-LABEL: reassociate_maxs_v8f32:
+; SSE: # %bb.0:
+; SSE-NEXT: addps %xmm2, %xmm0
+; SSE-NEXT: addps %xmm3, %xmm1
+; SSE-NEXT: maxps %xmm6, %xmm4
+; SSE-NEXT: maxps %xmm4, %xmm0
+; SSE-NEXT: maxps %xmm7, %xmm5
+; SSE-NEXT: maxps %xmm5, %xmm1
+; SSE-NEXT: retq
+;
; AVX-LABEL: reassociate_maxs_v8f32:
; AVX: # %bb.0:
; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
@@ -592,6 +841,16 @@ define <8 x float> @reassociate_maxs_v8f32(<8 x float> %x0, <8 x float> %x1, <8
; Verify that AVX 256-bit vector double-precision minimum ops are reassociated.
define <4 x double> @reassociate_mins_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
+; SSE-LABEL: reassociate_mins_v4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: addpd %xmm2, %xmm0
+; SSE-NEXT: addpd %xmm3, %xmm1
+; SSE-NEXT: minpd %xmm6, %xmm4
+; SSE-NEXT: minpd %xmm4, %xmm0
+; SSE-NEXT: minpd %xmm7, %xmm5
+; SSE-NEXT: minpd %xmm5, %xmm1
+; SSE-NEXT: retq
+;
; AVX-LABEL: reassociate_mins_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -609,6 +868,16 @@ define <4 x double> @reassociate_mins_v4f64(<4 x double> %x0, <4 x double> %x1,
; Verify that AVX 256-bit vector double-precision maximum ops are reassociated.
define <4 x double> @reassociate_maxs_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
+; SSE-LABEL: reassociate_maxs_v4f64:
+; SSE: # %bb.0:
+; SSE-NEXT: addpd %xmm2, %xmm0
+; SSE-NEXT: addpd %xmm3, %xmm1
+; SSE-NEXT: maxpd %xmm6, %xmm4
+; SSE-NEXT: maxpd %xmm4, %xmm0
+; SSE-NEXT: maxpd %xmm7, %xmm5
+; SSE-NEXT: maxpd %xmm5, %xmm1
+; SSE-NEXT: retq
+;
; AVX-LABEL: reassociate_maxs_v4f64:
; AVX: # %bb.0:
; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
@@ -623,24 +892,223 @@ define <4 x double> @reassociate_maxs_v4f64(<4 x double> %x0, <4 x double> %x1,
ret <4 x double> %sel2
}
+; Verify that AVX512 512-bit vector single-precision minimum ops are reassociated.
+
+define <16 x float> @reassociate_mins_v16f32(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, <16 x float> %x3) {
+; SSE-LABEL: reassociate_mins_v16f32:
+; SSE: # %bb.0:
+; SSE-NEXT: addps %xmm4, %xmm0
+; SSE-NEXT: addps %xmm5, %xmm1
+; SSE-NEXT: addps %xmm6, %xmm2
+; SSE-NEXT: addps %xmm7, %xmm3
+; SSE-NEXT: minps {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: minps {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: minps {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: minps {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: minps {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: minps {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: minps {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: minps {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: reassociate_mins_v16f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vaddps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vminps %ymm6, %ymm4, %ymm2
+; AVX1-NEXT: vminps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vminps %ymm7, %ymm5, %ymm2
+; AVX1-NEXT: vminps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: reassociate_mins_v16f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vminps %zmm3, %zmm2, %zmm1
+; AVX512-NEXT: vminps %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %t0 = fadd <16 x float> %x0, %x1
+ %cmp1 = fcmp olt <16 x float> %x2, %t0
+ %sel1 = select <16 x i1> %cmp1, <16 x float> %x2, <16 x float> %t0
+ %cmp2 = fcmp olt <16 x float> %x3, %sel1
+ %sel2 = select <16 x i1> %cmp2, <16 x float> %x3, <16 x float> %sel1
+ ret <16 x float> %sel2
+}
+
+; Verify that AVX512 512-bit vector single-precision maximum ops are reassociated.
+
+define <16 x float> @reassociate_maxs_v16f32(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, <16 x float> %x3) {
+; SSE-LABEL: reassociate_maxs_v16f32:
+; SSE: # %bb.0:
+; SSE-NEXT: addps %xmm4, %xmm0
+; SSE-NEXT: addps %xmm5, %xmm1
+; SSE-NEXT: addps %xmm6, %xmm2
+; SSE-NEXT: addps %xmm7, %xmm3
+; SSE-NEXT: maxps {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: maxps {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: maxps {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: maxps {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: maxps {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: maxps {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: maxps {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: maxps {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: reassociate_maxs_v16f32:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vaddps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vaddps %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vmaxps %ymm6, %ymm4, %ymm2
+; AVX1-NEXT: vmaxps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vmaxps %ymm7, %ymm5, %ymm2
+; AVX1-NEXT: vmaxps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: reassociate_maxs_v16f32:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vaddps %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vmaxps %zmm3, %zmm2, %zmm1
+; AVX512-NEXT: vmaxps %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %t0 = fadd <16 x float> %x0, %x1
+ %cmp1 = fcmp ogt <16 x float> %x2, %t0
+ %sel1 = select <16 x i1> %cmp1, <16 x float> %x2, <16 x float> %t0
+ %cmp2 = fcmp ogt <16 x float> %x3, %sel1
+ %sel2 = select <16 x i1> %cmp2, <16 x float> %x3, <16 x float> %sel1
+ ret <16 x float> %sel2
+}
+
+; Verify that AVX512 512-bit vector double-precision minimum ops are reassociated.
+
+define <8 x double> @reassociate_mins_v8f64(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, <8 x double> %x3) {
+; SSE-LABEL: reassociate_mins_v8f64:
+; SSE: # %bb.0:
+; SSE-NEXT: addpd %xmm4, %xmm0
+; SSE-NEXT: addpd %xmm5, %xmm1
+; SSE-NEXT: addpd %xmm6, %xmm2
+; SSE-NEXT: addpd %xmm7, %xmm3
+; SSE-NEXT: minpd {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: minpd {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: minpd {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: minpd {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: minpd {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: minpd {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: minpd {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: minpd {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: reassociate_mins_v8f64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vaddpd %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vminpd %ymm6, %ymm4, %ymm2
+; AVX1-NEXT: vminpd %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vminpd %ymm7, %ymm5, %ymm2
+; AVX1-NEXT: vminpd %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: reassociate_mins_v8f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vminpd %zmm3, %zmm2, %zmm1
+; AVX512-NEXT: vminpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %t0 = fadd <8 x double> %x0, %x1
+ %cmp1 = fcmp olt <8 x double> %x2, %t0
+ %sel1 = select <8 x i1> %cmp1, <8 x double> %x2, <8 x double> %t0
+ %cmp2 = fcmp olt <8 x double> %x3, %sel1
+ %sel2 = select <8 x i1> %cmp2, <8 x double> %x3, <8 x double> %sel1
+ ret <8 x double> %sel2
+}
+
+; Verify that AVX512 512-bit vector double-precision maximum ops are reassociated.
+
+define <8 x double> @reassociate_maxs_v8f64(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, <8 x double> %x3) {
+; SSE-LABEL: reassociate_maxs_v8f64:
+; SSE: # %bb.0:
+; SSE-NEXT: addpd %xmm4, %xmm0
+; SSE-NEXT: addpd %xmm5, %xmm1
+; SSE-NEXT: addpd %xmm6, %xmm2
+; SSE-NEXT: addpd %xmm7, %xmm3
+; SSE-NEXT: maxpd {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: maxpd {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: maxpd {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: maxpd {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: maxpd {{[0-9]+}}(%rsp), %xmm0
+; SSE-NEXT: maxpd {{[0-9]+}}(%rsp), %xmm1
+; SSE-NEXT: maxpd {{[0-9]+}}(%rsp), %xmm2
+; SSE-NEXT: maxpd {{[0-9]+}}(%rsp), %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: reassociate_maxs_v8f64:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vaddpd %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vaddpd %ymm3, %ymm1, %ymm1
+; AVX1-NEXT: vmaxpd %ymm6, %ymm4, %ymm2
+; AVX1-NEXT: vmaxpd %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vmaxpd %ymm7, %ymm5, %ymm2
+; AVX1-NEXT: vmaxpd %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: reassociate_maxs_v8f64:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vaddpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: vmaxpd %zmm3, %zmm2, %zmm1
+; AVX512-NEXT: vmaxpd %zmm1, %zmm0, %zmm0
+; AVX512-NEXT: retq
+ %t0 = fadd <8 x double> %x0, %x1
+ %cmp1 = fcmp ogt <8 x double> %x2, %t0
+ %sel1 = select <8 x i1> %cmp1, <8 x double> %x2, <8 x double> %t0
+ %cmp2 = fcmp ogt <8 x double> %x3, %sel1
+ %sel2 = select <8 x i1> %cmp2, <8 x double> %x3, <8 x double> %sel1
+ ret <8 x double> %sel2
+}
+
; PR25016: https://llvm.org/bugs/show_bug.cgi?id=25016
; Verify that reassociation is not happening needlessly or wrongly.
declare double @bar()
define double @reassociate_adds_from_calls() {
+; SSE-LABEL: reassociate_adds_from_calls:
+; SSE: # %bb.0:
+; SSE-NEXT: subq $24, %rsp
+; SSE-NEXT: .cfi_def_cfa_offset 32
+; SSE-NEXT: callq bar
+; SSE-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: callq bar
+; SSE-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: callq bar
+; SSE-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
+; SSE-NEXT: callq bar
+; SSE-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
+; SSE-NEXT: # xmm1 = mem[0],zero
+; SSE-NEXT: addsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Folded Reload
+; SSE-NEXT: addsd (%rsp), %xmm0 # 8-byte Folded Reload
+; SSE-NEXT: addsd %xmm1, %xmm0
+; SSE-NEXT: addq $24, %rsp
+; SSE-NEXT: .cfi_def_cfa_offset 8
+; SSE-NEXT: retq
+;
; AVX-LABEL: reassociate_adds_from_calls:
-; AVX: callq bar
-; AVX-NEXT: vmovsd %xmm0, 16(%rsp)
-; AVX-NEXT: callq bar
-; AVX-NEXT: vmovsd %xmm0, 8(%rsp)
-; AVX-NEXT: callq bar
-; AVX-NEXT: vmovsd %xmm0, (%rsp)
-; AVX-NEXT: callq bar
-; AVX-NEXT: vmovsd 8(%rsp), %xmm1
-; AVX: vaddsd 16(%rsp), %xmm1, %xmm1
-; AVX-NEXT: vaddsd (%rsp), %xmm0, %xmm0
-; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX: # %bb.0:
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, (%rsp) # 8-byte Spill
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
+; AVX-NEXT: # xmm1 = mem[0],zero
+; AVX-NEXT: vaddsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 8-byte Folded Reload
+; AVX-NEXT: vaddsd (%rsp), %xmm0, %xmm0 # 8-byte Folded Reload
+; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
%x0 = call double @bar()
%x1 = call double @bar()
@@ -653,18 +1121,45 @@ define double @reassociate_adds_from_calls() {
}
define double @already_reassociated() {
+; SSE-LABEL: already_reassociated:
+; SSE: # %bb.0:
+; SSE-NEXT: subq $24, %rsp
+; SSE-NEXT: .cfi_def_cfa_offset 32
+; SSE-NEXT: callq bar
+; SSE-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: callq bar
+; SSE-NEXT: movsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; SSE-NEXT: callq bar
+; SSE-NEXT: movsd %xmm0, (%rsp) # 8-byte Spill
+; SSE-NEXT: callq bar
+; SSE-NEXT: movsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
+; SSE-NEXT: # xmm1 = mem[0],zero
+; SSE-NEXT: addsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Folded Reload
+; SSE-NEXT: addsd (%rsp), %xmm0 # 8-byte Folded Reload
+; SSE-NEXT: addsd %xmm1, %xmm0
+; SSE-NEXT: addq $24, %rsp
+; SSE-NEXT: .cfi_def_cfa_offset 8
+; SSE-NEXT: retq
+;
; AVX-LABEL: already_reassociated:
-; AVX: callq bar
-; AVX-NEXT: vmovsd %xmm0, 16(%rsp)
-; AVX-NEXT: callq bar
-; AVX-NEXT: vmovsd %xmm0, 8(%rsp)
-; AVX-NEXT: callq bar
-; AVX-NEXT: vmovsd %xmm0, (%rsp)
-; AVX-NEXT: callq bar
-; AVX-NEXT: vmovsd 8(%rsp), %xmm1
-; AVX: vaddsd 16(%rsp), %xmm1, %xmm1
-; AVX-NEXT: vaddsd (%rsp), %xmm0, %xmm0
-; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX: # %bb.0:
+; AVX-NEXT: subq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 32
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd %xmm0, (%rsp) # 8-byte Spill
+; AVX-NEXT: callq bar
+; AVX-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 8-byte Reload
+; AVX-NEXT: # xmm1 = mem[0],zero
+; AVX-NEXT: vaddsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 8-byte Folded Reload
+; AVX-NEXT: vaddsd (%rsp), %xmm0, %xmm0 # 8-byte Folded Reload
+; AVX-NEXT: vaddsd %xmm0, %xmm1, %xmm0
+; AVX-NEXT: addq $24, %rsp
+; AVX-NEXT: .cfi_def_cfa_offset 8
+; AVX-NEXT: retq
%x0 = call double @bar()
%x1 = call double @bar()