Diffstat (limited to 'llvm/test/CodeGen/X86/machine-combiner.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/machine-combiner.ll | 100
1 file changed, 50 insertions, 50 deletions
diff --git a/llvm/test/CodeGen/X86/machine-combiner.ll b/llvm/test/CodeGen/X86/machine-combiner.ll
index 048d30b6b24..d634dbb6569 100644
--- a/llvm/test/CodeGen/X86/machine-combiner.ll
+++ b/llvm/test/CodeGen/X86/machine-combiner.ll
@@ -11,14 +11,14 @@
 
 define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_adds1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    addss %xmm3, %xmm2
 ; SSE-NEXT:    addss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
@@ -31,14 +31,14 @@ define float @reassociate_adds1(float %x0, float %x1, float %x2, float %x3) {
 
 define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_adds2:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    addss %xmm3, %xmm2
 ; SSE-NEXT:    addss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds2:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
@@ -51,14 +51,14 @@ define float @reassociate_adds2(float %x0, float %x1, float %x2, float %x3) {
 
 define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_adds3:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    addss %xmm3, %xmm2
 ; SSE-NEXT:    addss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds3:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
@@ -71,14 +71,14 @@ define float @reassociate_adds3(float %x0, float %x1, float %x2, float %x3) {
 
 define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_adds4:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    addss %xmm3, %xmm2
 ; SSE-NEXT:    addss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds4:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
@@ -94,7 +94,7 @@ define float @reassociate_adds4(float %x0, float %x1, float %x2, float %x3) {
 
 define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, float %x4, float %x5, float %x6, float %x7) {
 ; SSE-LABEL: reassociate_adds5:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addss %xmm1, %xmm0
 ; SSE-NEXT:    addss %xmm3, %xmm2
 ; SSE-NEXT:    addss %xmm2, %xmm0
@@ -105,7 +105,7 @@ define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, floa
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds5:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
@@ -131,14 +131,14 @@ define float @reassociate_adds5(float %x0, float %x1, float %x2, float %x3, floa
 
 define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_adds6:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    addss %xmm3, %xmm2
 ; SSE-NEXT:    addss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds6:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
@@ -153,14 +153,14 @@ define float @reassociate_adds6(float %x0, float %x1, float %x2, float %x3) {
 
 define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_muls1:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    mulss %xmm3, %xmm2
 ; SSE-NEXT:    mulss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_muls1:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmulss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmulss %xmm1, %xmm0, %xmm0
@@ -175,14 +175,14 @@ define float @reassociate_muls1(float %x0, float %x1, float %x2, float %x3) {
 
 define double @reassociate_adds_double(double %x0, double %x1, double %x2, double %x3) {
 ; SSE-LABEL: reassociate_adds_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm1, %xmm0
 ; SSE-NEXT:    addsd %xmm3, %xmm2
 ; SSE-NEXT:    addsd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds_double:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddsd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
@@ -197,14 +197,14 @@ define double @reassociate_adds_double(double %x0, double %x1, double %x2, doubl
 
 define double @reassociate_muls_double(double %x0, double %x1, double %x2, double %x3) {
 ; SSE-LABEL: reassociate_muls_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm1, %xmm0
 ; SSE-NEXT:    mulsd %xmm3, %xmm2
 ; SSE-NEXT:    mulsd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_muls_double:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmulsd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmulsd %xmm1, %xmm0, %xmm0
@@ -219,14 +219,14 @@ define double @reassociate_muls_double(double %x0, double %x1, double %x2, doubl
 
 define <4 x float> @reassociate_adds_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
 ; SSE-LABEL: reassociate_adds_v4f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulps %xmm1, %xmm0
 ; SSE-NEXT:    addps %xmm3, %xmm2
 ; SSE-NEXT:    addps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds_v4f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddps %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
@@ -241,14 +241,14 @@ define <4 x float> @reassociate_adds_v4f32(<4 x float> %x0, <4 x float> %x1, <4
 
 define <2 x double> @reassociate_adds_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
 ; SSE-LABEL: reassociate_adds_v2f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    mulpd %xmm1, %xmm0
 ; SSE-NEXT:    addpd %xmm3, %xmm2
 ; SSE-NEXT:    addpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_adds_v2f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vaddpd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
@@ -263,14 +263,14 @@ define <2 x double> @reassociate_adds_v2f64(<2 x double> %x0, <2 x double> %x1,
 
 define <4 x float> @reassociate_muls_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
 ; SSE-LABEL: reassociate_muls_v4f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addps %xmm1, %xmm0
 ; SSE-NEXT:    mulps %xmm3, %xmm2
 ; SSE-NEXT:    mulps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_muls_v4f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmulps %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmulps %xmm1, %xmm0, %xmm0
@@ -285,14 +285,14 @@ define <4 x float> @reassociate_muls_v4f32(<4 x float> %x0, <4 x float> %x1, <4
 
 define <2 x double> @reassociate_muls_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
 ; SSE-LABEL: reassociate_muls_v2f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addpd %xmm1, %xmm0
 ; SSE-NEXT:    mulpd %xmm3, %xmm2
 ; SSE-NEXT:    mulpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_muls_v2f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmulpd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmulpd %xmm1, %xmm0, %xmm0
@@ -307,7 +307,7 @@ define <2 x double> @reassociate_muls_v2f64(<2 x double> %x0, <2 x double> %x1,
 
 define <8 x float> @reassociate_adds_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
 ; AVX-LABEL: reassociate_adds_v8f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vaddps %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vaddps %ymm1, %ymm0, %ymm0
@@ -322,7 +322,7 @@ define <8 x float> @reassociate_adds_v8f32(<8 x float> %x0, <8 x float> %x1, <8
 
 define <4 x double> @reassociate_adds_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
 ; AVX-LABEL: reassociate_adds_v4f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vaddpd %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
@@ -337,7 +337,7 @@ define <4 x double> @reassociate_adds_v4f64(<4 x double> %x0, <4 x double> %x1,
 
 define <8 x float> @reassociate_muls_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
 ; AVX-LABEL: reassociate_muls_v8f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vmulps %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vmulps %ymm1, %ymm0, %ymm0
@@ -352,7 +352,7 @@ define <8 x float> @reassociate_muls_v8f32(<8 x float> %x0, <8 x float> %x1, <8
 
 define <4 x double> @reassociate_muls_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
 ; AVX-LABEL: reassociate_muls_v4f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vmulpd %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vmulpd %ymm1, %ymm0, %ymm0
@@ -367,14 +367,14 @@ define <4 x double> @reassociate_muls_v4f64(<4 x double> %x0, <4 x double> %x1,
 
 define float @reassociate_mins_single(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_mins_single:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    minss %xmm3, %xmm2
 ; SSE-NEXT:    minss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_mins_single:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vminss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vminss %xmm1, %xmm0, %xmm0
@@ -391,14 +391,14 @@ define float @reassociate_mins_single(float %x0, float %x1, float %x2, float %x3
 
 define float @reassociate_maxs_single(float %x0, float %x1, float %x2, float %x3) {
 ; SSE-LABEL: reassociate_maxs_single:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divss %xmm1, %xmm0
 ; SSE-NEXT:    maxss %xmm3, %xmm2
 ; SSE-NEXT:    maxss %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_maxs_single:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivss %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmaxss %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
@@ -415,14 +415,14 @@ define float @reassociate_maxs_single(float %x0, float %x1, float %x2, float %x3
 
 define double @reassociate_mins_double(double %x0, double %x1, double %x2, double %x3) {
 ; SSE-LABEL: reassociate_mins_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm1, %xmm0
 ; SSE-NEXT:    minsd %xmm3, %xmm2
 ; SSE-NEXT:    minsd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_mins_double:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vminsd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vminsd %xmm1, %xmm0, %xmm0
@@ -439,14 +439,14 @@ define double @reassociate_mins_double(double %x0, double %x1, double %x2, doubl
 
 define double @reassociate_maxs_double(double %x0, double %x1, double %x2, double %x3) {
 ; SSE-LABEL: reassociate_maxs_double:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    divsd %xmm1, %xmm0
 ; SSE-NEXT:    maxsd %xmm3, %xmm2
 ; SSE-NEXT:    maxsd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_maxs_double:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vdivsd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmaxsd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmaxsd %xmm1, %xmm0, %xmm0
@@ -463,14 +463,14 @@ define double @reassociate_maxs_double(double %x0, double %x1, double %x2, doubl
 
 define <4 x float> @reassociate_mins_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
 ; SSE-LABEL: reassociate_mins_v4f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addps %xmm1, %xmm0
 ; SSE-NEXT:    minps %xmm3, %xmm2
 ; SSE-NEXT:    minps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_mins_v4f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vminps %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vminps %xmm1, %xmm0, %xmm0
@@ -487,14 +487,14 @@ define <4 x float> @reassociate_mins_v4f32(<4 x float> %x0, <4 x float> %x1, <4
 
 define <4 x float> @reassociate_maxs_v4f32(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, <4 x float> %x3) {
 ; SSE-LABEL: reassociate_maxs_v4f32:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addps %xmm1, %xmm0
 ; SSE-NEXT:    maxps %xmm3, %xmm2
 ; SSE-NEXT:    maxps %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_maxs_v4f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmaxps %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmaxps %xmm1, %xmm0, %xmm0
@@ -511,14 +511,14 @@ define <4 x float> @reassociate_maxs_v4f32(<4 x float> %x0, <4 x float> %x1, <4
 
 define <2 x double> @reassociate_mins_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
 ; SSE-LABEL: reassociate_mins_v2f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addpd %xmm1, %xmm0
 ; SSE-NEXT:    minpd %xmm3, %xmm2
 ; SSE-NEXT:    minpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_mins_v2f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vminpd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vminpd %xmm1, %xmm0, %xmm0
@@ -535,14 +535,14 @@ define <2 x double> @reassociate_mins_v2f64(<2 x double> %x0, <2 x double> %x1,
 
 define <2 x double> @reassociate_maxs_v2f64(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, <2 x double> %x3) {
 ; SSE-LABEL: reassociate_maxs_v2f64:
-; SSE:       # BB#0:
+; SSE:       # %bb.0:
 ; SSE-NEXT:    addpd %xmm1, %xmm0
 ; SSE-NEXT:    maxpd %xmm3, %xmm2
 ; SSE-NEXT:    maxpd %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: reassociate_maxs_v2f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    vmaxpd %xmm3, %xmm2, %xmm1
 ; AVX-NEXT:    vmaxpd %xmm1, %xmm0, %xmm0
@@ -559,7 +559,7 @@ define <2 x double> @reassociate_maxs_v2f64(<2 x double> %x0, <2 x double> %x1,
 
 define <8 x float> @reassociate_mins_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
 ; AVX-LABEL: reassociate_mins_v8f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vminps %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vminps %ymm1, %ymm0, %ymm0
@@ -576,7 +576,7 @@ define <8 x float> @reassociate_mins_v8f32(<8 x float> %x0, <8 x float> %x1, <8
 
 define <8 x float> @reassociate_maxs_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
 ; AVX-LABEL: reassociate_maxs_v8f32:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddps %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vmaxps %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vmaxps %ymm1, %ymm0, %ymm0
@@ -593,7 +593,7 @@ define <8 x float> @reassociate_maxs_v8f32(<8 x float> %x0, <8 x float> %x1, <8
 
 define <4 x double> @reassociate_mins_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
 ; AVX-LABEL: reassociate_mins_v4f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vminpd %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vminpd %ymm1, %ymm0, %ymm0
@@ -610,7 +610,7 @@ define <4 x double> @reassociate_mins_v4f64(<4 x double> %x0, <4 x double> %x1,
 
 define <4 x double> @reassociate_maxs_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
 ; AVX-LABEL: reassociate_maxs_v4f64:
-; AVX:       # BB#0:
+; AVX:       # %bb.0:
 ; AVX-NEXT:    vaddpd %ymm1, %ymm0, %ymm0
 ; AVX-NEXT:    vmaxpd %ymm3, %ymm2, %ymm1
 ; AVX-NEXT:    vmaxpd %ymm1, %ymm0, %ymm0