Diffstat (limited to 'llvm')
-rw-r--r--  llvm/test/CodeGen/X86/recip-fastmath.ll  | 402
-rw-r--r--  llvm/test/CodeGen/X86/recip-fastmath2.ll | 662
-rw-r--r--  llvm/test/CodeGen/X86/sqrt-fastmath.ll   | 350
3 files changed, 1337 insertions, 77 deletions
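
Editorial note, not part of the patch: the new v16f32 cases below check how `fdiv fast` by a reciprocal estimate is lowered when the `reciprocal-estimates` attribute allows one or two Newton-Raphson steps, split across two YMM registers (or folded to a plain vdivps on ZMM targets). As a minimal sketch of what the CHECK lines correspond to — assuming %x0 is the hardware estimate produced by rcpps/vrcpps, and with a hypothetical function name — one refinement step computes x1 = x0 + x0*(1 - a*x0); the two-step tests simply apply it twice, and the FMA variants fuse the subps/mulps/addps pairs into vfnmadd/vfmadd. (The sqrt-fastmath.ll changes exercise the analogous rsqrt refinement.)

; Illustrative sketch only: scalar IR for one reciprocal refinement step.
define float @recip_one_step_sketch(float %a, float %x0) {
  %ax0 = fmul fast float %a, %x0               ; mulps:  a * x0
  %err = fsub fast float 1.000000e+00, %ax0    ; subps:  1 - a*x0
  %cor = fmul fast float %x0, %err             ; mulps:  x0 * (1 - a*x0)
  %x1  = fadd fast float %x0, %cor             ; addps:  refined ~1/a
  ret float %x1
}
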
diff --git a/llvm/test/CodeGen/X86/recip-fastmath.ll b/llvm/test/CodeGen/X86/recip-fastmath.ll index b1531c9a9d6..14f3255e041 100644 --- a/llvm/test/CodeGen/X86/recip-fastmath.ll +++ b/llvm/test/CodeGen/X86/recip-fastmath.ll @@ -833,6 +833,408 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 { ret <8 x float> %div } +define <16 x float> @v16f32_no_estimate(<16 x float> %x) #0 { +; SSE-LABEL: v16f32_no_estimate: +; SSE: # %bb.0: +; SSE-NEXT: movaps {{.*#+}} xmm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; SSE-NEXT: movaps %xmm4, %xmm5 +; SSE-NEXT: divps %xmm0, %xmm5 +; SSE-NEXT: movaps %xmm4, %xmm6 +; SSE-NEXT: divps %xmm1, %xmm6 +; SSE-NEXT: movaps %xmm4, %xmm7 +; SSE-NEXT: divps %xmm2, %xmm7 +; SSE-NEXT: divps %xmm3, %xmm4 +; SSE-NEXT: movaps %xmm5, %xmm0 +; SSE-NEXT: movaps %xmm6, %xmm1 +; SSE-NEXT: movaps %xmm7, %xmm2 +; SSE-NEXT: movaps %xmm4, %xmm3 +; SSE-NEXT: retq +; +; AVX-RECIP-LABEL: v16f32_no_estimate: +; AVX-RECIP: # %bb.0: +; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; AVX-RECIP-NEXT: vdivps %ymm0, %ymm2, %ymm0 +; AVX-RECIP-NEXT: vdivps %ymm1, %ymm2, %ymm1 +; AVX-RECIP-NEXT: retq +; +; FMA-RECIP-LABEL: v16f32_no_estimate: +; FMA-RECIP: # %bb.0: +; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; FMA-RECIP-NEXT: vdivps %ymm0, %ymm2, %ymm0 +; FMA-RECIP-NEXT: vdivps %ymm1, %ymm2, %ymm1 +; FMA-RECIP-NEXT: retq +; +; BTVER2-LABEL: v16f32_no_estimate: +; BTVER2: # %bb.0: +; BTVER2-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] +; BTVER2-NEXT: vdivps %ymm0, %ymm2, %ymm0 # sched: [38:38.00] +; BTVER2-NEXT: vdivps %ymm1, %ymm2, %ymm1 # sched: [38:38.00] +; BTVER2-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: v16f32_no_estimate: +; SANDY: # %bb.0: +; SANDY-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50] +; SANDY-NEXT: vdivps %ymm0, %ymm2, %ymm0 # sched: [29:28.00] +; SANDY-NEXT: vdivps %ymm1, %ymm2, %ymm1 # sched: [29:28.00] +; SANDY-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: v16f32_no_estimate: +; HASWELL: # %bb.0: +; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] sched: [7:0.50] +; HASWELL-NEXT: vdivps %ymm0, %ymm2, %ymm0 # sched: [21:14.00] +; HASWELL-NEXT: vdivps %ymm1, %ymm2, %ymm1 # sched: [21:14.00] +; HASWELL-NEXT: retq # sched: [7:1.00] +; +; HASWELL-NO-FMA-LABEL: v16f32_no_estimate: +; HASWELL-NO-FMA: # %bb.0: +; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm2 = [1,1,1,1,1,1,1,1] +; HASWELL-NO-FMA-NEXT: vdivps %ymm0, %ymm2, %ymm0 +; HASWELL-NO-FMA-NEXT: vdivps %ymm1, %ymm2, %ymm1 +; HASWELL-NO-FMA-NEXT: retq +; +; KNL-LABEL: v16f32_no_estimate: +; KNL: # %bb.0: +; KNL-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] sched: [10:1.00] +; KNL-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [12:1.00] +; KNL-NEXT: retq # sched: [7:1.00] +; +; SKX-LABEL: v16f32_no_estimate: +; SKX: # %bb.0: +; SKX-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] sched: [8:0.50] +; SKX-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [18:10.00] +; SKX-NEXT: retq # sched: [7:1.00] + %div = fdiv fast <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, 
float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x + ret <16 x float> %div +} + +define <16 x float> @v16f32_one_step(<16 x float> %x) #1 { +; SSE-LABEL: v16f32_one_step: +; SSE: # %bb.0: +; SSE-NEXT: movaps %xmm3, %xmm4 +; SSE-NEXT: movaps %xmm0, %xmm5 +; SSE-NEXT: rcpps %xmm0, %xmm6 +; SSE-NEXT: mulps %xmm6, %xmm5 +; SSE-NEXT: movaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; SSE-NEXT: movaps %xmm3, %xmm0 +; SSE-NEXT: subps %xmm5, %xmm0 +; SSE-NEXT: mulps %xmm6, %xmm0 +; SSE-NEXT: addps %xmm6, %xmm0 +; SSE-NEXT: rcpps %xmm1, %xmm6 +; SSE-NEXT: mulps %xmm6, %xmm1 +; SSE-NEXT: movaps %xmm3, %xmm5 +; SSE-NEXT: subps %xmm1, %xmm5 +; SSE-NEXT: mulps %xmm6, %xmm5 +; SSE-NEXT: addps %xmm6, %xmm5 +; SSE-NEXT: rcpps %xmm2, %xmm1 +; SSE-NEXT: mulps %xmm1, %xmm2 +; SSE-NEXT: movaps %xmm3, %xmm6 +; SSE-NEXT: subps %xmm2, %xmm6 +; SSE-NEXT: mulps %xmm1, %xmm6 +; SSE-NEXT: addps %xmm1, %xmm6 +; SSE-NEXT: rcpps %xmm4, %xmm1 +; SSE-NEXT: mulps %xmm1, %xmm4 +; SSE-NEXT: subps %xmm4, %xmm3 +; SSE-NEXT: mulps %xmm1, %xmm3 +; SSE-NEXT: addps %xmm1, %xmm3 +; SSE-NEXT: movaps %xmm5, %xmm1 +; SSE-NEXT: movaps %xmm6, %xmm2 +; SSE-NEXT: retq +; +; AVX-RECIP-LABEL: v16f32_one_step: +; AVX-RECIP: # %bb.0: +; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm0, %ymm0 +; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; AVX-RECIP-NEXT: vsubps %ymm0, %ymm3, %ymm0 +; AVX-RECIP-NEXT: vmulps %ymm0, %ymm2, %ymm0 +; AVX-RECIP-NEXT: vaddps %ymm0, %ymm2, %ymm0 +; AVX-RECIP-NEXT: vrcpps %ymm1, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm1 +; AVX-RECIP-NEXT: vsubps %ymm1, %ymm3, %ymm1 +; AVX-RECIP-NEXT: vmulps %ymm1, %ymm2, %ymm1 +; AVX-RECIP-NEXT: vaddps %ymm1, %ymm2, %ymm1 +; AVX-RECIP-NEXT: retq +; +; FMA-RECIP-LABEL: v16f32_one_step: +; FMA-RECIP: # %bb.0: +; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm2 +; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm2 * ymm0) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm2 +; FMA-RECIP-NEXT: vrcpps %ymm1, %ymm2 +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm2 * ymm1) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm2) + ymm2 +; FMA-RECIP-NEXT: retq +; +; BTVER2-LABEL: v16f32_one_step: +; BTVER2: # %bb.0: +; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] +; BTVER2-NEXT: vrcpps %ymm0, %ymm2 # sched: [2:2.00] +; BTVER2-NEXT: vrcpps %ymm1, %ymm4 # sched: [2:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vmulps %ymm4, %ymm1, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vsubps %ymm1, %ymm3, %ymm1 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm0, %ymm2, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm4, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm2, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm4, %ymm1 # sched: [3:2.00] +; BTVER2-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: v16f32_one_step: +; SANDY: # %bb.0: +; SANDY-NEXT: vrcpps %ymm0, %ymm2 # sched: [7:2.00] +; SANDY-NEXT: vmulps %ymm2, %ymm0, %ymm0 # sched: [5:1.00] +; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = 
[1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50] +; SANDY-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm0, %ymm2, %ymm0 # sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm0, %ymm2, %ymm0 # sched: [3:1.00] +; SANDY-NEXT: vrcpps %ymm1, %ymm2 # sched: [7:2.00] +; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm1 # sched: [5:1.00] +; SANDY-NEXT: vsubps %ymm1, %ymm3, %ymm1 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm1, %ymm2, %ymm1 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: v16f32_one_step: +; HASWELL: # %bb.0: +; HASWELL-NEXT: vrcpps %ymm0, %ymm2 # sched: [11:2.00] +; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1] sched: [7:0.50] +; HASWELL-NEXT: vrcpps %ymm1, %ymm4 # sched: [11:2.00] +; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm2 * ymm0) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm2 sched: [5:0.50] +; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm4 * ymm1) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm4) + ymm4 sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [7:1.00] +; +; HASWELL-NO-FMA-LABEL: v16f32_one_step: +; HASWELL-NO-FMA: # %bb.0: +; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm2 +; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm0, %ymm0 +; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1] +; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm3, %ymm0 +; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm2, %ymm0 +; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm2, %ymm0 +; HASWELL-NO-FMA-NEXT: vrcpps %ymm1, %ymm2 +; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm1 +; HASWELL-NO-FMA-NEXT: vsubps %ymm1, %ymm3, %ymm1 +; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm2, %ymm1 +; HASWELL-NO-FMA-NEXT: vaddps %ymm1, %ymm2, %ymm1 +; HASWELL-NO-FMA-NEXT: retq +; +; KNL-LABEL: v16f32_one_step: +; KNL: # %bb.0: +; KNL-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] sched: [10:1.00] +; KNL-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [12:1.00] +; KNL-NEXT: retq # sched: [7:1.00] +; +; SKX-LABEL: v16f32_one_step: +; SKX: # %bb.0: +; SKX-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] sched: [8:0.50] +; SKX-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [18:10.00] +; SKX-NEXT: retq # sched: [7:1.00] + %div = fdiv fast <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x + ret <16 x float> %div +} + +define <16 x float> @v16f32_two_step(<16 x float> %x) #2 { +; SSE-LABEL: v16f32_two_step: +; SSE: # %bb.0: +; SSE-NEXT: movaps %xmm3, %xmm4 +; SSE-NEXT: movaps %xmm1, %xmm5 +; SSE-NEXT: movaps %xmm0, %xmm1 +; SSE-NEXT: rcpps %xmm0, %xmm0 +; SSE-NEXT: movaps %xmm1, %xmm6 +; SSE-NEXT: mulps %xmm0, %xmm6 +; SSE-NEXT: movaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; SSE-NEXT: movaps %xmm3, %xmm7 +; SSE-NEXT: subps %xmm6, %xmm7 +; SSE-NEXT: mulps %xmm0, %xmm7 +; SSE-NEXT: addps %xmm0, %xmm7 +; SSE-NEXT: mulps %xmm7, %xmm1 +; SSE-NEXT: movaps %xmm3, %xmm0 +; SSE-NEXT: subps %xmm1, %xmm0 +; SSE-NEXT: mulps %xmm7, %xmm0 +; SSE-NEXT: addps %xmm7, %xmm0 +; SSE-NEXT: rcpps %xmm5, %xmm1 +; SSE-NEXT: movaps %xmm5, %xmm6 +; SSE-NEXT: mulps %xmm1, %xmm6 +; SSE-NEXT: movaps %xmm3, %xmm7 +; SSE-NEXT: subps %xmm6, %xmm7 +; SSE-NEXT: mulps %xmm1, %xmm7 +; SSE-NEXT: addps %xmm1, %xmm7 
+; SSE-NEXT: mulps %xmm7, %xmm5 +; SSE-NEXT: movaps %xmm3, %xmm1 +; SSE-NEXT: subps %xmm5, %xmm1 +; SSE-NEXT: mulps %xmm7, %xmm1 +; SSE-NEXT: addps %xmm7, %xmm1 +; SSE-NEXT: rcpps %xmm2, %xmm5 +; SSE-NEXT: movaps %xmm2, %xmm6 +; SSE-NEXT: mulps %xmm5, %xmm6 +; SSE-NEXT: movaps %xmm3, %xmm7 +; SSE-NEXT: subps %xmm6, %xmm7 +; SSE-NEXT: mulps %xmm5, %xmm7 +; SSE-NEXT: addps %xmm5, %xmm7 +; SSE-NEXT: mulps %xmm7, %xmm2 +; SSE-NEXT: movaps %xmm3, %xmm5 +; SSE-NEXT: subps %xmm2, %xmm5 +; SSE-NEXT: mulps %xmm7, %xmm5 +; SSE-NEXT: addps %xmm7, %xmm5 +; SSE-NEXT: rcpps %xmm4, %xmm2 +; SSE-NEXT: movaps %xmm4, %xmm6 +; SSE-NEXT: mulps %xmm2, %xmm6 +; SSE-NEXT: movaps %xmm3, %xmm7 +; SSE-NEXT: subps %xmm6, %xmm7 +; SSE-NEXT: mulps %xmm2, %xmm7 +; SSE-NEXT: addps %xmm2, %xmm7 +; SSE-NEXT: mulps %xmm7, %xmm4 +; SSE-NEXT: subps %xmm4, %xmm3 +; SSE-NEXT: mulps %xmm7, %xmm3 +; SSE-NEXT: addps %xmm7, %xmm3 +; SSE-NEXT: movaps %xmm5, %xmm2 +; SSE-NEXT: retq +; +; AVX-RECIP-LABEL: v16f32_two_step: +; AVX-RECIP: # %bb.0: +; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm0, %ymm3 +; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; AVX-RECIP-NEXT: vsubps %ymm3, %ymm4, %ymm3 +; AVX-RECIP-NEXT: vmulps %ymm3, %ymm2, %ymm3 +; AVX-RECIP-NEXT: vaddps %ymm3, %ymm2, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm0, %ymm0 +; AVX-RECIP-NEXT: vsubps %ymm0, %ymm4, %ymm0 +; AVX-RECIP-NEXT: vmulps %ymm0, %ymm2, %ymm0 +; AVX-RECIP-NEXT: vaddps %ymm0, %ymm2, %ymm0 +; AVX-RECIP-NEXT: vrcpps %ymm1, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm3 +; AVX-RECIP-NEXT: vsubps %ymm3, %ymm4, %ymm3 +; AVX-RECIP-NEXT: vmulps %ymm3, %ymm2, %ymm3 +; AVX-RECIP-NEXT: vaddps %ymm3, %ymm2, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm1 +; AVX-RECIP-NEXT: vsubps %ymm1, %ymm4, %ymm1 +; AVX-RECIP-NEXT: vmulps %ymm1, %ymm2, %ymm1 +; AVX-RECIP-NEXT: vaddps %ymm1, %ymm2, %ymm1 +; AVX-RECIP-NEXT: retq +; +; FMA-RECIP-LABEL: v16f32_two_step: +; FMA-RECIP: # %bb.0: +; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm2 +; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; FMA-RECIP-NEXT: vmovaps %ymm2, %ymm4 +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm4 = -(ymm0 * ymm4) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm4 = (ymm4 * ymm2) + ymm2 +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm4 * ymm0) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm4) + ymm4 +; FMA-RECIP-NEXT: vrcpps %ymm1, %ymm2 +; FMA-RECIP-NEXT: vmovaps %ymm2, %ymm4 +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm4 = -(ymm1 * ymm4) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm4 = (ymm4 * ymm2) + ymm2 +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm4 * ymm1) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm4) + ymm4 +; FMA-RECIP-NEXT: retq +; +; BTVER2-LABEL: v16f32_two_step: +; BTVER2: # %bb.0: +; BTVER2-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] +; BTVER2-NEXT: vrcpps %ymm0, %ymm2 # sched: [2:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm0, %ymm3 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm3, %ymm4, %ymm3 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm3, %ymm2, %ymm3 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm3, %ymm2, %ymm2 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vsubps 
%ymm0, %ymm4, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm0, %ymm2, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm2, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vrcpps %ymm1, %ymm2 # sched: [2:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm3 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm3, %ymm4, %ymm3 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm3, %ymm2, %ymm3 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm3, %ymm2, %ymm2 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm1, %ymm4, %ymm1 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm2, %ymm1 # sched: [3:2.00] +; BTVER2-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: v16f32_two_step: +; SANDY: # %bb.0: +; SANDY-NEXT: vrcpps %ymm0, %ymm2 # sched: [7:2.00] +; SANDY-NEXT: vmulps %ymm2, %ymm0, %ymm3 # sched: [5:1.00] +; SANDY-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50] +; SANDY-NEXT: vsubps %ymm3, %ymm4, %ymm3 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm3, %ymm2, %ymm3 # sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm3, %ymm2, %ymm2 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm2, %ymm0, %ymm0 # sched: [5:1.00] +; SANDY-NEXT: vsubps %ymm0, %ymm4, %ymm0 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm0, %ymm2, %ymm0 # sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm0, %ymm2, %ymm0 # sched: [3:1.00] +; SANDY-NEXT: vrcpps %ymm1, %ymm2 # sched: [7:2.00] +; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm3 # sched: [5:1.00] +; SANDY-NEXT: vsubps %ymm3, %ymm4, %ymm3 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm3, %ymm2, %ymm3 # sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm3, %ymm2, %ymm2 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm1 # sched: [5:1.00] +; SANDY-NEXT: vsubps %ymm1, %ymm4, %ymm1 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm1, %ymm2, %ymm1 # sched: [3:1.00] +; SANDY-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: v16f32_two_step: +; HASWELL: # %bb.0: +; HASWELL-NEXT: vrcpps %ymm0, %ymm2 # sched: [11:2.00] +; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1] sched: [7:0.50] +; HASWELL-NEXT: vmovaps %ymm2, %ymm4 # sched: [1:1.00] +; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm4 = -(ymm0 * ymm4) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm4 = (ymm4 * ymm2) + ymm2 sched: [5:0.50] +; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm4 * ymm0) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm4) + ymm4 sched: [5:0.50] +; HASWELL-NEXT: vrcpps %ymm1, %ymm2 # sched: [11:2.00] +; HASWELL-NEXT: vmovaps %ymm2, %ymm4 # sched: [1:1.00] +; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm4 = -(ymm1 * ymm4) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm4 = (ymm4 * ymm2) + ymm2 sched: [5:0.50] +; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm4 * ymm1) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm4) + ymm4 sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [7:1.00] +; +; HASWELL-NO-FMA-LABEL: v16f32_two_step: +; HASWELL-NO-FMA: # %bb.0: +; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm2 +; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm0, %ymm3 +; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm4 = [1,1,1,1,1,1,1,1] +; HASWELL-NO-FMA-NEXT: vsubps %ymm3, %ymm4, %ymm3 +; HASWELL-NO-FMA-NEXT: vmulps %ymm3, %ymm2, %ymm3 +; HASWELL-NO-FMA-NEXT: vaddps %ymm3, %ymm2, %ymm2 +; HASWELL-NO-FMA-NEXT: vmulps 
%ymm2, %ymm0, %ymm0 +; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm4, %ymm0 +; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm2, %ymm0 +; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm2, %ymm0 +; HASWELL-NO-FMA-NEXT: vrcpps %ymm1, %ymm2 +; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm3 +; HASWELL-NO-FMA-NEXT: vsubps %ymm3, %ymm4, %ymm3 +; HASWELL-NO-FMA-NEXT: vmulps %ymm3, %ymm2, %ymm3 +; HASWELL-NO-FMA-NEXT: vaddps %ymm3, %ymm2, %ymm2 +; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm1 +; HASWELL-NO-FMA-NEXT: vsubps %ymm1, %ymm4, %ymm1 +; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm2, %ymm1 +; HASWELL-NO-FMA-NEXT: vaddps %ymm1, %ymm2, %ymm1 +; HASWELL-NO-FMA-NEXT: retq +; +; KNL-LABEL: v16f32_two_step: +; KNL: # %bb.0: +; KNL-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] sched: [10:1.00] +; KNL-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [12:1.00] +; KNL-NEXT: retq # sched: [7:1.00] +; +; SKX-LABEL: v16f32_two_step: +; SKX: # %bb.0: +; SKX-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] sched: [8:0.50] +; SKX-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [18:10.00] +; SKX-NEXT: retq # sched: [7:1.00] + %div = fdiv fast <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x + ret <16 x float> %div +} + attributes #0 = { "unsafe-fp-math"="true" "reciprocal-estimates"="!divf,!vec-divf" } attributes #1 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf,vec-divf" } attributes #2 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf:2,vec-divf:2" } diff --git a/llvm/test/CodeGen/X86/recip-fastmath2.ll b/llvm/test/CodeGen/X86/recip-fastmath2.ll index f6eec39a6eb..48672c18ce3 100644 --- a/llvm/test/CodeGen/X86/recip-fastmath2.ll +++ b/llvm/test/CodeGen/X86/recip-fastmath2.ll @@ -1190,6 +1190,668 @@ define <8 x float> @v8f32_no_step2(<8 x float> %x) #3 { ret <8 x float> %div } +define <16 x float> @v16f32_one_step2(<16 x float> %x) #1 { +; SSE-LABEL: v16f32_one_step2: +; SSE: # %bb.0: +; SSE-NEXT: movaps %xmm3, %xmm4 +; SSE-NEXT: movaps %xmm2, %xmm5 +; SSE-NEXT: movaps %xmm0, %xmm6 +; SSE-NEXT: rcpps %xmm3, %xmm2 +; SSE-NEXT: mulps %xmm2, %xmm4 +; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; SSE-NEXT: movaps %xmm0, %xmm3 +; SSE-NEXT: subps %xmm4, %xmm3 +; SSE-NEXT: mulps %xmm2, %xmm3 +; SSE-NEXT: addps %xmm2, %xmm3 +; SSE-NEXT: rcpps %xmm5, %xmm4 +; SSE-NEXT: mulps %xmm4, %xmm5 +; SSE-NEXT: movaps %xmm0, %xmm2 +; SSE-NEXT: subps %xmm5, %xmm2 +; SSE-NEXT: mulps %xmm4, %xmm2 +; SSE-NEXT: addps %xmm4, %xmm2 +; SSE-NEXT: rcpps %xmm1, %xmm5 +; SSE-NEXT: mulps %xmm5, %xmm1 +; SSE-NEXT: movaps %xmm0, %xmm4 +; SSE-NEXT: subps %xmm1, %xmm4 +; SSE-NEXT: mulps %xmm5, %xmm4 +; SSE-NEXT: addps %xmm5, %xmm4 +; SSE-NEXT: rcpps %xmm6, %xmm1 +; SSE-NEXT: mulps %xmm1, %xmm6 +; SSE-NEXT: subps %xmm6, %xmm0 +; SSE-NEXT: mulps %xmm1, %xmm0 +; SSE-NEXT: addps %xmm1, %xmm0 +; SSE-NEXT: mulps {{.*}}(%rip), %xmm0 +; SSE-NEXT: mulps {{.*}}(%rip), %xmm4 +; SSE-NEXT: mulps {{.*}}(%rip), %xmm2 +; SSE-NEXT: mulps {{.*}}(%rip), %xmm3 +; SSE-NEXT: movaps %xmm4, %xmm1 +; SSE-NEXT: retq +; +; AVX-RECIP-LABEL: v16f32_one_step2: +; AVX-RECIP: # %bb.0: +; AVX-RECIP-NEXT: vrcpps %ymm1, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm1 +; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; AVX-RECIP-NEXT: vsubps %ymm1, %ymm3, %ymm1 +; 
AVX-RECIP-NEXT: vmulps %ymm1, %ymm2, %ymm1 +; AVX-RECIP-NEXT: vaddps %ymm1, %ymm2, %ymm1 +; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm0, %ymm0 +; AVX-RECIP-NEXT: vsubps %ymm0, %ymm3, %ymm0 +; AVX-RECIP-NEXT: vmulps %ymm0, %ymm2, %ymm0 +; AVX-RECIP-NEXT: vaddps %ymm0, %ymm2, %ymm0 +; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 +; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 +; AVX-RECIP-NEXT: retq +; +; FMA-RECIP-LABEL: v16f32_one_step2: +; FMA-RECIP: # %bb.0: +; FMA-RECIP-NEXT: vrcpps %ymm1, %ymm2 +; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm2 * ymm1) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm2) + ymm2 +; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm2 +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm2 * ymm0) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm2 +; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 +; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 +; FMA-RECIP-NEXT: retq +; +; BTVER2-LABEL: v16f32_one_step2: +; BTVER2: # %bb.0: +; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] +; BTVER2-NEXT: vrcpps %ymm1, %ymm2 # sched: [2:2.00] +; BTVER2-NEXT: vrcpps %ymm0, %ymm4 # sched: [2:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: vmulps %ymm4, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm1, %ymm3, %ymm1 # sched: [3:2.00] +; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: vmulps %ymm0, %ymm4, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm2, %ymm1 # sched: [3:2.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm4, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:2.00] +; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 # sched: [7:2.00] +; BTVER2-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: v16f32_one_step2: +; SANDY: # %bb.0: +; SANDY-NEXT: vrcpps %ymm1, %ymm2 # sched: [7:2.00] +; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm1 # sched: [5:1.00] +; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50] +; SANDY-NEXT: vsubps %ymm1, %ymm3, %ymm1 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm1, %ymm2, %ymm1 # sched: [3:1.00] +; SANDY-NEXT: vrcpps %ymm0, %ymm2 # sched: [7:2.00] +; SANDY-NEXT: vmulps %ymm2, %ymm0, %ymm0 # sched: [5:1.00] +; SANDY-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm0, %ymm2, %ymm0 # sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm0, %ymm2, %ymm0 # sched: [3:1.00] +; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:1.00] +; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 # sched: [12:1.00] +; SANDY-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: v16f32_one_step2: +; HASWELL: # %bb.0: +; HASWELL-NEXT: vrcpps %ymm1, %ymm2 # sched: [11:2.00] +; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1] sched: [7:0.50] +; HASWELL-NEXT: vrcpps %ymm0, %ymm4 # sched: [11:2.00] +; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm2 * ymm1) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm2) + ymm2 sched: [5:0.50] +; HASWELL-NEXT: vfnmadd213ps 
{{.*#+}} ymm0 = -(ymm4 * ymm0) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm4) + ymm4 sched: [5:0.50] +; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:0.50] +; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 # sched: [12:0.50] +; HASWELL-NEXT: retq # sched: [7:1.00] +; +; HASWELL-NO-FMA-LABEL: v16f32_one_step2: +; HASWELL-NO-FMA: # %bb.0: +; HASWELL-NO-FMA-NEXT: vrcpps %ymm1, %ymm2 # sched: [11:2.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm1 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1] sched: [7:0.50] +; HASWELL-NO-FMA-NEXT: vsubps %ymm1, %ymm3, %ymm1 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vaddps %ymm1, %ymm2, %ymm1 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm2 # sched: [11:2.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm2, %ymm0 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm2, %ymm0 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:0.50] +; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 # sched: [12:0.50] +; HASWELL-NO-FMA-NEXT: retq # sched: [7:1.00] +; +; KNL-LABEL: v16f32_one_step2: +; KNL: # %bb.0: +; KNL-NEXT: vmovaps {{.*#+}} zmm1 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00,9.000000e+00,1.000000e+01,1.100000e+01,1.200000e+01,1.300000e+01,1.400000e+01,1.500000e+01,1.600000e+01] sched: [5:0.50] +; KNL-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [12:1.00] +; KNL-NEXT: retq # sched: [7:1.00] +; +; SKX-LABEL: v16f32_one_step2: +; SKX: # %bb.0: +; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00,9.000000e+00,1.000000e+01,1.100000e+01,1.200000e+01,1.300000e+01,1.400000e+01,1.500000e+01,1.600000e+01] sched: [8:0.50] +; SKX-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [18:10.00] +; SKX-NEXT: retq # sched: [7:1.00] + %div = fdiv fast <16 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>, %x + ret <16 x float> %div +} + +define <16 x float> @v16f32_one_step_2_divs(<16 x float> %x) #1 { +; SSE-LABEL: v16f32_one_step_2_divs: +; SSE: # %bb.0: +; SSE-NEXT: rcpps %xmm0, %xmm6 +; SSE-NEXT: mulps %xmm6, %xmm0 +; SSE-NEXT: movaps {{.*#+}} xmm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; SSE-NEXT: movaps %xmm4, %xmm5 +; SSE-NEXT: subps %xmm0, %xmm5 +; SSE-NEXT: mulps %xmm6, %xmm5 +; SSE-NEXT: addps %xmm6, %xmm5 +; SSE-NEXT: rcpps %xmm1, %xmm0 +; SSE-NEXT: mulps %xmm0, %xmm1 +; SSE-NEXT: movaps %xmm4, %xmm6 +; SSE-NEXT: subps %xmm1, %xmm6 +; SSE-NEXT: mulps %xmm0, %xmm6 +; SSE-NEXT: addps %xmm0, %xmm6 +; SSE-NEXT: rcpps %xmm2, %xmm0 +; SSE-NEXT: mulps %xmm0, %xmm2 +; SSE-NEXT: movaps %xmm4, %xmm7 +; SSE-NEXT: subps %xmm2, %xmm7 +; SSE-NEXT: mulps %xmm0, %xmm7 +; SSE-NEXT: addps %xmm0, %xmm7 +; SSE-NEXT: rcpps %xmm3, %xmm0 +; SSE-NEXT: mulps %xmm0, %xmm3 +; SSE-NEXT: subps %xmm3, %xmm4 +; SSE-NEXT: mulps %xmm0, %xmm4 +; SSE-NEXT: addps %xmm0, %xmm4 +; SSE-NEXT: movaps {{.*#+}} xmm3 = [1.300000e+01,1.400000e+01,1.500000e+01,1.600000e+01] +; SSE-NEXT: mulps %xmm4, %xmm3 +; SSE-NEXT: movaps {{.*#+}} xmm2 = 
[9.000000e+00,1.000000e+01,1.100000e+01,1.200000e+01] +; SSE-NEXT: mulps %xmm7, %xmm2 +; SSE-NEXT: movaps {{.*#+}} xmm1 = [5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00] +; SSE-NEXT: mulps %xmm6, %xmm1 +; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00] +; SSE-NEXT: mulps %xmm5, %xmm0 +; SSE-NEXT: mulps %xmm5, %xmm0 +; SSE-NEXT: mulps %xmm6, %xmm1 +; SSE-NEXT: mulps %xmm7, %xmm2 +; SSE-NEXT: mulps %xmm4, %xmm3 +; SSE-NEXT: retq +; +; AVX-RECIP-LABEL: v16f32_one_step_2_divs: +; AVX-RECIP: # %bb.0: +; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm0, %ymm0 +; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; AVX-RECIP-NEXT: vsubps %ymm0, %ymm3, %ymm0 +; AVX-RECIP-NEXT: vmulps %ymm0, %ymm2, %ymm0 +; AVX-RECIP-NEXT: vaddps %ymm0, %ymm2, %ymm0 +; AVX-RECIP-NEXT: vrcpps %ymm1, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm1 +; AVX-RECIP-NEXT: vsubps %ymm1, %ymm3, %ymm1 +; AVX-RECIP-NEXT: vmulps %ymm1, %ymm2, %ymm1 +; AVX-RECIP-NEXT: vaddps %ymm1, %ymm2, %ymm1 +; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm2 +; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm3 +; AVX-RECIP-NEXT: vmulps %ymm0, %ymm3, %ymm0 +; AVX-RECIP-NEXT: vmulps %ymm1, %ymm2, %ymm1 +; AVX-RECIP-NEXT: retq +; +; FMA-RECIP-LABEL: v16f32_one_step_2_divs: +; FMA-RECIP: # %bb.0: +; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm2 +; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm2 * ymm0) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm2 +; FMA-RECIP-NEXT: vrcpps %ymm1, %ymm2 +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm2 * ymm1) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm2) + ymm2 +; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm2 +; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm3 +; FMA-RECIP-NEXT: vmulps %ymm0, %ymm3, %ymm0 +; FMA-RECIP-NEXT: vmulps %ymm1, %ymm2, %ymm1 +; FMA-RECIP-NEXT: retq +; +; BTVER2-LABEL: v16f32_one_step_2_divs: +; BTVER2: # %bb.0: +; BTVER2-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] +; BTVER2-NEXT: vrcpps %ymm0, %ymm2 # sched: [2:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm0, %ymm2, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm2, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vrcpps %ymm1, %ymm2 # sched: [2:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm1, %ymm3, %ymm1 # sched: [3:2.00] +; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm3 # sched: [7:2.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm2, %ymm1 # sched: [3:2.00] +; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm2 # sched: [7:2.00] +; BTVER2-NEXT: vmulps %ymm0, %ymm3, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: v16f32_one_step_2_divs: +; SANDY: # %bb.0: +; SANDY-NEXT: vrcpps %ymm0, %ymm2 # sched: [7:2.00] +; SANDY-NEXT: vmulps %ymm2, %ymm0, %ymm0 # sched: [5:1.00] +; SANDY-NEXT: vmovaps {{.*#+}} ymm3 = 
[1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50] +; SANDY-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00] +; SANDY-NEXT: vrcpps %ymm1, %ymm4 # sched: [7:2.00] +; SANDY-NEXT: vmulps %ymm0, %ymm2, %ymm0 # sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm0, %ymm2, %ymm0 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm4, %ymm1, %ymm1 # sched: [5:1.00] +; SANDY-NEXT: vsubps %ymm1, %ymm3, %ymm1 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm1, %ymm4, %ymm1 # sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm1, %ymm4, %ymm1 # sched: [3:1.00] +; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm2 # sched: [12:1.00] +; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm3 # sched: [12:1.00] +; SANDY-NEXT: vmulps %ymm0, %ymm3, %ymm0 # sched: [5:1.00] +; SANDY-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [5:1.00] +; SANDY-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: v16f32_one_step_2_divs: +; HASWELL: # %bb.0: +; HASWELL-NEXT: vrcpps %ymm0, %ymm2 # sched: [11:2.00] +; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1] sched: [7:0.50] +; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm2 * ymm0) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm2) + ymm2 sched: [5:0.50] +; HASWELL-NEXT: vrcpps %ymm1, %ymm2 # sched: [11:2.00] +; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm2 * ymm1) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm2) + ymm2 sched: [5:0.50] +; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm2 # sched: [12:0.50] +; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm3 # sched: [12:0.50] +; HASWELL-NEXT: vmulps %ymm0, %ymm3, %ymm0 # sched: [5:0.50] +; HASWELL-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [5:0.50] +; HASWELL-NEXT: retq # sched: [7:1.00] +; +; HASWELL-NO-FMA-LABEL: v16f32_one_step_2_divs: +; HASWELL-NO-FMA: # %bb.0: +; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm2 # sched: [11:2.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1] sched: [7:0.50] +; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm3, %ymm0 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vrcpps %ymm1, %ymm4 # sched: [11:2.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm2, %ymm0 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm2, %ymm0 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm4, %ymm1, %ymm1 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vsubps %ymm1, %ymm3, %ymm1 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm4, %ymm1 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vaddps %ymm1, %ymm4, %ymm1 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm2 # sched: [12:0.50] +; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm3 # sched: [12:0.50] +; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm3, %ymm0 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: retq # sched: [7:1.00] +; +; KNL-LABEL: v16f32_one_step_2_divs: +; KNL: # %bb.0: +; KNL-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] sched: [10:1.00] +; KNL-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [12:1.00] +; KNL-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm1 # sched: [12:0.50] +; KNL-NEXT: vmulps %zmm0, %zmm1, %zmm0 # sched: [5:0.50] +; KNL-NEXT: retq # sched: [7:1.00] +; +; SKX-LABEL: v16f32_one_step_2_divs: +; SKX: # %bb.0: +; SKX-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] sched: [8:0.50] +; SKX-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [18:10.00] +; 
SKX-NEXT: vmulps {{.*}}(%rip), %zmm0, %zmm1 # sched: [11:0.50] +; SKX-NEXT: vmulps %zmm0, %zmm1, %zmm0 # sched: [4:0.33] +; SKX-NEXT: retq # sched: [7:1.00] + %div = fdiv fast <16 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>, %x + %div2 = fdiv fast <16 x float> %div, %x + ret <16 x float> %div2 +} + +define <16 x float> @v16f32_two_step2(<16 x float> %x) #2 { +; SSE-LABEL: v16f32_two_step2: +; SSE: # %bb.0: +; SSE-NEXT: movaps %xmm3, %xmm6 +; SSE-NEXT: movaps %xmm2, %xmm5 +; SSE-NEXT: movaps %xmm0, %xmm4 +; SSE-NEXT: rcpps %xmm3, %xmm2 +; SSE-NEXT: mulps %xmm2, %xmm3 +; SSE-NEXT: movaps {{.*#+}} xmm0 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; SSE-NEXT: movaps %xmm0, %xmm7 +; SSE-NEXT: subps %xmm3, %xmm7 +; SSE-NEXT: mulps %xmm2, %xmm7 +; SSE-NEXT: addps %xmm2, %xmm7 +; SSE-NEXT: mulps %xmm7, %xmm6 +; SSE-NEXT: movaps %xmm0, %xmm3 +; SSE-NEXT: subps %xmm6, %xmm3 +; SSE-NEXT: mulps %xmm7, %xmm3 +; SSE-NEXT: addps %xmm7, %xmm3 +; SSE-NEXT: rcpps %xmm5, %xmm2 +; SSE-NEXT: movaps %xmm5, %xmm6 +; SSE-NEXT: mulps %xmm2, %xmm6 +; SSE-NEXT: movaps %xmm0, %xmm7 +; SSE-NEXT: subps %xmm6, %xmm7 +; SSE-NEXT: mulps %xmm2, %xmm7 +; SSE-NEXT: addps %xmm2, %xmm7 +; SSE-NEXT: mulps %xmm7, %xmm5 +; SSE-NEXT: movaps %xmm0, %xmm2 +; SSE-NEXT: subps %xmm5, %xmm2 +; SSE-NEXT: mulps %xmm7, %xmm2 +; SSE-NEXT: addps %xmm7, %xmm2 +; SSE-NEXT: rcpps %xmm1, %xmm5 +; SSE-NEXT: movaps %xmm1, %xmm6 +; SSE-NEXT: mulps %xmm5, %xmm6 +; SSE-NEXT: movaps %xmm0, %xmm7 +; SSE-NEXT: subps %xmm6, %xmm7 +; SSE-NEXT: mulps %xmm5, %xmm7 +; SSE-NEXT: addps %xmm5, %xmm7 +; SSE-NEXT: mulps %xmm7, %xmm1 +; SSE-NEXT: movaps %xmm0, %xmm5 +; SSE-NEXT: subps %xmm1, %xmm5 +; SSE-NEXT: mulps %xmm7, %xmm5 +; SSE-NEXT: addps %xmm7, %xmm5 +; SSE-NEXT: rcpps %xmm4, %xmm1 +; SSE-NEXT: movaps %xmm4, %xmm6 +; SSE-NEXT: mulps %xmm1, %xmm6 +; SSE-NEXT: movaps %xmm0, %xmm7 +; SSE-NEXT: subps %xmm6, %xmm7 +; SSE-NEXT: mulps %xmm1, %xmm7 +; SSE-NEXT: addps %xmm1, %xmm7 +; SSE-NEXT: mulps %xmm7, %xmm4 +; SSE-NEXT: subps %xmm4, %xmm0 +; SSE-NEXT: mulps %xmm7, %xmm0 +; SSE-NEXT: addps %xmm7, %xmm0 +; SSE-NEXT: mulps {{.*}}(%rip), %xmm0 +; SSE-NEXT: mulps {{.*}}(%rip), %xmm5 +; SSE-NEXT: mulps {{.*}}(%rip), %xmm2 +; SSE-NEXT: mulps {{.*}}(%rip), %xmm3 +; SSE-NEXT: movaps %xmm5, %xmm1 +; SSE-NEXT: retq +; +; AVX-RECIP-LABEL: v16f32_two_step2: +; AVX-RECIP: # %bb.0: +; AVX-RECIP-NEXT: vrcpps %ymm1, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm3 +; AVX-RECIP-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; AVX-RECIP-NEXT: vsubps %ymm3, %ymm4, %ymm3 +; AVX-RECIP-NEXT: vmulps %ymm3, %ymm2, %ymm3 +; AVX-RECIP-NEXT: vaddps %ymm3, %ymm2, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm1, %ymm1 +; AVX-RECIP-NEXT: vsubps %ymm1, %ymm4, %ymm1 +; AVX-RECIP-NEXT: vmulps %ymm1, %ymm2, %ymm1 +; AVX-RECIP-NEXT: vaddps %ymm1, %ymm2, %ymm1 +; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm0, %ymm3 +; AVX-RECIP-NEXT: vsubps %ymm3, %ymm4, %ymm3 +; AVX-RECIP-NEXT: vmulps %ymm3, %ymm2, %ymm3 +; AVX-RECIP-NEXT: vaddps %ymm3, %ymm2, %ymm2 +; AVX-RECIP-NEXT: vmulps %ymm2, %ymm0, %ymm0 +; AVX-RECIP-NEXT: vsubps %ymm0, %ymm4, %ymm0 +; AVX-RECIP-NEXT: vmulps %ymm0, %ymm2, %ymm0 +; AVX-RECIP-NEXT: vaddps %ymm0, %ymm2, %ymm0 +; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 +; AVX-RECIP-NEXT: vmulps 
{{.*}}(%rip), %ymm1, %ymm1 +; AVX-RECIP-NEXT: retq +; +; FMA-RECIP-LABEL: v16f32_two_step2: +; FMA-RECIP: # %bb.0: +; FMA-RECIP-NEXT: vrcpps %ymm1, %ymm2 +; FMA-RECIP-NEXT: vmovaps {{.*#+}} ymm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] +; FMA-RECIP-NEXT: vmovaps %ymm2, %ymm4 +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm4 = -(ymm1 * ymm4) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm4 = (ymm4 * ymm2) + ymm2 +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm4 * ymm1) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm4) + ymm4 +; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm2 +; FMA-RECIP-NEXT: vmovaps %ymm2, %ymm4 +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm4 = -(ymm0 * ymm4) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm4 = (ymm4 * ymm2) + ymm2 +; FMA-RECIP-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm4 * ymm0) + ymm3 +; FMA-RECIP-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm4) + ymm4 +; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 +; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 +; FMA-RECIP-NEXT: retq +; +; BTVER2-LABEL: v16f32_two_step2: +; BTVER2: # %bb.0: +; BTVER2-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [5:1.00] +; BTVER2-NEXT: vrcpps %ymm1, %ymm2 # sched: [2:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm3 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm3, %ymm4, %ymm3 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm3, %ymm2, %ymm3 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm3, %ymm2, %ymm2 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm1, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm1, %ymm4, %ymm1 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm1, %ymm2, %ymm1 # sched: [3:2.00] +; BTVER2-NEXT: vrcpps %ymm0, %ymm2 # sched: [2:2.00] +; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 # sched: [7:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm0, %ymm3 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm3, %ymm4, %ymm3 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm3, %ymm2, %ymm3 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm3, %ymm2, %ymm2 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm2, %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vsubps %ymm0, %ymm4, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps %ymm0, %ymm2, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vaddps %ymm0, %ymm2, %ymm0 # sched: [3:2.00] +; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:2.00] +; BTVER2-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: v16f32_two_step2: +; SANDY: # %bb.0: +; SANDY-NEXT: vrcpps %ymm1, %ymm2 # sched: [7:2.00] +; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm3 # sched: [5:1.00] +; SANDY-NEXT: vmovaps {{.*#+}} ymm4 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00] sched: [7:0.50] +; SANDY-NEXT: vsubps %ymm3, %ymm4, %ymm3 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm3, %ymm2, %ymm3 # sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm3, %ymm2, %ymm2 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm2, %ymm1, %ymm1 # sched: [5:1.00] +; SANDY-NEXT: vsubps %ymm1, %ymm4, %ymm1 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm1, %ymm2, %ymm1 # sched: [3:1.00] +; SANDY-NEXT: vrcpps %ymm0, %ymm2 # sched: [7:2.00] +; SANDY-NEXT: vmulps %ymm2, %ymm0, %ymm3 # sched: [5:1.00] +; SANDY-NEXT: vsubps %ymm3, %ymm4, %ymm3 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm3, %ymm2, %ymm3 # 
sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm3, %ymm2, %ymm2 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm2, %ymm0, %ymm0 # sched: [5:1.00] +; SANDY-NEXT: vsubps %ymm0, %ymm4, %ymm0 # sched: [3:1.00] +; SANDY-NEXT: vmulps %ymm0, %ymm2, %ymm0 # sched: [5:1.00] +; SANDY-NEXT: vaddps %ymm0, %ymm2, %ymm0 # sched: [3:1.00] +; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:1.00] +; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 # sched: [12:1.00] +; SANDY-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: v16f32_two_step2: +; HASWELL: # %bb.0: +; HASWELL-NEXT: vrcpps %ymm1, %ymm2 # sched: [11:2.00] +; HASWELL-NEXT: vbroadcastss {{.*#+}} ymm3 = [1,1,1,1,1,1,1,1] sched: [7:0.50] +; HASWELL-NEXT: vmovaps %ymm2, %ymm4 # sched: [1:1.00] +; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm4 = -(ymm1 * ymm4) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm4 = (ymm4 * ymm2) + ymm2 sched: [5:0.50] +; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm1 = -(ymm4 * ymm1) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm1 = (ymm1 * ymm4) + ymm4 sched: [5:0.50] +; HASWELL-NEXT: vrcpps %ymm0, %ymm2 # sched: [11:2.00] +; HASWELL-NEXT: vmovaps %ymm2, %ymm4 # sched: [1:1.00] +; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm4 = -(ymm0 * ymm4) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm4 = (ymm4 * ymm2) + ymm2 sched: [5:0.50] +; HASWELL-NEXT: vfnmadd213ps {{.*#+}} ymm0 = -(ymm4 * ymm0) + ymm3 sched: [5:0.50] +; HASWELL-NEXT: vfmadd132ps {{.*#+}} ymm0 = (ymm0 * ymm4) + ymm4 sched: [5:0.50] +; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:0.50] +; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 # sched: [12:0.50] +; HASWELL-NEXT: retq # sched: [7:1.00] +; +; HASWELL-NO-FMA-LABEL: v16f32_two_step2: +; HASWELL-NO-FMA: # %bb.0: +; HASWELL-NO-FMA-NEXT: vrcpps %ymm1, %ymm2 # sched: [11:2.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm3 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vbroadcastss {{.*#+}} ymm4 = [1,1,1,1,1,1,1,1] sched: [7:0.50] +; HASWELL-NO-FMA-NEXT: vsubps %ymm3, %ymm4, %ymm3 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm3, %ymm2, %ymm3 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vaddps %ymm3, %ymm2, %ymm2 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm1, %ymm1 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vsubps %ymm1, %ymm4, %ymm1 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm1, %ymm2, %ymm1 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vaddps %ymm1, %ymm2, %ymm1 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm2 # sched: [11:2.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm0, %ymm3 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vsubps %ymm3, %ymm4, %ymm3 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm3, %ymm2, %ymm3 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vaddps %ymm3, %ymm2, %ymm2 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm2, %ymm0, %ymm0 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vsubps %ymm0, %ymm4, %ymm0 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vmulps %ymm0, %ymm2, %ymm0 # sched: [5:0.50] +; HASWELL-NO-FMA-NEXT: vaddps %ymm0, %ymm2, %ymm0 # sched: [3:1.00] +; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:0.50] +; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 # sched: [12:0.50] +; HASWELL-NO-FMA-NEXT: retq # sched: [7:1.00] +; +; KNL-LABEL: v16f32_two_step2: +; KNL: # %bb.0: +; KNL-NEXT: vmovaps {{.*#+}} zmm1 = 
[1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00,9.000000e+00,1.000000e+01,1.100000e+01,1.200000e+01,1.300000e+01,1.400000e+01,1.500000e+01,1.600000e+01] sched: [5:0.50] +; KNL-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [12:1.00] +; KNL-NEXT: retq # sched: [7:1.00] +; +; SKX-LABEL: v16f32_two_step2: +; SKX: # %bb.0: +; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00,9.000000e+00,1.000000e+01,1.100000e+01,1.200000e+01,1.300000e+01,1.400000e+01,1.500000e+01,1.600000e+01] sched: [8:0.50] +; SKX-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [18:10.00] +; SKX-NEXT: retq # sched: [7:1.00] + %div = fdiv fast <16 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>, %x + ret <16 x float> %div +} + +define <16 x float> @v16f32_no_step(<16 x float> %x) #3 { +; SSE-LABEL: v16f32_no_step: +; SSE: # %bb.0: +; SSE-NEXT: rcpps %xmm0, %xmm0 +; SSE-NEXT: rcpps %xmm1, %xmm1 +; SSE-NEXT: rcpps %xmm2, %xmm2 +; SSE-NEXT: rcpps %xmm3, %xmm3 +; SSE-NEXT: retq +; +; AVX-RECIP-LABEL: v16f32_no_step: +; AVX-RECIP: # %bb.0: +; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm0 +; AVX-RECIP-NEXT: vrcpps %ymm1, %ymm1 +; AVX-RECIP-NEXT: retq +; +; FMA-RECIP-LABEL: v16f32_no_step: +; FMA-RECIP: # %bb.0: +; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm0 +; FMA-RECIP-NEXT: vrcpps %ymm1, %ymm1 +; FMA-RECIP-NEXT: retq +; +; BTVER2-LABEL: v16f32_no_step: +; BTVER2: # %bb.0: +; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vrcpps %ymm1, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: v16f32_no_step: +; SANDY: # %bb.0: +; SANDY-NEXT: vrcpps %ymm0, %ymm0 # sched: [7:2.00] +; SANDY-NEXT: vrcpps %ymm1, %ymm1 # sched: [7:2.00] +; SANDY-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: v16f32_no_step: +; HASWELL: # %bb.0: +; HASWELL-NEXT: vrcpps %ymm0, %ymm0 # sched: [11:2.00] +; HASWELL-NEXT: vrcpps %ymm1, %ymm1 # sched: [11:2.00] +; HASWELL-NEXT: retq # sched: [7:1.00] +; +; HASWELL-NO-FMA-LABEL: v16f32_no_step: +; HASWELL-NO-FMA: # %bb.0: +; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm0 # sched: [11:2.00] +; HASWELL-NO-FMA-NEXT: vrcpps %ymm1, %ymm1 # sched: [11:2.00] +; HASWELL-NO-FMA-NEXT: retq # sched: [7:1.00] +; +; KNL-LABEL: v16f32_no_step: +; KNL: # %bb.0: +; KNL-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] sched: [10:1.00] +; KNL-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [12:1.00] +; KNL-NEXT: retq # sched: [7:1.00] +; +; SKX-LABEL: v16f32_no_step: +; SKX: # %bb.0: +; SKX-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1] sched: [8:0.50] +; SKX-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [18:10.00] +; SKX-NEXT: retq # sched: [7:1.00] + %div = fdiv fast <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x + ret <16 x float> %div +} + +define <16 x float> @v16f32_no_step2(<16 x float> %x) #3 { +; SSE-LABEL: v16f32_no_step2: +; SSE: # %bb.0: +; SSE-NEXT: rcpps %xmm3, %xmm3 +; SSE-NEXT: rcpps %xmm2, %xmm2 +; SSE-NEXT: rcpps %xmm1, %xmm1 +; SSE-NEXT: rcpps %xmm0, %xmm0 +; SSE-NEXT: mulps {{.*}}(%rip), %xmm0 +; SSE-NEXT: mulps {{.*}}(%rip), %xmm1 +; SSE-NEXT: mulps {{.*}}(%rip), %xmm2 +; SSE-NEXT: mulps {{.*}}(%rip), %xmm3 +; SSE-NEXT: retq +; +; 
AVX-RECIP-LABEL: v16f32_no_step2: +; AVX-RECIP: # %bb.0: +; AVX-RECIP-NEXT: vrcpps %ymm1, %ymm1 +; AVX-RECIP-NEXT: vrcpps %ymm0, %ymm0 +; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 +; AVX-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 +; AVX-RECIP-NEXT: retq +; +; FMA-RECIP-LABEL: v16f32_no_step2: +; FMA-RECIP: # %bb.0: +; FMA-RECIP-NEXT: vrcpps %ymm1, %ymm1 +; FMA-RECIP-NEXT: vrcpps %ymm0, %ymm0 +; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 +; FMA-RECIP-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 +; FMA-RECIP-NEXT: retq +; +; BTVER2-LABEL: v16f32_no_step2: +; BTVER2: # %bb.0: +; BTVER2-NEXT: vrcpps %ymm1, %ymm1 # sched: [2:2.00] +; BTVER2-NEXT: vrcpps %ymm0, %ymm0 # sched: [2:2.00] +; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [7:2.00] +; BTVER2-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 # sched: [7:2.00] +; BTVER2-NEXT: retq # sched: [4:1.00] +; +; SANDY-LABEL: v16f32_no_step2: +; SANDY: # %bb.0: +; SANDY-NEXT: vrcpps %ymm1, %ymm1 # sched: [7:2.00] +; SANDY-NEXT: vrcpps %ymm0, %ymm0 # sched: [7:2.00] +; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:1.00] +; SANDY-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 # sched: [12:1.00] +; SANDY-NEXT: retq # sched: [1:1.00] +; +; HASWELL-LABEL: v16f32_no_step2: +; HASWELL: # %bb.0: +; HASWELL-NEXT: vrcpps %ymm1, %ymm1 # sched: [11:2.00] +; HASWELL-NEXT: vrcpps %ymm0, %ymm0 # sched: [11:2.00] +; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:0.50] +; HASWELL-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 # sched: [12:0.50] +; HASWELL-NEXT: retq # sched: [7:1.00] +; +; HASWELL-NO-FMA-LABEL: v16f32_no_step2: +; HASWELL-NO-FMA: # %bb.0: +; HASWELL-NO-FMA-NEXT: vrcpps %ymm1, %ymm1 # sched: [11:2.00] +; HASWELL-NO-FMA-NEXT: vrcpps %ymm0, %ymm0 # sched: [11:2.00] +; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm0, %ymm0 # sched: [12:0.50] +; HASWELL-NO-FMA-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1 # sched: [12:0.50] +; HASWELL-NO-FMA-NEXT: retq # sched: [7:1.00] +; +; KNL-LABEL: v16f32_no_step2: +; KNL: # %bb.0: +; KNL-NEXT: vmovaps {{.*#+}} zmm1 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00,9.000000e+00,1.000000e+01,1.100000e+01,1.200000e+01,1.300000e+01,1.400000e+01,1.500000e+01,1.600000e+01] sched: [5:0.50] +; KNL-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [12:1.00] +; KNL-NEXT: retq # sched: [7:1.00] +; +; SKX-LABEL: v16f32_no_step2: +; SKX: # %bb.0: +; SKX-NEXT: vmovaps {{.*#+}} zmm1 = [1.000000e+00,2.000000e+00,3.000000e+00,4.000000e+00,5.000000e+00,6.000000e+00,7.000000e+00,8.000000e+00,9.000000e+00,1.000000e+01,1.100000e+01,1.200000e+01,1.300000e+01,1.400000e+01,1.500000e+01,1.600000e+01] sched: [8:0.50] +; SKX-NEXT: vdivps %zmm0, %zmm1, %zmm0 # sched: [18:10.00] +; SKX-NEXT: retq # sched: [7:1.00] + %div = fdiv fast <16 x float> <float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, float 9.0, float 10.0, float 11.0, float 12.0, float 13.0, float 14.0, float 15.0, float 16.0>, %x + ret <16 x float> %div +} + attributes #0 = { "unsafe-fp-math"="true" "reciprocal-estimates"="!divf,!vec-divf" } attributes #1 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf,vec-divf" } attributes #2 = { "unsafe-fp-math"="true" "reciprocal-estimates"="divf:2,vec-divf:2" } diff --git a/llvm/test/CodeGen/X86/sqrt-fastmath.ll b/llvm/test/CodeGen/X86/sqrt-fastmath.ll index 07495e159e1..91ce663ccef 100644 --- a/llvm/test/CodeGen/X86/sqrt-fastmath.ll +++ b/llvm/test/CodeGen/X86/sqrt-fastmath.ll @@ -1,6 +1,7 @@ ; NOTE: Assertions have been 
autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX512
 declare double @__sqrt_finite(double)
 declare float @__sqrtf_finite(float)
@@ -8,6 +9,7 @@ declare x86_fp80 @__sqrtl_finite(x86_fp80)
 declare float @llvm.sqrt.f32(float)
 declare <4 x float> @llvm.sqrt.v4f32(<4 x float>)
 declare <8 x float> @llvm.sqrt.v8f32(<8 x float>)
+declare <16 x float> @llvm.sqrt.v16f32(<16 x float>)
 define double @finite_f64_no_estimate(double %d) #0 {
@@ -70,18 +72,31 @@ define float @finite_f32_estimate(float %f) #1 {
 ; SSE-NEXT: andnps %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: finite_f32_estimate:
-; AVX: # %bb.0:
-; AVX-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
-; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmulss %xmm1, %xmm2, %xmm1
-; AVX-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT: vmulss {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vmulss %xmm1, %xmm2, %xmm1
-; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vcmpeqss %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vandnps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: finite_f32_estimate:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
+; AVX1-NEXT: vmulss %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vmulss %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vmulss {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vmulss %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX1-NEXT: vcmpeqss %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vandnps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: finite_f32_estimate:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
+; AVX512-NEXT: vmulss %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vfmadd213ss {{.*#+}} xmm1 = (xmm2 * xmm1) + mem
+; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vmulss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
+; AVX512-NEXT: vcmpeqss %xmm2, %xmm0, %k1
+; AVX512-NEXT: vmovss %xmm2, %xmm0, %xmm1 {%k1}
+; AVX512-NEXT: vmovaps %xmm1, %xmm0
+; AVX512-NEXT: retq
 %call = tail call float @__sqrtf_finite(float %f) #2
 ret float %call
 }
@@ -126,18 +141,33 @@ define float @sqrtf_check_denorms(float %x) #3 {
 ; SSE-NEXT: andnps %xmm2, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: sqrtf_check_denorms:
-; AVX: # %bb.0:
-; AVX-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
-; AVX-NEXT: vmulss %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmulss %xmm1, %xmm2, %xmm1
-; AVX-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT: vmulss {{.*}}(%rip), %xmm2, %xmm2
-; AVX-NEXT: vmulss %xmm1, %xmm2, %xmm1
-; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vcmpltss {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vandnps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: sqrtf_check_denorms:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
+; AVX1-NEXT: vmulss %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vmulss %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vaddss {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vmulss {{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vmulss %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vcmpltss {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vandnps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: sqrtf_check_denorms:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
+; AVX512-NEXT: vmulss %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vfmadd213ss {{.*#+}} xmm1 = (xmm2 * xmm1) + mem
+; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm2, %xmm2
+; AVX512-NEXT: vmulss %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN]
+; AVX512-NEXT: vandps %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vcmpltss {{.*}}(%rip), %xmm0, %k1
+; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
+; AVX512-NEXT: vmovss %xmm0, %xmm0, %xmm1 {%k1}
+; AVX512-NEXT: vmovaps %xmm1, %xmm0
+; AVX512-NEXT: retq
 %call = tail call float @__sqrtf_finite(float %x) #2
 ret float %call
 }
@@ -160,19 +190,35 @@ define <4 x float> @sqrt_v4f32_check_denorms(<4 x float> %x) #3 {
 ; SSE-NEXT: movaps %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: sqrt_v4f32_check_denorms:
-; AVX: # %bb.0:
-; AVX-NEXT: vrsqrtps %xmm0, %xmm1
-; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm2
-; AVX-NEXT: vmulps {{.*}}(%rip), %xmm2, %xmm3
-; AVX-NEXT: vmulps %xmm1, %xmm2, %xmm1
-; AVX-NEXT: vaddps {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT: vmulps %xmm1, %xmm3, %xmm1
-; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm2 = [1.175494e-38,1.175494e-38,1.175494e-38,1.175494e-38]
-; AVX-NEXT: vcmpleps %xmm0, %xmm2, %xmm0
-; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: sqrt_v4f32_check_denorms:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vrsqrtps %xmm0, %xmm1
+; AVX1-NEXT: vmulps %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vmulps {{.*}}(%rip), %xmm2, %xmm3
+; AVX1-NEXT: vmulps %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vaddps {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vmulps %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmovaps {{.*#+}} xmm2 = [1.175494e-38,1.175494e-38,1.175494e-38,1.175494e-38]
+; AVX1-NEXT: vcmpleps %xmm0, %xmm2, %xmm0
+; AVX1-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: sqrt_v4f32_check_denorms:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vrsqrtps %xmm0, %xmm1
+; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm2
+; AVX512-NEXT: vbroadcastss {{.*#+}} xmm3 = [-3,-3,-3,-3]
+; AVX512-NEXT: vfmadd231ps {{.*#+}} xmm3 = (xmm2 * xmm1) + xmm3
+; AVX512-NEXT: vbroadcastss {{.*#+}} xmm1 = [-0.5,-0.5,-0.5,-0.5]
+; AVX512-NEXT: vmulps %xmm3, %xmm1, %xmm1
+; AVX512-NEXT: vmulps %xmm1, %xmm2, %xmm1
+; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [NaN,NaN,NaN,NaN]
+; AVX512-NEXT: vandps %xmm2, %xmm0, %xmm0
+; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [1.17549435E-38,1.17549435E-38,1.17549435E-38,1.17549435E-38]
+; AVX512-NEXT: vcmpleps %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vandps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: retq
 %call = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %x) #2
 ret <4 x float> %call
 }
@@ -209,15 +255,24 @@ define float @f32_estimate(float %x) #1 {
 ; SSE-NEXT: movaps %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: f32_estimate:
-; AVX: # %bb.0:
-; AVX-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
-; AVX-NEXT: vmulss %xmm1, %xmm1, %xmm2
-; AVX-NEXT: vmulss %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vmulss {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT: vmulss %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: f32_estimate:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
+; AVX1-NEXT: vmulss %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vmulss %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmulss {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: f32_estimate:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vrsqrtss %xmm0, %xmm0, %xmm1
+; AVX512-NEXT: vmulss %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vfmadd213ss {{.*#+}} xmm0 = (xmm1 * xmm0) + mem
+; AVX512-NEXT: vmulss {{.*}}(%rip), %xmm1, %xmm1
+; AVX512-NEXT: vmulss %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
 %sqrt = tail call float @llvm.sqrt.f32(float %x)
 %div = fdiv fast float 1.0, %sqrt
 ret float %div
@@ -231,12 +286,19 @@ define <4 x float> @v4f32_no_estimate(<4 x float> %x) #0 {
 ; SSE-NEXT: divps %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: v4f32_no_estimate:
-; AVX: # %bb.0:
-; AVX-NEXT: vsqrtps %xmm0, %xmm0
-; AVX-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vdivps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: v4f32_no_estimate:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vsqrtps %xmm0, %xmm0
+; AVX1-NEXT: vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX1-NEXT: vdivps %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v4f32_no_estimate:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vsqrtps %xmm0, %xmm0
+; AVX512-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
+; AVX512-NEXT: vdivps %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
 %sqrt = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %x)
 %div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %sqrt
 ret <4 x float> %div
@@ -255,15 +317,26 @@ define <4 x float> @v4f32_estimate(<4 x float> %x) #1 {
 ; SSE-NEXT: movaps %xmm1, %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: v4f32_estimate:
-; AVX: # %bb.0:
-; AVX-NEXT: vrsqrtps %xmm0, %xmm1
-; AVX-NEXT: vmulps %xmm1, %xmm1, %xmm2
-; AVX-NEXT: vmulps %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: vmulps {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT: vmulps %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: v4f32_estimate:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vrsqrtps %xmm0, %xmm1
+; AVX1-NEXT: vmulps %xmm1, %xmm1, %xmm2
+; AVX1-NEXT: vmulps %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vaddps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vmulps {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v4f32_estimate:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vrsqrtps %xmm0, %xmm1
+; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm0
+; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [-3,-3,-3,-3]
+; AVX512-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2
+; AVX512-NEXT: vbroadcastss {{.*#+}} xmm2 = [-0.5,-0.5,-0.5,-0.5]
+; AVX512-NEXT: vmulps %xmm0, %xmm2, %xmm0
+; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
+; AVX512-NEXT: retq
 %sqrt = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %x)
 %div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %sqrt
 ret <4 x float> %div
@@ -280,12 +353,19 @@ define <8 x float> @v8f32_no_estimate(<8 x float> %x) #0 {
 ; SSE-NEXT: divps %xmm2, %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: v8f32_no_estimate:
-; AVX: # %bb.0:
-; AVX-NEXT: vsqrtps %ymm0, %ymm0
-; AVX-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
-; AVX-NEXT: vdivps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: v8f32_no_estimate:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vsqrtps %ymm0, %ymm0
+; AVX1-NEXT: vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX1-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v8f32_no_estimate:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vsqrtps %ymm0, %ymm0
+; AVX512-NEXT: vbroadcastss {{.*#+}} ymm1 = [1,1,1,1,1,1,1,1]
+; AVX512-NEXT: vdivps %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
 %sqrt = tail call <8 x float> @llvm.sqrt.v8f32(<8 x float> %x)
 %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %sqrt
 ret <8 x float> %div
@@ -314,20 +394,136 @@ define <8 x float> @v8f32_estimate(<8 x float> %x) #1 {
 ; SSE-NEXT: movaps %xmm3, %xmm1
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: v8f32_estimate:
-; AVX: # %bb.0:
-; AVX-NEXT: vrsqrtps %ymm0, %ymm1
-; AVX-NEXT: vmulps %ymm1, %ymm1, %ymm2
-; AVX-NEXT: vmulps %ymm2, %ymm0, %ymm0
-; AVX-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
-; AVX-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
-; AVX-NEXT: vmulps %ymm0, %ymm1, %ymm0
-; AVX-NEXT: retq
+; AVX1-LABEL: v8f32_estimate:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vrsqrtps %ymm0, %ymm1
+; AVX1-NEXT: vmulps %ymm1, %ymm1, %ymm2
+; AVX1-NEXT: vmulps %ymm2, %ymm0, %ymm0
+; AVX1-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
+; AVX1-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
+; AVX1-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v8f32_estimate:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vrsqrtps %ymm0, %ymm1
+; AVX512-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX512-NEXT: vbroadcastss {{.*#+}} ymm2 = [-3,-3,-3,-3,-3,-3,-3,-3]
+; AVX512-NEXT: vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2
+; AVX512-NEXT: vbroadcastss {{.*#+}} ymm2 = [-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5,-0.5]
+; AVX512-NEXT: vmulps %ymm0, %ymm2, %ymm0
+; AVX512-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
 %sqrt = tail call <8 x float> @llvm.sqrt.v8f32(<8 x float> %x)
 %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %sqrt
 ret <8 x float> %div
 }
+define <16 x float> @v16f32_no_estimate(<16 x float> %x) #0 {
+; SSE-LABEL: v16f32_no_estimate:
+; SSE: # %bb.0:
+; SSE-NEXT: sqrtps %xmm3, %xmm4
+; SSE-NEXT: sqrtps %xmm2, %xmm5
+; SSE-NEXT: sqrtps %xmm1, %xmm2
+; SSE-NEXT: sqrtps %xmm0, %xmm1
+; SSE-NEXT: movaps {{.*#+}} xmm3 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT: movaps %xmm3, %xmm0
+; SSE-NEXT: divps %xmm1, %xmm0
+; SSE-NEXT: movaps %xmm3, %xmm1
+; SSE-NEXT: divps %xmm2, %xmm1
+; SSE-NEXT: movaps %xmm3, %xmm2
+; SSE-NEXT: divps %xmm5, %xmm2
+; SSE-NEXT: divps %xmm4, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: v16f32_no_estimate:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vsqrtps %ymm1, %ymm1
+; AVX1-NEXT: vsqrtps %ymm0, %ymm0
+; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; AVX1-NEXT: vdivps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vdivps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v16f32_no_estimate:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vsqrtps %zmm0, %zmm0
+; AVX512-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512-NEXT: vdivps %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+ %sqrt = tail call <16 x float> @llvm.sqrt.v16f32(<16 x float> %x)
+ %div = fdiv fast <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %sqrt
+ ret <16 x float> %div
+}
+
+define <16 x float> @v16f32_estimate(<16 x float> %x) #1 {
+; SSE-LABEL: v16f32_estimate:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps %xmm1, %xmm4
+; SSE-NEXT: movaps %xmm0, %xmm1
+; SSE-NEXT: rsqrtps %xmm0, %xmm5
+; SSE-NEXT: movaps {{.*#+}} xmm6 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01]
+; SSE-NEXT: movaps %xmm5, %xmm0
+; SSE-NEXT: mulps %xmm5, %xmm0
+; SSE-NEXT: mulps %xmm1, %xmm0
+; SSE-NEXT: movaps {{.*#+}} xmm7 = [-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00]
+; SSE-NEXT: addps %xmm7, %xmm0
+; SSE-NEXT: mulps %xmm6, %xmm0
+; SSE-NEXT: mulps %xmm5, %xmm0
+; SSE-NEXT: rsqrtps %xmm4, %xmm5
+; SSE-NEXT: movaps %xmm5, %xmm1
+; SSE-NEXT: mulps %xmm5, %xmm1
+; SSE-NEXT: mulps %xmm4, %xmm1
+; SSE-NEXT: addps %xmm7, %xmm1
+; SSE-NEXT: mulps %xmm6, %xmm1
+; SSE-NEXT: mulps %xmm5, %xmm1
+; SSE-NEXT: rsqrtps %xmm2, %xmm5
+; SSE-NEXT: movaps %xmm5, %xmm4
+; SSE-NEXT: mulps %xmm5, %xmm4
+; SSE-NEXT: mulps %xmm2, %xmm4
+; SSE-NEXT: addps %xmm7, %xmm4
+; SSE-NEXT: mulps %xmm6, %xmm4
+; SSE-NEXT: mulps %xmm5, %xmm4
+; SSE-NEXT: rsqrtps %xmm3, %xmm2
+; SSE-NEXT: movaps %xmm2, %xmm5
+; SSE-NEXT: mulps %xmm2, %xmm5
+; SSE-NEXT: mulps %xmm3, %xmm5
+; SSE-NEXT: addps %xmm7, %xmm5
+; SSE-NEXT: mulps %xmm6, %xmm5
+; SSE-NEXT: mulps %xmm2, %xmm5
+; SSE-NEXT: movaps %xmm4, %xmm2
+; SSE-NEXT: movaps %xmm5, %xmm3
+; SSE-NEXT: retq
+;
+; AVX1-LABEL: v16f32_estimate:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vrsqrtps %ymm0, %ymm2
+; AVX1-NEXT: vmovaps {{.*#+}} ymm3 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01]
+; AVX1-NEXT: vmulps %ymm2, %ymm2, %ymm4
+; AVX1-NEXT: vmulps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vmovaps {{.*#+}} ymm4 = [-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00]
+; AVX1-NEXT: vaddps %ymm4, %ymm0, %ymm0
+; AVX1-NEXT: vmulps %ymm0, %ymm3, %ymm0
+; AVX1-NEXT: vmulps %ymm0, %ymm2, %ymm0
+; AVX1-NEXT: vrsqrtps %ymm1, %ymm2
+; AVX1-NEXT: vmulps %ymm2, %ymm2, %ymm5
+; AVX1-NEXT: vmulps %ymm5, %ymm1, %ymm1
+; AVX1-NEXT: vaddps %ymm4, %ymm1, %ymm1
+; AVX1-NEXT: vmulps %ymm1, %ymm3, %ymm1
+; AVX1-NEXT: vmulps %ymm1, %ymm2, %ymm1
+; AVX1-NEXT: retq
+;
+; AVX512-LABEL: v16f32_estimate:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vsqrtps %zmm0, %zmm0
+; AVX512-NEXT: vbroadcastss {{.*#+}} zmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
+; AVX512-NEXT: vdivps %zmm0, %zmm1, %zmm0
+; AVX512-NEXT: retq
+ %sqrt = tail call <16 x float> @llvm.sqrt.v16f32(<16 x float> %x)
+ %div = fdiv fast <16 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %sqrt
+ ret <16 x float> %div
+}
+
 attributes #0 = { "unsafe-fp-math"="true" "reciprocal-estimates"="!sqrtf,!vec-sqrtf,!divf,!vec-divf" }
 attributes #1 = { "unsafe-fp-math"="true" "reciprocal-estimates"="sqrt,vec-sqrt" }
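
The new AVX512 sequences checked above for f32_estimate, v4f32_estimate, and v8f32_estimate encode one Newton-Raphson refinement of the hardware reciprocal-square-root estimate, with the add of -3 folded into an FMA: est * -0.5 * (x*est*est - 3). The following is only an illustrative sketch of that refinement using SSE/FMA intrinsics; the helper name rsqrt_nr and the choice of intrinsics are assumptions for illustration and are not part of the test or of the compiler output.

#include <immintrin.h>

/* One Newton-Raphson step for y ~= 1/sqrt(x); build with FMA enabled (e.g. -mfma).
   Mirrors the checked pattern: rsqrt estimate, x*est, fused (est*(x*est)) + (-3),
   multiply by -0.5, multiply by est. */
static __m128 rsqrt_nr(__m128 x) {
    __m128 est = _mm_rsqrt_ps(x);                           /* ~12-bit estimate of 1/sqrt(x) */
    __m128 xe  = _mm_mul_ps(x, est);                        /* x * est                       */
    __m128 t   = _mm_fmadd_ps(est, xe, _mm_set1_ps(-3.0f)); /* x*est*est - 3                 */
    __m128 h   = _mm_mul_ps(_mm_set1_ps(-0.5f), t);         /* -0.5 * (x*est*est - 3)        */
    return _mm_mul_ps(est, h);                              /* refined estimate              */
}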

