| field | value | date |
|---|---|---|
| author | Craig Topper <craig.topper@intel.com> | 2018-07-08 01:10:43 +0000 |
| committer | Craig Topper <craig.topper@intel.com> | 2018-07-08 01:10:43 +0000 |
| commit | fdf3f1ff82501bd6d0dd99a72f406cbc195909db (patch) | |
| tree | dd92110de99ec70f27983ff9fcdc8d87e33769df /llvm/test/CodeGen | |
| parent | d679d01a1f580c8a689c04a7908f5eff09e38ba9 (diff) | |
| download | bcm5719-llvm-fdf3f1ff82501bd6d0dd99a72f406cbc195909db.tar.gz, bcm5719-llvm-fdf3f1ff82501bd6d0dd99a72f406cbc195909db.zip | |
[X86] Add new scalar fma intrinsics with rounding mode that use f32/f64 types.
This allows us to handle masking in a very similar way to the default rounding version that uses llvm.fma.
I had to add new rounding-mode CodeGenOnly instructions to support isel in the cases where we can't find a movss to grab the upper bits from, which is needed to use the b_Int instruction.
Fast-isel tests have been updated to match new clang codegen.
We are currently having trouble folding fneg into the new intrinsic. I'm going to correct that in a follow-up patch to keep the size of this one down.
A future patch will also remove the old intrinsics.
llvm-svn: 336506
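To make the new pattern concrete, here is a minimal standalone sketch distilled from the updated fast-isel test below. The function name is illustrative; the intrinsic declaration is taken from the diff. The low scalar element is extracted, the rounding-mode intrinsic operates on it (i32 8 encodes round-to-nearest with suppress-all-exceptions, the {rn-sae} seen in the CHECK lines), and masking becomes an ordinary select before the element is reinserted:

```llvm
; Minimal sketch of the new masked scalar FMA pattern (function name
; illustrative; the intrinsic is the one declared in the test diff).
define <4 x float> @sketch_mask_fmadd_round_ss(<4 x float> %__W, i8 %__U,
                                               <4 x float> %__A, <4 x float> %__B) {
entry:
  ; Work on the low scalar element of each vector operand.
  %a = extractelement <4 x float> %__W, i64 0
  %b = extractelement <4 x float> %__A, i64 0
  %c = extractelement <4 x float> %__B, i64 0
  ; i32 8 = round-to-nearest + suppress-all-exceptions ({rn-sae}).
  %fma = tail call float @llvm.x86.avx512.vfmadd.f32(float %a, float %b, float %c, i32 8)
  ; Masking is plain IR: take bit 0 of the mask and select between the
  ; FMA result and the passthrough value (the original low element).
  %mvec = bitcast i8 %__U to <8 x i1>
  %m = extractelement <8 x i1> %mvec, i64 0
  %res = select i1 %m, float %fma, float %a
  %vec = insertelement <4 x float> %__W, float %res, i64 0
  ret <4 x float> %vec
}

declare float @llvm.x86.avx512.vfmadd.f32(float, float, float, i32)
```

This is the same extract/select/insert shape used for masking the default-rounding llvm.fma path, which is what lets the two cases share isel handling.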
Diffstat (limited to 'llvm/test/CodeGen')
| -rw-r--r-- | llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll | 364 |
1 file changed, 272 insertions(+), 92 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
index ae45e55ee5d..1c7f633df38 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
@@ -4899,11 +4899,18 @@ define <4 x float> @test_mm_mask_fmadd_round_ss(<4 x float> %__W, i8 zeroext %__
 ; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT: retq
 entry:
- %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %__W, <4 x float> %__A, <4 x float> %__B, i8 %__U, i32 8)
- ret <4 x float> %0
+ %0 = extractelement <4 x float> %__W, i64 0
+ %1 = extractelement <4 x float> %__A, i64 0
+ %2 = extractelement <4 x float> %__B, i64 0
+ %3 = tail call float @llvm.x86.avx512.vfmadd.f32(float %0, float %1, float %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, float %3, float %0
+ %7 = insertelement <4 x float> %__W, float %6, i64 0
+ ret <4 x float> %7
 }
 
-declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) #1
+declare float @llvm.x86.avx512.vfmadd.f32(float, float, float, i32) #1
 
 define <4 x float> @test_mm_maskz_fmadd_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
 ; X86-LABEL: test_mm_maskz_fmadd_ss:
@@ -4944,12 +4951,17 @@ define <4 x float> @test_mm_maskz_fmadd_round_ss(i8 zeroext %__U, <4 x float> %_
 ; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT: retq
 entry:
- %0 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %__A, <4 x float> %__B, <4 x float> %__C, i8 %__U, i32 8)
- ret <4 x float> %0
+ %0 = extractelement <4 x float> %__A, i64 0
+ %1 = extractelement <4 x float> %__B, i64 0
+ %2 = extractelement <4 x float> %__C, i64 0
+ %3 = tail call float @llvm.x86.avx512.vfmadd.f32(float %0, float %1, float %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, float %3, float 0.000000e+00
+ %7 = insertelement <4 x float> %__A, float %6, i64 0
+ ret <4 x float> %7
 }
 
-declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) #1
-
 define <4 x float> @test_mm_mask3_fmadd_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) {
 ; X86-LABEL: test_mm_mask3_fmadd_ss:
 ; X86: # %bb.0: # %entry
@@ -4994,12 +5006,17 @@ define <4 x float> @test_mm_mask3_fmadd_round_ss(<4 x float> %__W, <4 x float> %
 ; X64-NEXT: vmovaps %xmm2, %xmm0
 ; X64-NEXT: retq
 entry:
- %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 %__U, i32 8)
- ret <4 x float> %0
+ %0 = extractelement <4 x float> %__W, i64 0
+ %1 = extractelement <4 x float> %__X, i64 0
+ %2 = extractelement <4 x float> %__Y, i64 0
+ %3 = tail call float @llvm.x86.avx512.vfmadd.f32(float %0, float %1, float %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, float %3, float %2
+ %7 = insertelement <4 x float> %__Y, float %6, i64 0
+ ret <4 x float> %7
 }
 
-declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) #1
-
 define <4 x float> @test_mm_mask_fmsub_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
 ; X86-LABEL: test_mm_mask_fmsub_ss:
 ; X86: # %bb.0: # %entry
@@ -5045,9 +5062,16 @@ define <4 x float> @test_mm_mask_fmsub_round_ss(<4 x float> %__W, i8 zeroext %__
 ; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT: retq
 entry:
- %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
- %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %__W, <4 x float> %__A, <4 x float> %sub, i8 %__U, i32 8)
- ret <4 x float> %0
+ %0 = extractelement <4 x float> %__W, i64 0
+ %1 = extractelement <4 x float> %__A, i64 0
+ %.rhs = extractelement <4 x float> %__B, i64 0
+ %2 = fsub float -0.000000e+00, %.rhs
+ %3 = tail call float @llvm.x86.avx512.vfmadd.f32(float %0, float %1, float %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, float %3, float %0
+ %7 = insertelement <4 x float> %__W, float %6, i64 0
+ ret <4 x float> %7
 }
 
 define <4 x float> @test_mm_maskz_fmsub_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
@@ -5094,9 +5118,16 @@ define <4 x float> @test_mm_maskz_fmsub_round_ss(i8 zeroext %__U, <4 x float> %_
 ; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT: retq
 entry:
- %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
- %0 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %__A, <4 x float> %__B, <4 x float> %sub, i8 %__U, i32 8)
- ret <4 x float> %0
+ %0 = extractelement <4 x float> %__A, i64 0
+ %1 = extractelement <4 x float> %__B, i64 0
+ %.rhs = extractelement <4 x float> %__C, i64 0
+ %2 = fsub float -0.000000e+00, %.rhs
+ %3 = tail call float @llvm.x86.avx512.vfmadd.f32(float %0, float %1, float %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, float %3, float 0.000000e+00
+ %7 = insertelement <4 x float> %__A, float %6, i64 0
+ ret <4 x float> %7
 }
 
 define <4 x float> @test_mm_mask3_fmsub_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) {
@@ -5132,24 +5163,36 @@ define <4 x float> @test_mm_mask3_fmsub_round_ss(<4 x float> %__W, <4 x float> %
 ; X86-LABEL: test_mm_mask3_fmsub_round_ss:
 ; X86: # %bb.0: # %entry
 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
+; X86-NEXT: vxorps %xmm3, %xmm2, %xmm3
+; X86-NEXT: vfmadd213ss %xmm3, %xmm0, %xmm1
 ; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
 ; X86-NEXT: vmovaps %xmm2, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: test_mm_mask3_fmsub_round_ss:
 ; X64: # %bb.0: # %entry
+; X64-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
+; X64-NEXT: vxorps %xmm3, %xmm2, %xmm3
+; X64-NEXT: vfmadd213ss %xmm3, %xmm0, %xmm1
 ; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
 ; X64-NEXT: vmovaps %xmm2, %xmm0
 ; X64-NEXT: retq
 entry:
- %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 %__U, i32 8)
- ret <4 x float> %0
+ %0 = extractelement <4 x float> %__W, i64 0
+ %1 = extractelement <4 x float> %__X, i64 0
+ %.rhs = extractelement <4 x float> %__Y, i64 0
+ %2 = fsub float -0.000000e+00, %.rhs
+ %3 = tail call float @llvm.x86.avx512.vfmadd.f32(float %0, float %1, float %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, float %3, float %.rhs
+ %7 = insertelement <4 x float> %__Y, float %6, i64 0
+ ret <4 x float> %7
 }
 
-declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float>, <4 x float>, <4 x float>, i8, i32) #1
-
 define <4 x float> @test_mm_mask_fnmadd_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
 ; X86-LABEL: test_mm_mask_fnmadd_ss:
 ; X86: # %bb.0: # %entry
@@ -5195,9 +5238,16 @@ define <4 x float> @test_mm_mask_fnmadd_round_ss(<4 x float> %__W, i8 zeroext %_
 ; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT: retq
 entry:
- %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
- %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %__W, <4 x float> %sub, <4 x float> %__B, i8 %__U, i32 8)
- ret <4 x float> %0
+ %0 = extractelement <4 x float> %__W, i64 0
+ %.rhs = extractelement <4 x float> %__A, i64 0
+ %1 = fsub float -0.000000e+00, %.rhs
+ %2 = extractelement <4 x float> %__B, i64 0
+ %3 = tail call float @llvm.x86.avx512.vfmadd.f32(float %0, float %1, float %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, float %3, float %0
+ %7 = insertelement <4 x float> %__W, float %6, i64 0
+ ret <4 x float> %7
 }
 
 define <4 x float> @test_mm_maskz_fnmadd_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
@@ -5244,9 +5294,16 @@ define <4 x float> @test_mm_maskz_fnmadd_round_ss(i8 zeroext %__U, <4 x float> %
 ; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT: retq
 entry:
- %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
- %0 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %__A, <4 x float> %sub, <4 x float> %__C, i8 %__U, i32 8)
- ret <4 x float> %0
+ %0 = extractelement <4 x float> %__A, i64 0
+ %.rhs = extractelement <4 x float> %__B, i64 0
+ %1 = fsub float -0.000000e+00, %.rhs
+ %2 = extractelement <4 x float> %__C, i64 0
+ %3 = tail call float @llvm.x86.avx512.vfmadd.f32(float %0, float %1, float %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, float %3, float 0.000000e+00
+ %7 = insertelement <4 x float> %__A, float %6, i64 0
+ ret <4 x float> %7
 }
 
 define <4 x float> @test_mm_mask3_fnmadd_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) {
@@ -5298,9 +5355,16 @@ define <4 x float> @test_mm_mask3_fnmadd_round_ss(<4 x float> %__W, <4 x float>
 ; X64-NEXT: vmovaps %xmm2, %xmm0
 ; X64-NEXT: retq
 entry:
- %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__X
- %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ss(<4 x float> %__W, <4 x float> %sub, <4 x float> %__Y, i8 %__U, i32 8)
- ret <4 x float> %0
+ %0 = extractelement <4 x float> %__W, i64 0
+ %.rhs = extractelement <4 x float> %__X, i64 0
+ %1 = fsub float -0.000000e+00, %.rhs
+ %2 = extractelement <4 x float> %__Y, i64 0
+ %3 = tail call float @llvm.x86.avx512.vfmadd.f32(float %0, float %1, float %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, float %3, float %2
+ %7 = insertelement <4 x float> %__Y, float %6, i64 0
+ ret <4 x float> %7
 }
 
 define <4 x float> @test_mm_mask_fnmsub_ss(<4 x float> %__W, i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B) {
@@ -5351,10 +5415,17 @@ define <4 x float> @test_mm_mask_fnmsub_round_ss(<4 x float> %__W, i8 zeroext %_
 ; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT: retq
 entry:
- %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__A
- %sub1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
- %0 = tail call <4 x float> @llvm.x86.avx512.mask.vfmadd.ss(<4 x float> %__W, <4 x float> %sub, <4 x float> %sub1, i8 %__U, i32 8)
- ret <4 x float> %0
+ %0 = extractelement <4 x float> %__W, i64 0
+ %.rhs = extractelement <4 x float> %__A, i64 0
+ %1 = fsub float -0.000000e+00, %.rhs
+ %.rhs2 = extractelement <4 x float> %__B, i64 0
+ %2 = fsub float -0.000000e+00, %.rhs2
+ %3 = tail call float @llvm.x86.avx512.vfmadd.f32(float %0, float %1, float %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, float %3, float %0
+ %7 = insertelement <4 x float> %__W, float %6, i64 0
+ ret <4 x float> %7
 }
 
 define <4 x float> @test_mm_maskz_fnmsub_ss(i8 zeroext %__U, <4 x float> %__A, <4 x float> %__B, <4 x float> %__C) {
@@ -5404,10 +5475,17 @@ define <4 x float> @test_mm_maskz_fnmsub_round_ss(i8 zeroext %__U, <4 x float> %
 ; X64-NEXT: vfmadd213ss {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT: retq
 entry:
- %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__B
- %sub1 = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__C
- %0 = tail call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ss(<4 x float> %__A, <4 x float> %sub, <4 x float> %sub1, i8 %__U, i32 8)
- ret <4 x float> %0
+ %0 = extractelement <4 x float> %__A, i64 0
+ %.rhs = extractelement <4 x float> %__B, i64 0
+ %1 = fsub float -0.000000e+00, %.rhs
+ %.rhs2 = extractelement <4 x float> %__C, i64 0
+ %2 = fsub float -0.000000e+00, %.rhs2
+ %3 = tail call float @llvm.x86.avx512.vfmadd.f32(float %0, float %1, float %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, float %3, float 0.000000e+00
+ %7 = insertelement <4 x float> %__A, float %6, i64 0
+ ret <4 x float> %7
 }
 
 define <4 x float> @test_mm_mask3_fnmsub_ss(<4 x float> %__W, <4 x float> %__X, <4 x float> %__Y, i8 zeroext %__U) {
@@ -5446,8 +5524,10 @@ define <4 x float> @test_mm_mask3_fnmsub_round_ss(<4 x float> %__W, <4 x float>
 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
 ; X86-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
 ; X86-NEXT: vxorps %xmm3, %xmm1, %xmm1
+; X86-NEXT: vxorps %xmm3, %xmm2, %xmm3
+; X86-NEXT: vfmadd213ss %xmm3, %xmm0, %xmm1
 ; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
 ; X86-NEXT: vmovaps %xmm2, %xmm0
 ; X86-NEXT: retl
 ;
@@ -5455,14 +5535,24 @@ define <4 x float> @test_mm_mask3_fnmsub_round_ss(<4 x float> %__W, <4 x float>
 ; X64: # %bb.0: # %entry
 ; X64-NEXT: vbroadcastss {{.*#+}} xmm3 = [-0,-0,-0,-0]
 ; X64-NEXT: vxorps %xmm3, %xmm1, %xmm1
+; X64-NEXT: vxorps %xmm3, %xmm2, %xmm3
+; X64-NEXT: vfmadd213ss %xmm3, %xmm0, %xmm1
 ; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub231ss {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT: vmovss %xmm1, %xmm2, %xmm2 {%k1}
 ; X64-NEXT: vmovaps %xmm2, %xmm0
 ; X64-NEXT: retq
 entry:
- %sub = fsub <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00>, %__X
- %0 = tail call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ss(<4 x float> %__W, <4 x float> %sub, <4 x float> %__Y, i8 %__U, i32 8)
- ret <4 x float> %0
+ %0 = extractelement <4 x float> %__W, i64 0
+ %.rhs = extractelement <4 x float> %__X, i64 0
+ %1 = fsub float -0.000000e+00, %.rhs
+ %.rhs1 = extractelement <4 x float> %__Y, i64 0
+ %2 = fsub float -0.000000e+00, %.rhs1
+ %3 = tail call float @llvm.x86.avx512.vfmadd.f32(float %0, float %1, float %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, float %3, float %.rhs1
+ %7 = insertelement <4 x float> %__Y, float %6, i64 0
+ ret <4 x float> %7
 }
 
 define <2 x double> @test_mm_mask_fmadd_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
@@ -5505,11 +5595,18 @@ define <2 x double> @test_mm_mask_fmadd_round_sd(<2 x double> %__W, i8 zeroext %
 ; X64-NEXT: vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT: retq
 entry:
- %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %__W, <2 x double> %__A, <2 x double> %__B, i8 %__U, i32 8)
- ret <2 x double> %0
+ %0 = extractelement <2 x double> %__W, i64 0
+ %1 = extractelement <2 x double> %__A, i64 0
+ %2 = extractelement <2 x double> %__B, i64 0
+ %3 = tail call double @llvm.x86.avx512.vfmadd.f64(double %0, double %1, double %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, double %3, double %0
+ %7 = insertelement <2 x double> %__W, double %6, i64 0
+ ret <2 x double> %7
 }
 
-declare <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) #1
+declare double @llvm.x86.avx512.vfmadd.f64(double, double, double, i32) #1
 
 define <2 x double> @test_mm_maskz_fmadd_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
 ; X86-LABEL: test_mm_maskz_fmadd_sd:
@@ -5550,12 +5647,17 @@ define <2 x double> @test_mm_maskz_fmadd_round_sd(i8 zeroext %__U, <2 x double>
 ; X64-NEXT: vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT: retq
 entry:
- %0 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %__A, <2 x double> %__B, <2 x double> %__C, i8 %__U, i32 8)
- ret <2 x double> %0
+ %0 = extractelement <2 x double> %__A, i64 0
+ %1 = extractelement <2 x double> %__B, i64 0
+ %2 = extractelement <2 x double> %__C, i64 0
+ %3 = tail call double @llvm.x86.avx512.vfmadd.f64(double %0, double %1, double %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, double %3, double 0.000000e+00
+ %7 = insertelement <2 x double> %__A, double %6, i64 0
+ ret <2 x double> %7
 }
 
-declare <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) #1
-
 define <2 x double> @test_mm_mask3_fmadd_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) {
 ; X86-LABEL: test_mm_mask3_fmadd_sd:
 ; X86: # %bb.0: # %entry
@@ -5600,12 +5702,17 @@ define <2 x double> @test_mm_mask3_fmadd_round_sd(<2 x double> %__W, <2 x double
 ; X64-NEXT: vmovapd %xmm2, %xmm0
 ; X64-NEXT: retq
 entry:
- %0 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 %__U, i32 8)
- ret <2 x double> %0
+ %0 = extractelement <2 x double> %__W, i64 0
+ %1 = extractelement <2 x double> %__X, i64 0
+ %2 = extractelement <2 x double> %__Y, i64 0
+ %3 = tail call double @llvm.x86.avx512.vfmadd.f64(double %0, double %1, double %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, double %3, double %2
+ %7 = insertelement <2 x double> %__Y, double %6, i64 0
+ ret <2 x double> %7
 }
 
-declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) #1
-
 define <2 x double> @test_mm_mask_fmsub_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
 ; X86-LABEL: test_mm_mask_fmsub_sd:
 ; X86: # %bb.0: # %entry
@@ -5649,9 +5756,16 @@ define <2 x double> @test_mm_mask_fmsub_round_sd(<2 x double> %__W, i8 zeroext %
 ; X64-NEXT: vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT: retq
 entry:
- %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B
- %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %__W, <2 x double> %__A, <2 x double> %sub, i8 %__U, i32 8)
- ret <2 x double> %0
+ %0 = extractelement <2 x double> %__W, i64 0
+ %1 = extractelement <2 x double> %__A, i64 0
+ %.rhs = extractelement <2 x double> %__B, i64 0
+ %2 = fsub double -0.000000e+00, %.rhs
+ %3 = tail call double @llvm.x86.avx512.vfmadd.f64(double %0, double %1, double %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, double %3, double %0
+ %7 = insertelement <2 x double> %__W, double %6, i64 0
+ ret <2 x double> %7
 }
 
 define <2 x double> @test_mm_maskz_fmsub_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
@@ -5696,9 +5810,16 @@ define <2 x double> @test_mm_maskz_fmsub_round_sd(i8 zeroext %__U, <2 x double>
 ; X64-NEXT: vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT: retq
 entry:
- %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
- %0 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %__A, <2 x double> %__B, <2 x double> %sub, i8 %__U, i32 8)
- ret <2 x double> %0
+ %0 = extractelement <2 x double> %__A, i64 0
+ %1 = extractelement <2 x double> %__B, i64 0
+ %.rhs = extractelement <2 x double> %__C, i64 0
+ %2 = fsub double -0.000000e+00, %.rhs
+ %3 = tail call double @llvm.x86.avx512.vfmadd.f64(double %0, double %1, double %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, double %3, double 0.000000e+00
+ %7 = insertelement <2 x double> %__A, double %6, i64 0
+ ret <2 x double> %7
 }
 
 define <2 x double> @test_mm_mask3_fmsub_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) {
@@ -5734,24 +5855,34 @@ define <2 x double> @test_mm_mask3_fmsub_round_sd(<2 x double> %__W, <2 x double
 ; X86-LABEL: test_mm_mask3_fmsub_round_sd:
 ; X86: # %bb.0: # %entry
 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
+; X86-NEXT: vxorpd {{\.LCPI.*}}, %xmm2, %xmm3
+; X86-NEXT: vfmadd213sd %xmm3, %xmm0, %xmm1
 ; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub231sd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
 ; X86-NEXT: vmovapd %xmm2, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: test_mm_mask3_fmsub_round_sd:
 ; X64: # %bb.0: # %entry
+; X64-NEXT: vxorpd {{.*}}(%rip), %xmm2, %xmm3
+; X64-NEXT: vfmadd213sd %xmm3, %xmm0, %xmm1
 ; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub231sd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
 ; X64-NEXT: vmovapd %xmm2, %xmm0
 ; X64-NEXT: retq
 entry:
- %0 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 %__U, i32 8)
- ret <2 x double> %0
+ %0 = extractelement <2 x double> %__W, i64 0
+ %1 = extractelement <2 x double> %__X, i64 0
+ %.rhs = extractelement <2 x double> %__Y, i64 0
+ %2 = fsub double -0.000000e+00, %.rhs
+ %3 = tail call double @llvm.x86.avx512.vfmadd.f64(double %0, double %1, double %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, double %3, double %.rhs
+ %7 = insertelement <2 x double> %__Y, double %6, i64 0
+ ret <2 x double> %7
 }
 
-declare <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double>, <2 x double>, <2 x double>, i8, i32) #1
-
 define <2 x double> @test_mm_mask_fnmadd_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
 ; X86-LABEL: test_mm_mask_fnmadd_sd:
 ; X86: # %bb.0: # %entry
@@ -5795,9 +5926,16 @@ define <2 x double> @test_mm_mask_fnmadd_round_sd(<2 x double> %__W, i8 zeroext
 ; X64-NEXT: vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT: retq
 entry:
- %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__A
- %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %__W, <2 x double> %sub, <2 x double> %__B, i8 %__U, i32 8)
- ret <2 x double> %0
+ %0 = extractelement <2 x double> %__W, i64 0
+ %.rhs = extractelement <2 x double> %__A, i64 0
+ %1 = fsub double -0.000000e+00, %.rhs
+ %2 = extractelement <2 x double> %__B, i64 0
+ %3 = tail call double @llvm.x86.avx512.vfmadd.f64(double %0, double %1, double %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, double %3, double %0
+ %7 = insertelement <2 x double> %__W, double %6, i64 0
+ ret <2 x double> %7
 }
 
 define <2 x double> @test_mm_maskz_fnmadd_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
@@ -5842,9 +5980,16 @@ define <2 x double> @test_mm_maskz_fnmadd_round_sd(i8 zeroext %__U, <2 x double>
 ; X64-NEXT: vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT: retq
 entry:
- %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B
- %0 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %__A, <2 x double> %sub, <2 x double> %__C, i8 %__U, i32 8)
- ret <2 x double> %0
+ %0 = extractelement <2 x double> %__A, i64 0
+ %.rhs = extractelement <2 x double> %__B, i64 0
+ %1 = fsub double -0.000000e+00, %.rhs
+ %2 = extractelement <2 x double> %__C, i64 0
+ %3 = tail call double @llvm.x86.avx512.vfmadd.f64(double %0, double %1, double %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, double %3, double 0.000000e+00
+ %7 = insertelement <2 x double> %__A, double %6, i64 0
+ ret <2 x double> %7
 }
 
 define <2 x double> @test_mm_mask3_fnmadd_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) {
@@ -5894,9 +6039,16 @@ define <2 x double> @test_mm_mask3_fnmadd_round_sd(<2 x double> %__W, <2 x doubl
 ; X64-NEXT: vmovapd %xmm2, %xmm0
 ; X64-NEXT: retq
 entry:
- %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__X
- %0 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmadd.sd(<2 x double> %__W, <2 x double> %sub, <2 x double> %__Y, i8 %__U, i32 8)
- ret <2 x double> %0
+ %0 = extractelement <2 x double> %__W, i64 0
+ %.rhs = extractelement <2 x double> %__X, i64 0
+ %1 = fsub double -0.000000e+00, %.rhs
+ %2 = extractelement <2 x double> %__Y, i64 0
+ %3 = tail call double @llvm.x86.avx512.vfmadd.f64(double %0, double %1, double %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, double %3, double %2
+ %7 = insertelement <2 x double> %__Y, double %6, i64 0
+ ret <2 x double> %7
 }
 
 define <2 x double> @test_mm_mask_fnmsub_sd(<2 x double> %__W, i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B) {
@@ -5947,10 +6099,17 @@ define <2 x double> @test_mm_mask_fnmsub_round_sd(<2 x double> %__W, i8 zeroext
 ; X64-NEXT: vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1}
 ; X64-NEXT: retq
 entry:
- %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__A
- %sub1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B
- %0 = tail call <2 x double> @llvm.x86.avx512.mask.vfmadd.sd(<2 x double> %__W, <2 x double> %sub, <2 x double> %sub1, i8 %__U, i32 8)
- ret <2 x double> %0
+ %0 = extractelement <2 x double> %__W, i64 0
+ %.rhs = extractelement <2 x double> %__A, i64 0
+ %1 = fsub double -0.000000e+00, %.rhs
+ %.rhs2 = extractelement <2 x double> %__B, i64 0
+ %2 = fsub double -0.000000e+00, %.rhs2
+ %3 = tail call double @llvm.x86.avx512.vfmadd.f64(double %0, double %1, double %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, double %3, double %0
+ %7 = insertelement <2 x double> %__W, double %6, i64 0
+ ret <2 x double> %7
 }
 
 define <2 x double> @test_mm_maskz_fnmsub_sd(i8 zeroext %__U, <2 x double> %__A, <2 x double> %__B, <2 x double> %__C) {
@@ -6000,10 +6159,17 @@ define <2 x double> @test_mm_maskz_fnmsub_round_sd(i8 zeroext %__U, <2 x double>
 ; X64-NEXT: vfmadd213sd {rn-sae}, %xmm2, %xmm1, %xmm0 {%k1} {z}
 ; X64-NEXT: retq
 entry:
- %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__B
- %sub1 = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__C
- %0 = tail call <2 x double> @llvm.x86.avx512.maskz.vfmadd.sd(<2 x double> %__A, <2 x double> %sub, <2 x double> %sub1, i8 %__U, i32 8)
- ret <2 x double> %0
+ %0 = extractelement <2 x double> %__A, i64 0
+ %.rhs = extractelement <2 x double> %__B, i64 0
+ %1 = fsub double -0.000000e+00, %.rhs
+ %.rhs2 = extractelement <2 x double> %__C, i64 0
+ %2 = fsub double -0.000000e+00, %.rhs2
+ %3 = tail call double @llvm.x86.avx512.vfmadd.f64(double %0, double %1, double %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, double %3, double 0.000000e+00
+ %7 = insertelement <2 x double> %__A, double %6, i64 0
+ ret <2 x double> %7
 }
 
 define <2 x double> @test_mm_mask3_fnmsub_sd(<2 x double> %__W, <2 x double> %__X, <2 x double> %__Y, i8 zeroext %__U) {
@@ -6040,23 +6206,37 @@ define <2 x double> @test_mm_mask3_fnmsub_round_sd(<2 x double> %__W, <2 x doubl
 ; X86-LABEL: test_mm_mask3_fnmsub_round_sd:
 ; X86: # %bb.0: # %entry
 ; X86-NEXT: movb {{[0-9]+}}(%esp), %al
-; X86-NEXT: vxorpd {{\.LCPI.*}}, %xmm1, %xmm1
+; X86-NEXT: vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
+; X86-NEXT: vxorpd %xmm3, %xmm1, %xmm1
+; X86-NEXT: vxorpd %xmm3, %xmm2, %xmm3
+; X86-NEXT: vfmadd213sd %xmm3, %xmm0, %xmm1
 ; X86-NEXT: kmovw %eax, %k1
-; X86-NEXT: vfmsub231sd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X86-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
 ; X86-NEXT: vmovapd %xmm2, %xmm0
 ; X86-NEXT: retl
 ;
 ; X64-LABEL: test_mm_mask3_fnmsub_round_sd:
 ; X64: # %bb.0: # %entry
-; X64-NEXT: vxorpd {{.*}}(%rip), %xmm1, %xmm1
+; X64-NEXT: vmovapd {{.*#+}} xmm3 = [-0.000000e+00,-0.000000e+00]
+; X64-NEXT: vxorpd %xmm3, %xmm1, %xmm1
+; X64-NEXT: vxorpd %xmm3, %xmm2, %xmm3
+; X64-NEXT: vfmadd213sd %xmm3, %xmm0, %xmm1
 ; X64-NEXT: kmovw %edi, %k1
-; X64-NEXT: vfmsub231sd {rn-sae}, %xmm1, %xmm0, %xmm2 {%k1}
+; X64-NEXT: vmovsd %xmm1, %xmm2, %xmm2 {%k1}
 ; X64-NEXT: vmovapd %xmm2, %xmm0
 ; X64-NEXT: retq
 entry:
- %sub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %__X
- %0 = tail call <2 x double> @llvm.x86.avx512.mask3.vfmsub.sd(<2 x double> %__W, <2 x double> %sub, <2 x double> %__Y, i8 %__U, i32 8)
- ret <2 x double> %0
+ %0 = extractelement <2 x double> %__W, i64 0
+ %.rhs = extractelement <2 x double> %__X, i64 0
+ %1 = fsub double -0.000000e+00, %.rhs
+ %.rhs1 = extractelement <2 x double> %__Y, i64 0
+ %2 = fsub double -0.000000e+00, %.rhs1
+ %3 = tail call double @llvm.x86.avx512.vfmadd.f64(double %0, double %1, double %2, i32 8)
+ %4 = bitcast i8 %__U to <8 x i1>
+ %5 = extractelement <8 x i1> %4, i64 0
+ %6 = select i1 %5, double %3, double %.rhs1
+ %7 = insertelement <2 x double> %__Y, double %6, i64 0
+ ret <2 x double> %7
 }
 
 define <8 x i64> @test_mm512_mask_expandloadu_epi64(<8 x i64> %__W, i8 zeroext %__U, i8* readonly %__P) {
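The fneg limitation mentioned in the commit message is visible in the mask3 fmsub/fnmsub round tests above: the negation reaches isel as a separate fsub, so the checks now show a vxorps/vxorpd plus an unfused vfmadd213ss/vfmadd213sd and a masked vmovss/vmovsd instead of a single vfmsub231 with embedded rounding. A minimal sketch of the negation idiom, with illustrative value names:

```llvm
; Scalar negation is written as an fsub from -0.0 (LLVM's fneg idiom).
; Until the follow-up patch teaches isel to fold this negation into the
; new rounding-mode intrinsic, it stays a separate xor instruction in
; the generated code, as the updated CHECK lines above demonstrate.
define float @sketch_negated_operand(float %a, float %b, <4 x float> %__B) {
entry:
  %.rhs = extractelement <4 x float> %__B, i64 0
  %neg = fsub float -0.000000e+00, %.rhs
  %r = tail call float @llvm.x86.avx512.vfmadd.f32(float %a, float %b, float %neg, i32 8)
  ret float %r
}

declare float @llvm.x86.avx512.vfmadd.f32(float, float, float, i32)
```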

