| field | value | date |
|---|---|---|
| author | Craig Topper <craig.topper@intel.com> | 2018-07-06 03:42:06 +0000 |
| committer | Craig Topper <craig.topper@intel.com> | 2018-07-06 03:42:06 +0000 |
| commit | 4ea8949697828da0f702024b9fb2b23a19c69924 | |
| tree | 6a7985edcf5394ba7d4e2bd533ef56c07a0bb239 (llvm/test) | |
| parent | 457cc34e48586b8920f8aa5a1a7e3364fbadfbf3 | |
[X86] Clean up some of the AVX512 masked FMA tests to prepare for removing and auto-upgrading them.
- Split test cases that call two intrinsics into one case per call (sketched below).
- Remove tests of the mask3 and maskz intrinsics with an all-ones mask. These won't be interesting after the upgrade.
- Restore test cases for some intrinsics that are marked for deletion but have not been deleted yet.
llvm-svn: 336408
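To make the first bullet concrete, here is a minimal sketch of the split (hypothetical test names; the shape mirrors the hunks in avx512-fma-intrinsics.ll below). Before the cleanup, one test called the intrinsic twice — once masked, once with a non-default rounding mode — and folded both results into a single `fadd`, so neither call could be upgraded or deleted independently. After the split, each call gets its own test:

```llvm
; Before: one case, two intrinsic calls, results combined with fadd.
define <8 x double> @combined_case(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) {
  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
  %res1 = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0)
  %res2 = fadd <8 x double> %res, %res1
  ret <8 x double> %res2
}

; After: one case per call, so each survives (or is dropped) on its own.
define <8 x double> @masked_case(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) {
  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
  ret <8 x double> %res
}

define <8 x double> @rounding_case(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2) {
  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0)
  ret <8 x double> %res
}

declare <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)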
Diffstat (limited to 'llvm/test')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/avx512-fma-intrinsics-upgrade.ll | 670 |
| -rw-r--r-- | llvm/test/CodeGen/X86/avx512-fma-intrinsics.ll | 416 |
| -rw-r--r-- | llvm/test/CodeGen/X86/avx512vl-intrinsics.ll | 1230 |
3 files changed, 1099 insertions, 1217 deletions
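For context before reading the diff: the hunks in avx512-fma-intrinsics.ll already express masking in the form the auto-upgrade is expected to produce — an unmasked intrinsic call followed by a `select` on the bitcast mask, rather than a mask operand baked into the intrinsic. A minimal sketch of that shape, using the unmasked `@llvm.x86.avx512.vfmadd.pd.512` variant that appears in the diff (the function name here is illustrative):

```llvm
; Masking expressed in IR: call the unmasked operation, then select each
; lane between the result and the passthrough operand %x0, using the i8
; mask bitcast to a vector of i1.
define <8 x double> @masked_fmadd_via_select(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3) {
  %res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i32 4)
  %bc = bitcast i8 %x3 to <8 x i1>
  %sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %x0
  ret <8 x double> %sel
}

declare <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i32)
```

Tests written this way no longer depend on the masked intrinsic at all, which is what makes the removal and auto-upgrade safe. The diff follows.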
```diff
diff --git a/llvm/test/CodeGen/X86/avx512-fma-intrinsics-upgrade.ll b/llvm/test/CodeGen/X86/avx512-fma-intrinsics-upgrade.ll
new file mode 100644
index 00000000000..1089500e390
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avx512-fma-intrinsics-upgrade.ll
@@ -0,0 +1,670 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx512f | FileCheck %s
+
+declare <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
+declare <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
+
+define <16 x float> @test_x86_vfnmadd_ps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+; CHECK-LABEL: test_x86_vfnmadd_ps_z:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfnmadd213ps {{.*#+}} zmm0 = -(zmm1 * zmm0) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfnmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
+  ret <16 x float> %res
+}
+declare <16 x float> @llvm.x86.avx512.mask.vfnmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) nounwind readnone
+
+define <16 x float> @test_mask_vfnmadd_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+; CHECK-LABEL: test_mask_vfnmadd_ps:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmadd132ps {{.*#+}} zmm0 = -(zmm0 * zmm1) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfnmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4) nounwind
+  ret <16 x float> %res
+}
+
+define <8 x double> @test_x86_vfnmadd_pd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+; CHECK-LABEL: test_x86_vfnmadd_pd_z:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfnmadd213pd {{.*#+}} zmm0 = -(zmm1 * zmm0) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
+  ret <8 x double> %res
+}
+declare <8 x double> @llvm.x86.avx512.mask.vfnmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32) nounwind readnone
+
+define <8 x double> @test_mask_vfnmadd_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfnmadd_pd:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmadd132pd {{.*#+}} zmm0 = -(zmm0 * zmm1) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
+  ret <8 x double> %res
+}
+
+define <16 x float> @test_x86_vfnmsubps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+; CHECK-LABEL: test_x86_vfnmsubps_z:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfnmsub213ps {{.*#+}} zmm0 = -(zmm1 * zmm0) - zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
+  ret <16 x float> %res
+}
+declare <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) nounwind readnone
+
+define <16 x float> @test_mask_vfnmsub_ps(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+; CHECK-LABEL: test_mask_vfnmsub_ps:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmsub132ps {{.*#+}} zmm0 = -(zmm0 * zmm1) - zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4) nounwind
+  ret <16 x float> %res
+}
+
+define <8 x double> @test_x86_vfnmsubpd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+; CHECK-LABEL: test_x86_vfnmsubpd_z:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfnmsub213pd {{.*#+}} zmm0 = -(zmm1 * zmm0) - zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
+  ret <8 x double> %res
+}
+declare <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32) nounwind readnone
+
+define <8 x double> @test_mask_vfnmsub_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfnmsub_pd:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmsub132pd {{.*#+}} zmm0 = -(zmm0 * zmm1) - zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
+  ret <8 x double> %res
+}
+
+define <16 x float> @test_x86_vfmaddsubps_z(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+; CHECK-LABEL: test_x86_vfmaddsubps_z:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfmaddsub213ps {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_fmaddsub_ps(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask) {
+; CHECK-LABEL: test_mask_fmaddsub_ps:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmaddsub132ps {{.*#+}} zmm0 = (zmm0 * zmm1) +/- zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %a, <16 x float> %b, <16 x float> %c, i16 %mask, i32 4)
+  ret <16 x float> %res
+}
+
+declare <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) nounwind readnone
+
+define <8 x double> @test_x86_vfmaddsubpd_z(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+; CHECK-LABEL: test_x86_vfmaddsubpd_z:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfmaddsub213pd {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
+  ret <8 x double> %res
+}
+declare <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32) nounwind readnone
+
+define <8 x double> @test_mask_vfmaddsub_pd(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_vfmaddsub_pd:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmaddsub132pd {{.*#+}} zmm0 = (zmm0 * zmm1) +/- zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmaddsub132pd {{.*#+}} zmm0 = (zmm0 * zmm1) +/- zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
+  ret <8 x double> %res
+}
+
+declare <8 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
+
+define <8 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_pd_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmaddsub231pd {{.*#+}} zmm2 = (zmm0 * zmm1) +/- zmm2
+; CHECK-NEXT:    vmovapd %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
+  ret <8 x double> %res
+}
+
+declare <8 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
+
+define <8 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_pd_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmaddsub213pd {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
+  ret <8 x double> %res
+}
+
+define <16 x float>@test_int_x86_avx512_mask_vfmaddsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmaddsub132ps {{.*#+}} zmm0 = (zmm0 * zmm1) +/- zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
+  ret <16 x float> %res
+}
+
+declare <16 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
+
+define <16 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ps_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmaddsub231ps {{.*#+}} zmm2 = (zmm0 * zmm1) +/- zmm2
+; CHECK-NEXT:    vmovaps %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
+  ret <16 x float> %res
+}
+
+declare <16 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
+
+define <16 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
+; CHECK-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ps_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmaddsub213ps {{.*#+}} zmm0 = (zmm1 * zmm0) +/- zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
+  ret <16 x float> %res
+}
+
+declare <8 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
+
+define <8 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_pd_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmsubadd231pd {{.*#+}} zmm2 = (zmm0 * zmm1) -/+ zmm2
+; CHECK-NEXT:    vmovapd %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
+  ret <8 x double> %res
+}
+
+declare <16 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
+
+define <16 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ps_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmsubadd231ps {{.*#+}} zmm2 = (zmm0 * zmm1) -/+ zmm2
+; CHECK-NEXT:    vmovaps %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rne(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rne:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd132ps {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 0) nounwind
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtn(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rtn:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd132ps {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 1) nounwind
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtp(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rtp:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd132ps {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 2) nounwind
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rtz(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_rtz:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd132ps {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 3) nounwind
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrb_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
+; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrb_current:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd132ps {{.*#+}} zmm0 = (zmm0 * zmm1) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask, i32 4) nounwind
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rne(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rne:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfmadd213ps {rn-sae}, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 0) nounwind
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtn(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rtn:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfmadd213ps {rd-sae}, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 1) nounwind
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtp(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rtp:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfmadd213ps {ru-sae}, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 2) nounwind
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_rtz(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_rtz:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfmadd213ps {rz-sae}, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 3) nounwind
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_mask_round_vfmadd512_ps_rrbz_current(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2) {
+; CHECK-LABEL: test_mask_round_vfmadd512_ps_rrbz_current:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 -1, i32 4) nounwind
+  ret <16 x float> %res
+}
+
+declare <8 x double> @llvm.x86.avx512.mask3.vfmsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
+
+define <8 x double>@test_int_x86_avx512_mask3_vfmsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_pd_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmsub231pd {{.*#+}} zmm2 = (zmm0 * zmm1) - zmm2
+; CHECK-NEXT:    vmovapd %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask3.vfmsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
+  ret <8 x double> %res
+}
+
+declare <16 x float> @llvm.x86.avx512.mask3.vfmsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
+
+define <16 x float>@test_int_x86_avx512_mask3_vfmsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmsub_ps_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmsub231ps {{.*#+}} zmm2 = (zmm0 * zmm1) - zmm2
+; CHECK-NEXT:    vmovaps %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask3.vfmsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
+  ret <16 x float> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rne:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 0) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rtn:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd132pd {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 1) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rtp:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd132pd {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 2) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_rtz:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd132pd {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 3) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrb_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrb_current:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd132pd {{.*#+}} zmm0 = (zmm0 * zmm1) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rne:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 0) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rtn:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfmadd213pd {rd-sae}, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 1) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rtp:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfmadd213pd {ru-sae}, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 2) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_rtz:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfmadd213pd {rz-sae}, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 3) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfmadd512_pd_rrbz_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+; CHECK-LABEL: test_mask_round_vfmadd512_pd_rrbz_current:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfmadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double>@test_int_x86_avx512_mask_vfmadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_pd_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd132pd {{.*#+}} zmm0 = (zmm0 * zmm1) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
+  ret <8 x double> %res
+}
+
+declare <8 x double> @llvm.x86.avx512.mask3.vfmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
+
+define <8 x double>@test_int_x86_avx512_mask3_vfmadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_pd_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd231pd {{.*#+}} zmm2 = (zmm0 * zmm1) + zmm2
+; CHECK-NEXT:    vmovapd %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask3.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
+  ret <8 x double> %res
+}
+
+declare <8 x double> @llvm.x86.avx512.maskz.vfmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
+
+define <8 x double>@test_int_x86_avx512_maskz_vfmadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_pd_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd213pd {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.maskz.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
+  ret <8 x double> %res
+}
+
+define <16 x float>@test_int_x86_avx512_mask_vfmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask_vfmadd_ps_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd132ps {{.*#+}} zmm0 = (zmm0 * zmm1) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
+  ret <16 x float> %res
+}
+
+declare <16 x float> @llvm.x86.avx512.mask3.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
+
+define <16 x float>@test_int_x86_avx512_mask3_vfmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfmadd_ps_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd231ps {{.*#+}} zmm2 = (zmm0 * zmm1) + zmm2
+; CHECK-NEXT:    vmovaps %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask3.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
+  ret <16 x float> %res
+}
+
+declare <16 x float> @llvm.x86.avx512.maskz.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
+
+define <16 x float>@test_int_x86_avx512_maskz_vfmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
+; CHECK-LABEL: test_int_x86_avx512_maskz_vfmadd_ps_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfmadd213ps {{.*#+}} zmm0 = (zmm1 * zmm0) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.maskz.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
+  ret <16 x float> %res
+}
+
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rne:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmsub132pd {rn-sae}, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 0) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rtn:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmsub132pd {rd-sae}, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 1) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rtp:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmsub132pd {ru-sae}, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 2) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_rtz:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmsub132pd {rz-sae}, %zmm1, %zmm2, %zmm0 {%k1}
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 3) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrb_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
+; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrb_current:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmsub132pd {{.*#+}} zmm0 = -(zmm0 * zmm1) - zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask, i32 4) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rne:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfnmsub213pd {rn-sae}, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 0) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtn(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rtn:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfnmsub213pd {rd-sae}, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 1) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtp(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rtp:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfnmsub213pd {ru-sae}, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 2) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_rtz(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_rtz:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfnmsub213pd {rz-sae}, %zmm2, %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 3) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_mask_round_vfnmsub512_pd_rrbz_current(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2) {
+; CHECK-LABEL: test_mask_round_vfnmsub512_pd_rrbz_current:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    vfnmsub213pd {{.*#+}} zmm0 = -(zmm1 * zmm0) - zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 -1, i32 4) nounwind
+  ret <8 x double> %res
+}
+
+define <8 x double>@test_int_x86_avx512_mask_vfnmsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmsub132pd {{.*#+}} zmm0 = -(zmm0 * zmm1) - zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
+  ret <8 x double> %res
+}
+
+declare <8 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
+
+define <8 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_pd_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmsub231pd {{.*#+}} zmm2 = -(zmm0 * zmm1) - zmm2
+; CHECK-NEXT:    vmovapd %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
+  ret <8 x double> %res
+}
+
+define <16 x float>@test_int_x86_avx512_mask_vfnmsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmsub132ps {{.*#+}} zmm0 = -(zmm0 * zmm1) - zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
+  ret <16 x float> %res
+}
+
+declare <16 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
+
+define <16 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask3_vfnmsub_ps_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmsub231ps {{.*#+}} zmm2 = -(zmm0 * zmm1) - zmm2
+; CHECK-NEXT:    vmovaps %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
+  ret <16 x float> %res
+}
+
+define <8 x double>@test_int_x86_avx512_mask_vfnmadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmadd132pd {{.*#+}} zmm0 = -(zmm0 * zmm1) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <8 x double> @llvm.x86.avx512.mask.vfnmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
+  ret <8 x double> %res
+}
+
+define <16 x float>@test_int_x86_avx512_mask_vfnmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
+; CHECK-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_512:
+; CHECK:       ## %bb.0:
+; CHECK-NEXT:    kmovw %edi, %k1
+; CHECK-NEXT:    vfnmadd132ps {{.*#+}} zmm0 = -(zmm0 * zmm1) + zmm2
+; CHECK-NEXT:    retq
+  %res = call <16 x float> @llvm.x86.avx512.mask.vfnmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
+  ret <16 x float> %res
+}
diff --git a/llvm/test/CodeGen/X86/avx512-fma-intrinsics.ll b/llvm/test/CodeGen/X86/avx512-fma-intrinsics.ll
index c99379e26d2..f758aebaea5 100644
--- a/llvm/test/CodeGen/X86/avx512-fma-intrinsics.ll
+++ b/llvm/test/CodeGen/X86/avx512-fma-intrinsics.ll
@@ -192,28 +192,20 @@ define <8 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_512(<8 x double> %x0,
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vmovapd %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xd8]
-; X86-NEXT:    vfmaddsub132pd %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x96,0xd9]
-; X86-NEXT:    # zmm3 = (zmm3 * zmm1) +/- zmm2
-; X86-NEXT:    vfmaddsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa6,0xca]
-; X86-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X86-NEXT:    vfmaddsub132pd %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x96,0xc1]
+; X86-NEXT:    # zmm0 = (zmm0 * zmm1) +/- zmm2
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovapd %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xd8]
-; X64-NEXT:    vfmaddsub132pd %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x96,0xd9]
-; X64-NEXT:    # zmm3 = (zmm3 * zmm1) +/- zmm2
-; X64-NEXT:    vfmaddsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa6,0xca]
-; X64-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X64-NEXT:    vfmaddsub132pd %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x96,0xc1]
+; X64-NEXT:    # zmm0 = (zmm0 * zmm1) +/- zmm2
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i32 4)
   %bc = bitcast i8 %x3 to <8 x i1>
   %sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %x0
-  %res1 = call <8 x double> @llvm.x86.avx512.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i32 0)
-  %res2 = fadd <8 x double> %sel, %res1
-  ret <8 x double> %res2
+  ret <8 x double> %sel
 }
 
 declare <8 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
@@ -223,26 +215,20 @@ define <8 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_512(<8 x double> %x0,
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vmovapd %zmm2, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xda]
-; X86-NEXT:    vfmaddsub231pd %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xb6,0xd9]
-; X86-NEXT:    # zmm3 = (zmm0 * zmm1) +/- zmm3
-; X86-NEXT:    vfmaddsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa6,0xca]
-; X86-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X86-NEXT:    vfmaddsub231pd %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xb6,0xd1]
+; X86-NEXT:    # zmm2 = (zmm0 * zmm1) +/- zmm2
+; X86-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask3_vfmaddsub_pd_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovapd %zmm2, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xda]
-; X64-NEXT:    vfmaddsub231pd %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xb6,0xd9]
-; X64-NEXT:    # zmm3 = (zmm0 * zmm1) +/- zmm3
-; X64-NEXT:    vfmaddsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa6,0xca]
-; X64-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X64-NEXT:    vfmaddsub231pd %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xb6,0xd1]
+; X64-NEXT:    # zmm2 = (zmm0 * zmm1) +/- zmm2
+; X64-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
-  %res1 = call <8 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0)
-  %res2 = fadd <8 x double> %res, %res1
-  ret <8 x double> %res2
+  ret <8 x double> %res
 }
 
 declare <8 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
@@ -252,54 +238,38 @@ define <8 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_512(<8 x double> %x0,
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vmovapd %zmm1, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xd9]
-; X86-NEXT:    vfmaddsub213pd %zmm2, %zmm0, %zmm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0xa6,0xda]
-; X86-NEXT:    # zmm3 = (zmm0 * zmm3) +/- zmm2
-; X86-NEXT:    vfmaddsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa6,0xca]
-; X86-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X86-NEXT:    vfmaddsub213pd %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0xa6,0xc2]
+; X86-NEXT:    # zmm0 = (zmm1 * zmm0) +/- zmm2
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_maskz_vfmaddsub_pd_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovapd %zmm1, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xd9]
-; X64-NEXT:    vfmaddsub213pd %zmm2, %zmm0, %zmm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0xa6,0xda]
-; X64-NEXT:    # zmm3 = (zmm0 * zmm3) +/- zmm2
-; X64-NEXT:    vfmaddsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa6,0xca]
-; X64-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X64-NEXT:    vfmaddsub213pd %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0xa6,0xc2]
+; X64-NEXT:    # zmm0 = (zmm1 * zmm0) +/- zmm2
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
-  %res1 = call <8 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0)
-  %res2 = fadd <8 x double> %res, %res1
-  ret <8 x double> %res2
+  ret <8 x double> %res
 }
 
 define <16 x float>@test_int_x86_avx512_mask_vfmaddsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
 ; X86-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vmovaps %zmm0, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd8]
-; X86-NEXT:    vfmaddsub132ps %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x96,0xd9]
-; X86-NEXT:    # zmm3 = (zmm3 * zmm1) +/- zmm2
-; X86-NEXT:    vfmaddsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa6,0xca]
-; X86-NEXT:    vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1]
+; X86-NEXT:    vfmaddsub132ps %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x96,0xc1]
+; X86-NEXT:    # zmm0 = (zmm0 * zmm1) +/- zmm2
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovaps %zmm0, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd8]
-; X64-NEXT:    vfmaddsub132ps %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x96,0xd9]
-; X64-NEXT:    # zmm3 = (zmm3 * zmm1) +/- zmm2
-; X64-NEXT:    vfmaddsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa6,0xca]
-; X64-NEXT:    vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1]
+; X64-NEXT:    vfmaddsub132ps %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x96,0xc1]
+; X64-NEXT:    # zmm0 = (zmm0 * zmm1) +/- zmm2
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i32 4)
   %bc = bitcast i16 %x3 to <16 x i1>
   %sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %x0
-  %res1 = call <16 x float> @llvm.x86.avx512.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i32 0)
-  %res2 = fadd <16 x float> %sel, %res1
-  ret <16 x float> %res2
+  ret <16 x float> %sel
 }
 
 declare <16 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
@@ -308,26 +278,20 @@ define <16 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_512(<16 x float> %x0,
 ; X86-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ps_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vmovaps %zmm2, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xda]
-; X86-NEXT:    vfmaddsub231ps %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xb6,0xd9]
-; X86-NEXT:    # zmm3 = (zmm0 * zmm1) +/- zmm3
-; X86-NEXT:    vfmaddsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa6,0xca]
-; X86-NEXT:    vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1]
+; X86-NEXT:    vfmaddsub231ps %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xb6,0xd1]
+; X86-NEXT:    # zmm2 = (zmm0 * zmm1) +/- zmm2
+; X86-NEXT:    vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ps_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovaps %zmm2, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xda]
-; X64-NEXT:    vfmaddsub231ps %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xb6,0xd9]
-; X64-NEXT:    # zmm3 = (zmm0 * zmm1) +/- zmm3
-; X64-NEXT:    vfmaddsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa6,0xca]
-; X64-NEXT:    vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1]
+; X64-NEXT:    vfmaddsub231ps %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xb6,0xd1]
+; X64-NEXT:    # zmm2 = (zmm0 * zmm1) +/- zmm2
+; X64-NEXT:    vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <16 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
-  %res1 = call <16 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0)
-  %res2 = fadd <16 x float> %res, %res1
-  ret <16 x float> %res2
+  ret <16 x float> %res
 }
 
 declare <16 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
@@ -336,26 +300,18 @@ define <16 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_512(<16 x float> %x0,
 ; X86-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ps_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vmovaps %zmm1, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd9]
-; X86-NEXT:    vfmaddsub213ps %zmm2, %zmm0, %zmm3 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0xa6,0xda]
-; X86-NEXT:    # zmm3 = (zmm0 * zmm3) +/- zmm2
-; X86-NEXT:    vfmaddsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa6,0xca]
-; X86-NEXT:    vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1]
+; X86-NEXT:    vfmaddsub213ps %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xc9,0xa6,0xc2]
+; X86-NEXT:    # zmm0 = (zmm1 * zmm0) +/- zmm2
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ps_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovaps %zmm1, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd9]
-; X64-NEXT:    vfmaddsub213ps %zmm2, %zmm0, %zmm3 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0xa6,0xda]
-; X64-NEXT:    # zmm3 = (zmm0 * zmm3) +/- zmm2
-; X64-NEXT:    vfmaddsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa6,0xca]
-; X64-NEXT:    vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1]
+; X64-NEXT:    vfmaddsub213ps %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xc9,0xa6,0xc2]
+; X64-NEXT:    # zmm0 = (zmm1 * zmm0) +/- zmm2
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <16 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
-  %res1 = call <16 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0)
-  %res2 = fadd <16 x float> %res, %res1
-  ret <16 x float> %res2
+  ret <16 x float> %res
 }
 
 declare <8 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
@@ -365,26 +321,20 @@ define <8 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_512(<8 x double> %x0,
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vmovapd %zmm2, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xda]
-; X86-NEXT:    vfmsubadd231pd %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xb7,0xd9]
-; X86-NEXT:    # zmm3 = (zmm0 * zmm1) -/+ zmm3
-; X86-NEXT:    vfmsubadd213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa7,0xca]
-; X86-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X86-NEXT:    vfmsubadd231pd %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xb7,0xd1]
+; X86-NEXT:    # zmm2 = (zmm0 * zmm1) -/+ zmm2
+; X86-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask3_vfmsubadd_pd_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovapd %zmm2, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xda]
-; X64-NEXT:    vfmsubadd231pd %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xb7,0xd9]
-; X64-NEXT:    # zmm3 = (zmm0 * zmm1) -/+ zmm3
-; X64-NEXT:    vfmsubadd213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa7,0xca]
-; X64-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X64-NEXT:    vfmsubadd231pd %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xb7,0xd1]
+; X64-NEXT:    # zmm2 = (zmm0 * zmm1) -/+ zmm2
+; X64-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
-  %res1 = call <8 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0)
-  %res2 = fadd <8 x double> %res, %res1
-  ret <8 x double> %res2
+  ret <8 x double> %res
 }
 
 declare <16 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
@@ -393,26 +343,20 @@ define <16 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_512(<16 x float> %x0,
 ; X86-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ps_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vmovaps %zmm2, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xda]
-; X86-NEXT:    vfmsubadd231ps %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xb7,0xd9]
-; X86-NEXT:    # zmm3 = (zmm0 * zmm1) -/+ zmm3
-; X86-NEXT:    vfmsubadd213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa7,0xca]
-; X86-NEXT:    vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1]
+; X86-NEXT:    vfmsubadd231ps %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xb7,0xd1]
+; X86-NEXT:    # zmm2 = (zmm0 * zmm1) -/+ zmm2
+; X86-NEXT:    vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ps_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovaps %zmm2, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xda]
-; X64-NEXT:    vfmsubadd231ps %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xb7,0xd9]
-; X64-NEXT:    # zmm3 = (zmm0 * zmm1) -/+ zmm3
-; X64-NEXT:    vfmsubadd213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa7,0xca]
-; X64-NEXT:    vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1]
+; X64-NEXT:    vfmsubadd231ps %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xb7,0xd1]
+; X64-NEXT:    # zmm2 = (zmm0 * zmm1) -/+ zmm2
+; X64-NEXT:    vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <16 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
-  %res1 = call <16 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0)
-  %res2 = fadd <16 x float> %res, %res1
-  ret <16 x float> %res2
+  ret <16 x float> %res
 }
 
 define <16 x float> @test_mask_round_vfmadd512_ps_rrb_rne(<16 x float> %a0, <16 x float> %a1, <16 x float> %a2, i16 %mask) {
@@ -560,26 +504,20 @@ define <8 x double>@test_int_x86_avx512_mask3_vfmsub_pd_512(<8 x double> %x0, <8
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vmovapd %zmm2, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xda]
-; X86-NEXT:    vfmsub231pd %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xba,0xd9]
-; X86-NEXT:    # zmm3 = (zmm0 * zmm1) - zmm3
-; X86-NEXT:    vfmsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xaa,0xca]
-; X86-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X86-NEXT:    vfmsub231pd %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xba,0xd1]
+; X86-NEXT:    # zmm2 = (zmm0 * zmm1) - zmm2
+; X86-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_pd_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovapd %zmm2, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xda]
-; X64-NEXT:    vfmsub231pd %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xba,0xd9]
-; X64-NEXT:    # zmm3 = (zmm0 * zmm1) - zmm3
-; X64-NEXT:    vfmsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xaa,0xca]
-; X64-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X64-NEXT:    vfmsub231pd %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xba,0xd1]
+; X64-NEXT:    # zmm2 = (zmm0 * zmm1) - zmm2
+; X64-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x double> @llvm.x86.avx512.mask3.vfmsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
-  %res1 = call <8 x double> @llvm.x86.avx512.mask3.vfmsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0)
-  %res2 = fadd <8 x double> %res, %res1
-  ret <8 x double> %res2
+  ret <8 x double> %res
 }
 
 declare <16 x float> @llvm.x86.avx512.mask3.vfmsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
@@ -588,26 +526,20 @@ define <16 x float>@test_int_x86_avx512_mask3_vfmsub_ps_512(<16 x float> %x0, <1
 ; X86-LABEL: test_int_x86_avx512_mask3_vfmsub_ps_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vmovaps %zmm2, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xda]
-; X86-NEXT:    vfmsub231ps %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xba,0xd9]
-; X86-NEXT:    # zmm3 = (zmm0 * zmm1) - zmm3
-; X86-NEXT:    vfmsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xaa,0xca]
-; X86-NEXT:    vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1]
+; X86-NEXT:    vfmsub231ps %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xba,0xd1]
+; X86-NEXT:    # zmm2 = (zmm0 * zmm1) - zmm2
+; X86-NEXT:    vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_ps_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovaps %zmm2, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xda]
-; X64-NEXT:    vfmsub231ps %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xba,0xd9]
-; X64-NEXT:    # zmm3 = (zmm0 * zmm1) - zmm3
-; X64-NEXT:    vfmsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xaa,0xca]
-; X64-NEXT:    vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1]
+; X64-NEXT:    vfmsub231ps %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xba,0xd1]
+; X64-NEXT:    # zmm2 = (zmm0 * zmm1) - zmm2
+; X64-NEXT:    vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <16 x float> @llvm.x86.avx512.mask3.vfmsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4)
-  %res1 = call <16 x float> @llvm.x86.avx512.mask3.vfmsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0)
-  %res2 = fadd <16 x float> %res, %res1
-  ret <16 x float> %res2
+  ret <16 x float> %res
 }
 
 define <8 x double> @test_mask_round_vfmadd512_pd_rrb_rne(<8 x double> %a0, <8 x double> %a1, <8 x double> %a2, i8 %mask) {
@@ -758,28 +690,20 @@ define <8 x double>@test_int_x86_avx512_mask_vfmadd_pd_512(<8 x double> %x0, <8
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vmovapd %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xd8]
-; X86-NEXT:    vfmadd132pd %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x98,0xd9]
-; X86-NEXT:    # zmm3 = (zmm3 * zmm1) + zmm2
-; X86-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa8,0xca]
-; X86-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X86-NEXT:    vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x98,0xc1]
+; X86-NEXT:    # zmm0 = (zmm0 * zmm1) + zmm2
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vfmadd_pd_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovapd %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xd8]
-; X64-NEXT:    vfmadd132pd %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x98,0xd9]
-; X64-NEXT:    # zmm3 = (zmm3 * zmm1) + zmm2
-; X64-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa8,0xca]
-; X64-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X64-NEXT:    vfmadd132pd %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x98,0xc1]
+; X64-NEXT:    # zmm0 = (zmm0 * zmm1) + zmm2
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i32 4)
   %bc = bitcast i8 %x3 to <8 x i1>
   %sel = select <8 x i1> %bc, <8 x double> %res, <8 x double> %x0
-  %res1 = call <8 x double> @llvm.x86.avx512.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i32 0)
-  %res2 = fadd <8 x double> %sel, %res1
-  ret <8 x double> %res2
+  ret <8 x double> %sel
 }
 
 declare <8 x double> @llvm.x86.avx512.mask3.vfmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
@@ -789,26 +713,20 @@ define <8 x double>@test_int_x86_avx512_mask3_vfmadd_pd_512(<8 x double> %x0, <8
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vmovapd %zmm2, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xda]
-; X86-NEXT:    vfmadd231pd %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xb8,0xd9]
-; X86-NEXT:    # zmm3 = (zmm0 * zmm1) + zmm3
-; X86-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa8,0xca]
-; X86-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X86-NEXT:    vfmadd231pd %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xb8,0xd1]
+; X86-NEXT:    # zmm2 = (zmm0 * zmm1) + zmm2
+; X86-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_pd_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovapd %zmm2, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xda]
-; X64-NEXT:    vfmadd231pd %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xb8,0xd9]
-; X64-NEXT:    # zmm3 = (zmm0 * zmm1) + zmm3
-; X64-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa8,0xca]
-; X64-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X64-NEXT:    vfmadd231pd %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xb8,0xd1]
+; X64-NEXT:    # zmm2 = (zmm0 * zmm1) + zmm2
+; X64-NEXT:    vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2]
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x double> @llvm.x86.avx512.mask3.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
-  %res1 = call <8 x double> @llvm.x86.avx512.mask3.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0)
-  %res2 = fadd <8 x double> %res, %res1
-  ret <8 x double> %res2
+  ret <8 x double> %res
 }
 
 declare <8 x double> @llvm.x86.avx512.maskz.vfmadd.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32)
@@ -818,54 +736,38 @@ define <8 x double>@test_int_x86_avx512_maskz_vfmadd_pd_512(<8 x double> %x0, <8
 ; X86:       # %bb.0:
 ; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
 ; X86-NEXT:    kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT:    vmovapd %zmm1, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xd9]
-; X86-NEXT:    vfmadd213pd %zmm2, %zmm0, %zmm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0xa8,0xda]
-; X86-NEXT:    # zmm3 = (zmm0 * zmm3) + zmm2
-; X86-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa8,0xca]
-; X86-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X86-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0xa8,0xc2]
+; X86-NEXT:    # zmm0 = (zmm1 * zmm0) + zmm2
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_pd_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovapd %zmm1, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xd9]
-; X64-NEXT:    vfmadd213pd %zmm2, %zmm0, %zmm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xc9,0xa8,0xda]
-; X64-NEXT:    # zmm3 = (zmm0 * zmm3) + zmm2
-; X64-NEXT:    vfmadd213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xa8,0xca]
-; X64-NEXT:    vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1]
+; X64-NEXT:    vfmadd213pd %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xc9,0xa8,0xc2]
+; X64-NEXT:    # zmm0 = (zmm1 * zmm0) + zmm2
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <8 x double> @llvm.x86.avx512.maskz.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4)
-  %res1 = call <8 x double> @llvm.x86.avx512.maskz.vfmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0)
-  %res2 = fadd <8 x double> %res, %res1
-  ret <8 x double> %res2
+  ret <8 x double> %res
 }
 
 define <16 x float>@test_int_x86_avx512_mask_vfmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){
 ; X86-LABEL: test_int_x86_avx512_mask_vfmadd_ps_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vmovaps %zmm0, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd8]
-; X86-NEXT:    vfmadd132ps %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x98,0xd9]
-; X86-NEXT:    # zmm3 = (zmm3 * zmm1) + zmm2
-; X86-NEXT:    vfmadd213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa8,0xca]
-; X86-NEXT:    vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1]
+; X86-NEXT:    vfmadd132ps %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x98,0xc1]
+; X86-NEXT:    # zmm0 = (zmm0 * zmm1) + zmm2
 ; X86-NEXT:    retl # encoding: [0xc3]
 ;
 ; X64-LABEL: test_int_x86_avx512_mask_vfmadd_ps_512:
 ; X64:       # %bb.0:
 ; X64-NEXT:    kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT:    vmovaps %zmm0, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd8]
-; X64-NEXT:    vfmadd132ps %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x98,0xd9]
-; X64-NEXT:    # zmm3 = (zmm3 * zmm1) + zmm2
-; X64-NEXT:    vfmadd213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa8,0xca]
-; X64-NEXT:    vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1]
+; X64-NEXT:    vfmadd132ps %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x98,0xc1]
+; X64-NEXT:    # zmm0 = (zmm0 * zmm1) + zmm2
 ; X64-NEXT:    retq # encoding: [0xc3]
   %res = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i32 4)
   %bc = bitcast i16 %x3 to <16 x i1>
   %sel = select <16 x i1> %bc, <16 x float> %res, <16 x float> %x0
-  %res1 = call <16 x float> @llvm.x86.avx512.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i32 0)
-  %res2 = fadd <16 x float> %sel, %res1
-  ret <16 x float> %res2
+  ret <16 x float> %sel
 }
 
 declare <16 x float> @llvm.x86.avx512.mask3.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32)
@@ -874,26 +776,20 @@ define <16 x float>@test_int_x86_avx512_mask3_vfmadd_ps_512(<16 x float> %x0, <1
 ; X86-LABEL: test_int_x86_avx512_mask3_vfmadd_ps_512:
 ; X86:       # %bb.0:
 ; X86-NEXT:    kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04]
-; X86-NEXT:    vmovaps %zmm2, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xda]
-; X86-NEXT:    vfmadd231ps %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xb8,0xd9]
-; X86-NEXT:    # zmm3 = (zmm0 * zmm1) + zmm3
-; X86-NEXT:    vfmadd213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa8,0xca]
-; X86-NEXT:    vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1]
+; X86-NEXT:    vfmadd231ps %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xb8,0xd1]
+; X86-NEXT:    # zmm2
```
= (zmm0 * zmm1) + zmm2 +; X86-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_ps_512: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovaps %zmm2, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xda] -; X64-NEXT: vfmadd231ps %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xb8,0xd9] -; X64-NEXT: # zmm3 = (zmm0 * zmm1) + zmm3 -; X64-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa8,0xca] -; X64-NEXT: vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1] +; X64-NEXT: vfmadd231ps %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xb8,0xd1] +; X64-NEXT: # zmm2 = (zmm0 * zmm1) + zmm2 +; X64-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] ; X64-NEXT: retq # encoding: [0xc3] %res = call <16 x float> @llvm.x86.avx512.mask3.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4) - %res1 = call <16 x float> @llvm.x86.avx512.mask3.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0) - %res2 = fadd <16 x float> %res, %res1 - ret <16 x float> %res2 + ret <16 x float> %res } declare <16 x float> @llvm.x86.avx512.maskz.vfmadd.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) @@ -902,26 +798,18 @@ define <16 x float>@test_int_x86_avx512_maskz_vfmadd_ps_512(<16 x float> %x0, <1 ; X86-LABEL: test_int_x86_avx512_maskz_vfmadd_ps_512: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] -; X86-NEXT: vmovaps %zmm1, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd9] -; X86-NEXT: vfmadd213ps %zmm2, %zmm0, %zmm3 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0xa8,0xda] -; X86-NEXT: # zmm3 = (zmm0 * zmm3) + zmm2 -; X86-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa8,0xca] -; X86-NEXT: vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1] +; X86-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xc9,0xa8,0xc2] +; X86-NEXT: # zmm0 = (zmm1 * zmm0) + zmm2 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_ps_512: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovaps %zmm1, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd9] -; X64-NEXT: vfmadd213ps %zmm2, %zmm0, %zmm3 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xc9,0xa8,0xda] -; X64-NEXT: # zmm3 = (zmm0 * zmm3) + zmm2 -; X64-NEXT: vfmadd213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xa8,0xca] -; X64-NEXT: vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1] +; X64-NEXT: vfmadd213ps %zmm2, %zmm1, %zmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xc9,0xa8,0xc2] +; X64-NEXT: # zmm0 = (zmm1 * zmm0) + zmm2 ; X64-NEXT: retq # encoding: [0xc3] %res = call <16 x float> @llvm.x86.avx512.maskz.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4) - %res1 = call <16 x float> @llvm.x86.avx512.maskz.vfmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0) - %res2 = fadd <16 x float> %res, %res1 - ret <16 x float> %res2 + ret <16 x float> %res } @@ -1063,26 +951,18 @@ define <8 x double>@test_int_x86_avx512_mask_vfnmsub_pd_512(<8 x double> %x0, <8 ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 # encoding: 
[0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovapd %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xd8] -; X86-NEXT: vfnmsub132pd %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x9e,0xd9] -; X86-NEXT: # zmm3 = -(zmm3 * zmm1) - zmm2 -; X86-NEXT: vfnmsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xae,0xca] -; X86-NEXT: vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1] +; X86-NEXT: vfnmsub132pd %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x9e,0xc1] +; X86-NEXT: # zmm0 = -(zmm0 * zmm1) - zmm2 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_512: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovapd %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xd8] -; X64-NEXT: vfnmsub132pd %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x9e,0xd9] -; X64-NEXT: # zmm3 = -(zmm3 * zmm1) - zmm2 -; X64-NEXT: vfnmsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xae,0xca] -; X64-NEXT: vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1] +; X64-NEXT: vfnmsub132pd %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x9e,0xc1] +; X64-NEXT: # zmm0 = -(zmm0 * zmm1) - zmm2 ; X64-NEXT: retq # encoding: [0xc3] %res = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4) - %res1 = call <8 x double> @llvm.x86.avx512.mask.vfnmsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0) - %res2 = fadd <8 x double> %res, %res1 - ret <8 x double> %res2 + ret <8 x double> %res } declare <8 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.512(<8 x double>, <8 x double>, <8 x double>, i8, i32) @@ -1092,52 +972,38 @@ define <8 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_512(<8 x double> %x0, < ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovapd %zmm2, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xda] -; X86-NEXT: vfnmsub231pd %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xbe,0xd9] -; X86-NEXT: # zmm3 = -(zmm0 * zmm1) - zmm3 -; X86-NEXT: vfnmsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xae,0xca] -; X86-NEXT: vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1] +; X86-NEXT: vfnmsub231pd %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xbe,0xd1] +; X86-NEXT: # zmm2 = -(zmm0 * zmm1) - zmm2 +; X86-NEXT: vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_pd_512: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovapd %zmm2, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xda] -; X64-NEXT: vfnmsub231pd %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xbe,0xd9] -; X64-NEXT: # zmm3 = -(zmm0 * zmm1) - zmm3 -; X64-NEXT: vfnmsub213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xae,0xca] -; X64-NEXT: vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1] +; X64-NEXT: vfnmsub231pd %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x49,0xbe,0xd1] +; X64-NEXT: # zmm2 = -(zmm0 * zmm1) - zmm2 +; X64-NEXT: vmovapd %zmm2, %zmm0 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xc2] ; X64-NEXT: retq # encoding: [0xc3] %res = call <8 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.512(<8 x 
double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4) - %res1 = call <8 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0) - %res2 = fadd <8 x double> %res, %res1 - ret <8 x double> %res2 + ret <8 x double> %res } define <16 x float>@test_int_x86_avx512_mask_vfnmsub_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){ ; X86-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_512: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] -; X86-NEXT: vmovaps %zmm0, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd8] -; X86-NEXT: vfnmsub132ps %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x9e,0xd9] -; X86-NEXT: # zmm3 = -(zmm3 * zmm1) - zmm2 -; X86-NEXT: vfnmsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xae,0xca] -; X86-NEXT: vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1] +; X86-NEXT: vfnmsub132ps %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x9e,0xc1] +; X86-NEXT: # zmm0 = -(zmm0 * zmm1) - zmm2 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_512: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovaps %zmm0, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd8] -; X64-NEXT: vfnmsub132ps %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x9e,0xd9] -; X64-NEXT: # zmm3 = -(zmm3 * zmm1) - zmm2 -; X64-NEXT: vfnmsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xae,0xca] -; X64-NEXT: vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1] +; X64-NEXT: vfnmsub132ps %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x9e,0xc1] +; X64-NEXT: # zmm0 = -(zmm0 * zmm1) - zmm2 ; X64-NEXT: retq # encoding: [0xc3] %res = call <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4) - %res1 = call <16 x float> @llvm.x86.avx512.mask.vfnmsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0) - %res2 = fadd <16 x float> %res, %res1 - ret <16 x float> %res2 + ret <16 x float> %res } declare <16 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.512(<16 x float>, <16 x float>, <16 x float>, i16, i32) @@ -1146,26 +1012,20 @@ define <16 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_512(<16 x float> %x0, < ; X86-LABEL: test_int_x86_avx512_mask3_vfnmsub_ps_512: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] -; X86-NEXT: vmovaps %zmm2, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xda] -; X86-NEXT: vfnmsub231ps %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xbe,0xd9] -; X86-NEXT: # zmm3 = -(zmm0 * zmm1) - zmm3 -; X86-NEXT: vfnmsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xae,0xca] -; X86-NEXT: vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1] +; X86-NEXT: vfnmsub231ps %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xbe,0xd1] +; X86-NEXT: # zmm2 = -(zmm0 * zmm1) - zmm2 +; X86-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_ps_512: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovaps %zmm2, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xda] -; X64-NEXT: vfnmsub231ps %zmm1, %zmm0, %zmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xbe,0xd9] -; 
X64-NEXT: # zmm3 = -(zmm0 * zmm1) - zmm3 -; X64-NEXT: vfnmsub213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xae,0xca] -; X64-NEXT: vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1] +; X64-NEXT: vfnmsub231ps %zmm1, %zmm0, %zmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x49,0xbe,0xd1] +; X64-NEXT: # zmm2 = -(zmm0 * zmm1) - zmm2 +; X64-NEXT: vmovaps %zmm2, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xc2] ; X64-NEXT: retq # encoding: [0xc3] %res = call <16 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4) - %res1 = call <16 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0) - %res2 = fadd <16 x float> %res, %res1 - ret <16 x float> %res2 + ret <16 x float> %res } define <8 x double>@test_int_x86_avx512_mask_vfnmadd_pd_512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3){ @@ -1173,50 +1033,34 @@ define <8 x double>@test_int_x86_avx512_mask_vfnmadd_pd_512(<8 x double> %x0, <8 ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovapd %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xd8] -; X86-NEXT: vfnmadd132pd %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x9c,0xd9] -; X86-NEXT: # zmm3 = -(zmm3 * zmm1) + zmm2 -; X86-NEXT: vfnmadd213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xac,0xca] -; X86-NEXT: vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1] +; X86-NEXT: vfnmadd132pd %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x9c,0xc1] +; X86-NEXT: # zmm0 = -(zmm0 * zmm1) + zmm2 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_512: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovapd %zmm0, %zmm3 # encoding: [0x62,0xf1,0xfd,0x48,0x28,0xd8] -; X64-NEXT: vfnmadd132pd %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x9c,0xd9] -; X64-NEXT: # zmm3 = -(zmm3 * zmm1) + zmm2 -; X64-NEXT: vfnmadd213pd {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0xfd,0x18,0xac,0xca] -; X64-NEXT: vaddpd %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0xe5,0x48,0x58,0xc1] +; X64-NEXT: vfnmadd132pd %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x49,0x9c,0xc1] +; X64-NEXT: # zmm0 = -(zmm0 * zmm1) + zmm2 ; X64-NEXT: retq # encoding: [0xc3] %res = call <8 x double> @llvm.x86.avx512.mask.vfnmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 %x3, i32 4) - %res1 = call <8 x double> @llvm.x86.avx512.mask.vfnmadd.pd.512(<8 x double> %x0, <8 x double> %x1, <8 x double> %x2, i8 -1, i32 0) - %res2 = fadd <8 x double> %res, %res1 - ret <8 x double> %res2 + ret <8 x double> %res } define <16 x float>@test_int_x86_avx512_mask_vfnmadd_ps_512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3){ ; X86-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_512: ; X86: # %bb.0: ; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1 # encoding: [0xc5,0xf8,0x90,0x4c,0x24,0x04] -; X86-NEXT: vmovaps %zmm0, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd8] -; X86-NEXT: vfnmadd132ps %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x9c,0xd9] -; X86-NEXT: # zmm3 = -(zmm3 * zmm1) + zmm2 -; X86-NEXT: vfnmadd213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xac,0xca] -; X86-NEXT: vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1] +; 
X86-NEXT: vfnmadd132ps %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x9c,0xc1] +; X86-NEXT: # zmm0 = -(zmm0 * zmm1) + zmm2 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_512: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovaps %zmm0, %zmm3 # encoding: [0x62,0xf1,0x7c,0x48,0x28,0xd8] -; X64-NEXT: vfnmadd132ps %zmm1, %zmm2, %zmm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x9c,0xd9] -; X64-NEXT: # zmm3 = -(zmm3 * zmm1) + zmm2 -; X64-NEXT: vfnmadd213ps {rn-sae}, %zmm2, %zmm0, %zmm1 # encoding: [0x62,0xf2,0x7d,0x18,0xac,0xca] -; X64-NEXT: vaddps %zmm1, %zmm3, %zmm0 # encoding: [0x62,0xf1,0x64,0x48,0x58,0xc1] +; X64-NEXT: vfnmadd132ps %zmm1, %zmm2, %zmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x49,0x9c,0xc1] +; X64-NEXT: # zmm0 = -(zmm0 * zmm1) + zmm2 ; X64-NEXT: retq # encoding: [0xc3] %res = call <16 x float> @llvm.x86.avx512.mask.vfnmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 %x3, i32 4) - %res1 = call <16 x float> @llvm.x86.avx512.mask.vfnmadd.ps.512(<16 x float> %x0, <16 x float> %x1, <16 x float> %x2, i16 -1, i32 0) - %res2 = fadd <16 x float> %res, %res1 - ret <16 x float> %res2 + ret <16 x float> %res } diff --git a/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll b/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll index 864ad6c6ceb..aa178a337b1 100644 --- a/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll +++ b/llvm/test/CodeGen/X86/avx512vl-intrinsics.ll @@ -5980,6 +5980,16 @@ declare <4 x i64> @llvm.x86.avx512.psrav.q.256(<4 x i64>, <4 x i64>) nounwind re declare <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone +define <8 x float> @test_vfmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) { +; CHECK-LABEL: test_vfmadd256_ps: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa8,0xc2] +; CHECK-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2 +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 -1) nounwind + ret <8 x float> %res +} + define <8 x float> @test_mask_vfmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) { ; X86-LABEL: test_mask_vfmadd256_ps: ; X86: # %bb.0: @@ -6001,6 +6011,16 @@ define <8 x float> @test_mask_vfmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 declare <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone +define <4 x float> @test_vfmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) { +; CHECK-LABEL: test_vfmadd128_ps: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2] +; CHECK-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind + ret <4 x float> %res +} + define <4 x float> @test_mask_vfmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) { ; X86-LABEL: test_mask_vfmadd128_ps: ; X86: # %bb.0: @@ -6022,6 +6042,16 @@ define <4 x float> @test_mask_vfmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 declare <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) +define <4 x double> @test_fmadd256_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) { 
+; CHECK-LABEL: test_fmadd256_pd: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2] +; CHECK-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2 +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a, <4 x double> %b, <4 x double> %c, i8 -1) + ret <4 x double> %res +} + define <4 x double> @test_mask_fmadd256_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c, i8 %mask) { ; X86-LABEL: test_mask_fmadd256_pd: ; X86: # %bb.0: @@ -6043,6 +6073,16 @@ define <4 x double> @test_mask_fmadd256_pd(<4 x double> %a, <4 x double> %b, <4 declare <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) +define <2 x double> @test_fmadd128_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; CHECK-LABEL: test_fmadd128_pd: +; CHECK: # %bb.0: +; CHECK-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2] +; CHECK-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] + %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 -1) + ret <2 x double> %res +} + define <2 x double> @test_mask_fmadd128_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c, i8 %mask) { ; X86-LABEL: test_mask_fmadd128_pd: ; X86: # %bb.0: @@ -6062,35 +6102,6 @@ define <2 x double> @test_mask_fmadd128_pd(<2 x double> %a, <2 x double> %b, <2 ret <2 x double> %res } -define <2 x double>@test_int_x86_avx512_mask_vfmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) { -; X86-LABEL: test_int_x86_avx512_mask_vfmadd_pd_128: -; X86: # %bb.0: -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] -; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovapd %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd8] -; X86-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm3 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x98,0xd9] -; X86-NEXT: # xmm3 = (xmm3 * xmm1) + xmm2 -; X86-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xca] -; X86-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2 -; X86-NEXT: vaddpd %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1] -; X86-NEXT: retl # encoding: [0xc3] -; -; X64-LABEL: test_int_x86_avx512_mask_vfmadd_pd_128: -; X64: # %bb.0: -; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovapd %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9] -; X64-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xda] -; X64-NEXT: # xmm3 = (xmm0 * xmm3) + xmm2 -; X64-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1] -; X64-NEXT: # xmm0 = (xmm0 * xmm1) + xmm2 -; X64-NEXT: vaddpd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc3] -; X64-NEXT: retq # encoding: [0xc3] - %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) - %res1 = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1) - %res2 = fadd <2 x double> %res, %res1 - ret <2 x double> %res2 -} - declare <2 x double> @llvm.x86.avx512.mask3.vfmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) define <2 x double>@test_int_x86_avx512_mask3_vfmadd_pd_128(<2 x 
double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) { @@ -6098,28 +6109,20 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmadd_pd_128(<2 x double> %x0, <2 ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovapd %xmm2, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xda] -; X86-NEXT: vfmadd231pd %xmm1, %xmm0, %xmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb8,0xd9] -; X86-NEXT: # xmm3 = (xmm0 * xmm1) + xmm3 -; X86-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xca] -; X86-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2 -; X86-NEXT: vaddpd %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1] +; X86-NEXT: vfmadd231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb8,0xd1] +; X86-NEXT: # xmm2 = (xmm0 * xmm1) + xmm2 +; X86-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2] ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_pd_128: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovapd %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9] -; X64-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xda] -; X64-NEXT: # xmm3 = (xmm0 * xmm3) + xmm2 ; X64-NEXT: vfmadd231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb8,0xd1] ; X64-NEXT: # xmm2 = (xmm0 * xmm1) + xmm2 -; X64-NEXT: vaddpd %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3] +; X64-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2] ; X64-NEXT: retq # encoding: [0xc3] %res = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) - %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1) - %res2 = fadd <2 x double> %res, %res1 - ret <2 x double> %res2 + ret <2 x double> %res } declare <2 x double> @llvm.x86.avx512.maskz.vfmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) @@ -6129,57 +6132,18 @@ define <2 x double>@test_int_x86_avx512_maskz_vfmadd_pd_128(<2 x double> %x0, <2 ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovapd %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9] -; X86-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xa8,0xda] -; X86-NEXT: # xmm3 = (xmm0 * xmm3) + xmm2 -; X86-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xca] -; X86-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2 -; X86-NEXT: vaddpd %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1] +; X86-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xa8,0xc2] +; X86-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_pd_128: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovapd %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9] -; X64-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa8,0xda] -; X64-NEXT: # xmm3 = (xmm0 * xmm3) + xmm2 
-; X64-NEXT: vfmadd213pd %xmm2, %xmm0, %xmm1 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xa8,0xca] -; X64-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2 -; X64-NEXT: vaddpd %xmm3, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc3] +; X64-NEXT: vfmadd213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xa8,0xc2] +; X64-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2 ; X64-NEXT: retq # encoding: [0xc3] %res = call <2 x double> @llvm.x86.avx512.maskz.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) - %res1 = call <2 x double> @llvm.x86.avx512.maskz.vfmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1) - %res2 = fadd <2 x double> %res, %res1 - ret <2 x double> %res2 -} - -define <4 x double>@test_int_x86_avx512_mask_vfmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) { -; X86-LABEL: test_int_x86_avx512_mask_vfmadd_pd_256: -; X86: # %bb.0: -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] -; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovapd %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd8] -; X86-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm3 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x98,0xd9] -; X86-NEXT: # ymm3 = (ymm3 * ymm1) + ymm2 -; X86-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xca] -; X86-NEXT: # ymm1 = (ymm0 * ymm1) + ymm2 -; X86-NEXT: vaddpd %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1] -; X86-NEXT: retl # encoding: [0xc3] -; -; X64-LABEL: test_int_x86_avx512_mask_vfmadd_pd_256: -; X64: # %bb.0: -; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovapd %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9] -; X64-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xda] -; X64-NEXT: # ymm3 = (ymm0 * ymm3) + ymm2 -; X64-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1] -; X64-NEXT: # ymm0 = (ymm0 * ymm1) + ymm2 -; X64-NEXT: vaddpd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc3] -; X64-NEXT: retq # encoding: [0xc3] - %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) - %res1 = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1) - %res2 = fadd <4 x double> %res, %res1 - ret <4 x double> %res2 + ret <2 x double> %res } declare <4 x double> @llvm.x86.avx512.mask3.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) @@ -6189,28 +6153,20 @@ define <4 x double>@test_int_x86_avx512_mask3_vfmadd_pd_256(<4 x double> %x0, <4 ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovapd %ymm2, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xda] -; X86-NEXT: vfmadd231pd %ymm1, %ymm0, %ymm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb8,0xd9] -; X86-NEXT: # ymm3 = (ymm0 * ymm1) + ymm3 -; X86-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xca] -; X86-NEXT: # ymm1 = (ymm0 * ymm1) + ymm2 -; X86-NEXT: vaddpd %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1] +; X86-NEXT: vfmadd231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb8,0xd1] +; X86-NEXT: # ymm2 = 
(ymm0 * ymm1) + ymm2 +; X86-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2] ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_pd_256: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovapd %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9] -; X64-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xda] -; X64-NEXT: # ymm3 = (ymm0 * ymm3) + ymm2 ; X64-NEXT: vfmadd231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb8,0xd1] ; X64-NEXT: # ymm2 = (ymm0 * ymm1) + ymm2 -; X64-NEXT: vaddpd %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3] +; X64-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2] ; X64-NEXT: retq # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx512.mask3.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) - %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1) - %res2 = fadd <4 x double> %res, %res1 - ret <4 x double> %res2 + ret <4 x double> %res } declare <4 x double> @llvm.x86.avx512.maskz.vfmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) @@ -6220,57 +6176,18 @@ define <4 x double>@test_int_x86_avx512_maskz_vfmadd_pd_256(<4 x double> %x0, <4 ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovapd %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9] -; X86-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0xa8,0xda] -; X86-NEXT: # ymm3 = (ymm0 * ymm3) + ymm2 -; X86-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xca] -; X86-NEXT: # ymm1 = (ymm0 * ymm1) + ymm2 -; X86-NEXT: vaddpd %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1] +; X86-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xa8,0xc2] +; X86-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_pd_256: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovapd %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9] -; X64-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa8,0xda] -; X64-NEXT: # ymm3 = (ymm0 * ymm3) + ymm2 -; X64-NEXT: vfmadd213pd %ymm2, %ymm0, %ymm1 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0xa8,0xca] -; X64-NEXT: # ymm1 = (ymm0 * ymm1) + ymm2 -; X64-NEXT: vaddpd %ymm3, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc3] +; X64-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xa8,0xc2] +; X64-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2 ; X64-NEXT: retq # encoding: [0xc3] %res = call <4 x double> @llvm.x86.avx512.maskz.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) - %res1 = call <4 x double> @llvm.x86.avx512.maskz.vfmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1) - %res2 = fadd <4 x double> %res, %res1 - ret <4 x double> %res2 -} - -define <4 x float>@test_int_x86_avx512_mask_vfmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) { -; X86-LABEL: test_int_x86_avx512_mask_vfmadd_ps_128: -; X86: 
# %bb.0: -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] -; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovaps %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd8] -; X86-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x98,0xd9] -; X86-NEXT: # xmm3 = (xmm3 * xmm1) + xmm2 -; X86-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xca] -; X86-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2 -; X86-NEXT: vaddps %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1] -; X86-NEXT: retl # encoding: [0xc3] -; -; X64-LABEL: test_int_x86_avx512_mask_vfmadd_ps_128: -; X64: # %bb.0: -; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovaps %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9] -; X64-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xda] -; X64-NEXT: # xmm3 = (xmm0 * xmm3) + xmm2 -; X64-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1] -; X64-NEXT: # xmm0 = (xmm0 * xmm1) + xmm2 -; X64-NEXT: vaddps %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3] -; X64-NEXT: retq # encoding: [0xc3] - %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) - %res1 = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1) - %res2 = fadd <4 x float> %res, %res1 - ret <4 x float> %res2 + ret <4 x double> %res } declare <4 x float> @llvm.x86.avx512.mask3.vfmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) @@ -6280,28 +6197,20 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmadd_ps_128(<4 x float> %x0, <4 x ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovaps %xmm2, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xda] -; X86-NEXT: vfmadd231ps %xmm1, %xmm0, %xmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb8,0xd9] -; X86-NEXT: # xmm3 = (xmm0 * xmm1) + xmm3 -; X86-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xca] -; X86-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2 -; X86-NEXT: vaddps %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1] +; X86-NEXT: vfmadd231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb8,0xd1] +; X86-NEXT: # xmm2 = (xmm0 * xmm1) + xmm2 +; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_ps_128: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovaps %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9] -; X64-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xda] -; X64-NEXT: # xmm3 = (xmm0 * xmm3) + xmm2 ; X64-NEXT: vfmadd231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb8,0xd1] ; X64-NEXT: # xmm2 = (xmm0 * xmm1) + xmm2 -; X64-NEXT: vaddps %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3] +; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2] ; X64-NEXT: retq # encoding: [0xc3] %res = call <4 x float> 
@llvm.x86.avx512.mask3.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) - %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1) - %res2 = fadd <4 x float> %res, %res1 - ret <4 x float> %res2 + ret <4 x float> %res } declare <4 x float> @llvm.x86.avx512.maskz.vfmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) @@ -6311,57 +6220,18 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmadd_ps_128(<4 x float> %x0, <4 x ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovaps %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9] -; X86-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm3 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0xa8,0xda] -; X86-NEXT: # xmm3 = (xmm0 * xmm3) + xmm2 -; X86-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xca] -; X86-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2 -; X86-NEXT: vaddps %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1] +; X86-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xa8,0xc2] +; X86-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_ps_128: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovaps %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9] -; X64-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa8,0xda] -; X64-NEXT: # xmm3 = (xmm0 * xmm3) + xmm2 -; X64-NEXT: vfmadd213ps %xmm2, %xmm0, %xmm1 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0xa8,0xca] -; X64-NEXT: # xmm1 = (xmm0 * xmm1) + xmm2 -; X64-NEXT: vaddps %xmm3, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xc3] +; X64-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xa8,0xc2] +; X64-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2 ; X64-NEXT: retq # encoding: [0xc3] %res = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) - %res1 = call <4 x float> @llvm.x86.avx512.maskz.vfmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1) - %res2 = fadd <4 x float> %res, %res1 - ret <4 x float> %res2 -} - -define <8 x float>@test_int_x86_avx512_mask_vfmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) { -; X86-LABEL: test_int_x86_avx512_mask_vfmadd_ps_256: -; X86: # %bb.0: -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] -; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovaps %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd8] -; X86-NEXT: vfmadd132ps %ymm1, %ymm2, %ymm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x98,0xd9] -; X86-NEXT: # ymm3 = (ymm3 * ymm1) + ymm2 -; X86-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xca] -; X86-NEXT: # ymm1 = (ymm0 * ymm1) + ymm2 -; X86-NEXT: vaddps %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1] -; X86-NEXT: retl # encoding: [0xc3] -; -; X64-LABEL: test_int_x86_avx512_mask_vfmadd_ps_256: -; X64: # %bb.0: -; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovaps %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9] -; X64-NEXT: vfmadd213ps %ymm2, 
%ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xda] -; X64-NEXT: # ymm3 = (ymm0 * ymm3) + ymm2 -; X64-NEXT: vfmadd132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x98,0xc1] -; X64-NEXT: # ymm0 = (ymm0 * ymm1) + ymm2 -; X64-NEXT: vaddps %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3] -; X64-NEXT: retq # encoding: [0xc3] - %res = call <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) - %res1 = call <8 x float> @llvm.x86.avx512.mask.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1) - %res2 = fadd <8 x float> %res, %res1 - ret <8 x float> %res2 + ret <4 x float> %res } declare <8 x float> @llvm.x86.avx512.mask3.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) @@ -6371,28 +6241,20 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmadd_ps_256(<8 x float> %x0, <8 x ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovaps %ymm2, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda] -; X86-NEXT: vfmadd231ps %ymm1, %ymm0, %ymm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb8,0xd9] -; X86-NEXT: # ymm3 = (ymm0 * ymm1) + ymm3 -; X86-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xca] -; X86-NEXT: # ymm1 = (ymm0 * ymm1) + ymm2 -; X86-NEXT: vaddps %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1] +; X86-NEXT: vfmadd231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb8,0xd1] +; X86-NEXT: # ymm2 = (ymm0 * ymm1) + ymm2 +; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2] ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask3_vfmadd_ps_256: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovaps %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9] -; X64-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xda] -; X64-NEXT: # ymm3 = (ymm0 * ymm3) + ymm2 ; X64-NEXT: vfmadd231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb8,0xd1] ; X64-NEXT: # ymm2 = (ymm0 * ymm1) + ymm2 -; X64-NEXT: vaddps %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3] +; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2] ; X64-NEXT: retq # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx512.mask3.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) - %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1) - %res2 = fadd <8 x float> %res, %res1 - ret <8 x float> %res2 + ret <8 x float> %res } declare <8 x float> @llvm.x86.avx512.maskz.vfmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) @@ -6402,28 +6264,18 @@ define <8 x float>@test_int_x86_avx512_maskz_vfmadd_ps_256(<8 x float> %x0, <8 x ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovaps %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9] -; X86-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm3 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0xa8,0xda] -; X86-NEXT: # ymm3 = (ymm0 * ymm3) + ymm2 -; X86-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm1 # EVEX TO 
VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xca] -; X86-NEXT: # ymm1 = (ymm0 * ymm1) + ymm2 -; X86-NEXT: vaddps %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1] +; X86-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xa8,0xc2] +; X86-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2 ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_maskz_vfmadd_ps_256: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovaps %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9] -; X64-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa8,0xda] -; X64-NEXT: # ymm3 = (ymm0 * ymm3) + ymm2 -; X64-NEXT: vfmadd213ps %ymm2, %ymm0, %ymm1 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0xa8,0xca] -; X64-NEXT: # ymm1 = (ymm0 * ymm1) + ymm2 -; X64-NEXT: vaddps %ymm3, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xc3] +; X64-NEXT: vfmadd213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xa8,0xc2] +; X64-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2 ; X64-NEXT: retq # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx512.maskz.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) - %res1 = call <8 x float> @llvm.x86.avx512.maskz.vfmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1) - %res2 = fadd <8 x float> %res, %res1 - ret <8 x float> %res2 + ret <8 x float> %res } @@ -6434,28 +6286,20 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmsub_pd_128(<2 x double> %x0, <2 ; X86: # %bb.0: ; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] ; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vmovapd %xmm2, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xda] -; X86-NEXT: vfmsub231pd %xmm1, %xmm0, %xmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xba,0xd9] -; X86-NEXT: # xmm3 = (xmm0 * xmm1) - xmm3 -; X86-NEXT: vfmsub213pd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaa,0xca] -; X86-NEXT: # xmm1 = (xmm0 * xmm1) - xmm2 -; X86-NEXT: vaddpd %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1] +; X86-NEXT: vfmsub231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xba,0xd1] +; X86-NEXT: # xmm2 = (xmm0 * xmm1) - xmm2 +; X86-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2] ; X86-NEXT: retl # encoding: [0xc3] ; ; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_pd_128: ; X64: # %bb.0: ; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vmovapd %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9] -; X64-NEXT: vfmsub213pd %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xaa,0xda] -; X64-NEXT: # xmm3 = (xmm0 * xmm3) - xmm2 ; X64-NEXT: vfmsub231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xba,0xd1] ; X64-NEXT: # xmm2 = (xmm0 * xmm1) - xmm2 -; X64-NEXT: vaddpd %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3] +; X64-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2] ; X64-NEXT: retq # encoding: [0xc3] %res = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) - %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1) - %res2 = fadd <2 x double> %res, %res1 - ret <2 x double> 
%res2
+ ret <2 x double> %res
}

@@ -6466,28 +6310,20 @@ define <4 x double>@test_int_x86_avx512_mask3_vfmsub_pd_256(<4 x double> %x0, <4
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %ymm2, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xda]
-; X86-NEXT: vfmsub231pd %ymm1, %ymm0, %ymm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xba,0xd9]
-; X86-NEXT: # ymm3 = (ymm0 * ymm1) - ymm3
-; X86-NEXT: vfmsub213pd %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xaa,0xca]
-; X86-NEXT: # ymm1 = (ymm0 * ymm1) - ymm2
-; X86-NEXT: vaddpd %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
+; X86-NEXT: vfmsub231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xba,0xd1]
+; X86-NEXT: # ymm2 = (ymm0 * ymm1) - ymm2
+; X86-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_pd_256:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
-; X64-NEXT: vfmsub213pd %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xaa,0xda]
-; X64-NEXT: # ymm3 = (ymm0 * ymm3) - ymm2
; X64-NEXT: vfmsub231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xba,0xd1]
; X64-NEXT: # ymm2 = (ymm0 * ymm1) - ymm2
-; X64-NEXT: vaddpd %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
+; X64-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx512.mask3.vfmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
+ ret <4 x double> %res
}

declare <4 x float> @llvm.x86.avx512.mask3.vfmsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)

@@ -6497,28 +6333,20 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmsub_ps_128(<4 x float> %x0, <4 x
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %xmm2, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xda]
-; X86-NEXT: vfmsub231ps %xmm1, %xmm0, %xmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xba,0xd9]
-; X86-NEXT: # xmm3 = (xmm0 * xmm1) - xmm3
-; X86-NEXT: vfmsub213ps %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaa,0xca]
-; X86-NEXT: # xmm1 = (xmm0 * xmm1) - xmm2
-; X86-NEXT: vaddps %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
+; X86-NEXT: vfmsub231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xba,0xd1]
+; X86-NEXT: # xmm2 = (xmm0 * xmm1) - xmm2
+; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_ps_128:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
-; X64-NEXT: vfmsub213ps %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xaa,0xda]
-; X64-NEXT: # xmm3 = (xmm0 * xmm3) - xmm2
; X64-NEXT: vfmsub231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xba,0xd1]
; X64-NEXT: # xmm2 = (xmm0 * xmm1) - xmm2
-; X64-NEXT: vaddps %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
+; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
+ ret <4 x float> %res
}

declare <8 x float> @llvm.x86.avx512.mask3.vfmsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)

@@ -6528,32 +6356,34 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmsub_ps_256(<8 x float> %x0, <8 x
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %ymm2, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
-; X86-NEXT: vfmsub231ps %ymm1, %ymm0, %ymm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xba,0xd9]
-; X86-NEXT: # ymm3 = (ymm0 * ymm1) - ymm3
-; X86-NEXT: vfmsub213ps %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xaa,0xca]
-; X86-NEXT: # ymm1 = (ymm0 * ymm1) - ymm2
-; X86-NEXT: vaddps %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
+; X86-NEXT: vfmsub231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xba,0xd1]
+; X86-NEXT: # ymm2 = (ymm0 * ymm1) - ymm2
+; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfmsub_ps_256:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
-; X64-NEXT: vfmsub213ps %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xaa,0xda]
-; X64-NEXT: # ymm3 = (ymm0 * ymm3) - ymm2
; X64-NEXT: vfmsub231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xba,0xd1]
; X64-NEXT: # ymm2 = (ymm0 * ymm1) - ymm2
-; X64-NEXT: vaddps %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3]
+; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx512.mask3.vfmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
+ ret <8 x float> %res
}

declare <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone

+define <8 x float> @test_vfnmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
+; CHECK-LABEL: test_vfnmadd256_ps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vfnmadd213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xac,0xc2]
+; CHECK-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 -1) nounwind
+ ret <8 x float> %res
+}
+
define <8 x float> @test_mask_vfnmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
; X86-LABEL: test_mask_vfnmadd256_ps:
; X86: # %bb.0:
@@ -6575,6 +6405,16 @@ define <8 x float> @test_mask_vfnmadd256_ps(<8 x float> %a0, <8 x float> %a1, <8

declare <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone

+define <4 x float> @test_vfnmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
+; CHECK-LABEL: test_vfnmadd128_ps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vfnmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xac,0xc2]
+; CHECK-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
+ ret <4 x float> %res
+}
+
define <4 x float> @test_mask_vfnmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; X86-LABEL: test_mask_vfnmadd128_ps:
; X86: # %bb.0:
@@ -6596,6 +6436,16 @@ define <4 x float> @test_mask_vfnmadd128_ps(<4 x float> %a0, <4 x float> %a1, <4

declare <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone

+define <4 x double> @test_vfnmadd256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
+; CHECK-LABEL: test_vfnmadd256_pd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vfnmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xac,0xc2]
+; CHECK-NEXT: # ymm0 = -(ymm1 * ymm0) + ymm2
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind
+ ret <4 x double> %res
+}
+
define <4 x double> @test_mask_vfnmadd256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
; X86-LABEL: test_mask_vfnmadd256_pd:
; X86: # %bb.0:
@@ -6617,6 +6467,16 @@ define <4 x double> @test_mask_vfnmadd256_pd(<4 x double> %a0, <4 x double> %a1,

declare <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone

+define <2 x double> @test_vfnmadd128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
+; CHECK-LABEL: test_vfnmadd128_pd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vfnmadd213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xac,0xc2]
+; CHECK-NEXT: # xmm0 = -(xmm1 * xmm0) + xmm2
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
+ ret <2 x double> %res
+}
+
define <2 x double> @test_mask_vfnmadd128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; X86-LABEL: test_mask_vfnmadd128_pd:
; X86: # %bb.0:
@@ -6638,6 +6498,16 @@ define <2 x double> @test_mask_vfnmadd128_pd(<2 x double> %a0, <2 x double> %a1,

declare <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone

+define <8 x float> @test_vfnmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
+; CHECK-LABEL: test_vfnmsub256_ps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vfnmsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xae,0xc2]
+; CHECK-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 -1) nounwind
+ ret <8 x float> %res
+}
+
define <8 x float> @test_mask_vfnmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
; X86-LABEL: test_mask_vfnmsub256_ps:
; X86: # %bb.0:
@@ -6659,6 +6529,16 @@ define <8 x float> @test_mask_vfnmsub256_ps(<8 x float> %a0, <8 x float> %a1, <8

declare <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone

+define <4 x float> @test_vfnmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) {
+; CHECK-LABEL: test_vfnmsub128_ps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vfnmsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xae,0xc2]
+; CHECK-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind
+ ret <4 x float> %res
+}
+
define <4 x float> @test_mask_vfnmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) {
; X86-LABEL: test_mask_vfnmsub128_ps:
; X86: # %bb.0:
@@ -6680,6 +6560,16 @@ define <4 x float> @test_mask_vfnmsub128_ps(<4 x float> %a0, <4 x float> %a1, <4

declare <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone

+define <4 x double> @test_vfnmsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
+; CHECK-LABEL: test_vfnmsub256_pd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vfnmsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xae,0xc2]
+; CHECK-NEXT: # ymm0 = -(ymm1 * ymm0) - ymm2
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind
+ ret <4 x double> %res
+}
+
define <4 x double> @test_mask_vfnmsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
; X86-LABEL: test_mask_vfnmsub256_pd:
; X86: # %bb.0:
@@ -6701,6 +6591,16 @@ define <4 x double> @test_mask_vfnmsub256_pd(<4 x double> %a0, <4 x double> %a1,

declare <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone

+define <2 x double> @test_vfnmsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
+; CHECK-LABEL: test_vfnmsub128_pd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vfnmsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xae,0xc2]
+; CHECK-NEXT: # xmm0 = -(xmm1 * xmm0) - xmm2
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
+ ret <2 x double> %res
+}
+
define <2 x double> @test_mask_vfnmsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; X86-LABEL: test_mask_vfnmsub128_pd:
; X86: # %bb.0:
@@ -6720,36 +6620,6 @@ define <2 x double> @test_mask_vfnmsub128_pd(<2 x double> %a0, <2 x double> %a1,
 ret <2 x double> %res
}

-
-define <2 x double>@test_int_x86_avx512_mask_vfnmsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_128:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd8]
-; X86-NEXT: vfnmsub132pd %xmm1, %xmm2, %xmm3 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x9e,0xd9]
-; X86-NEXT: # xmm3 = -(xmm3 * xmm1) - xmm2
-; X86-NEXT: vfnmsub213pd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xae,0xca]
-; X86-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
-; X86-NEXT: vaddpd %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_128:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
-; X64-NEXT: vfnmsub213pd %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xae,0xda]
-; X64-NEXT: # xmm3 = -(xmm0 * xmm3) - xmm2
-; X64-NEXT: vfnmsub132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x9e,0xc1]
-; X64-NEXT: # xmm0 = -(xmm0 * xmm1) - xmm2
-; X64-NEXT: vaddpd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc3]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
declare <2 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)

define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
@@ -6757,57 +6627,20 @@ define <2 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_128(<2 x double> %x0, <
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %xmm2, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xda]
-; X86-NEXT: vfnmsub231pd %xmm1, %xmm0, %xmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xbe,0xd9]
-; X86-NEXT: # xmm3 = -(xmm0 * xmm1) - xmm3
-; X86-NEXT: vfnmsub213pd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xae,0xca]
-; X86-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
-; X86-NEXT: vaddpd %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
+; X86-NEXT: vfnmsub231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xbe,0xd1]
+; X86-NEXT: # xmm2 = -(xmm0 * xmm1) - xmm2
+; X86-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_pd_128:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
-; X64-NEXT: vfnmsub213pd %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xae,0xda]
-; X64-NEXT: # xmm3 = -(xmm0 * xmm3) - xmm2
; X64-NEXT: vfnmsub231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xbe,0xd1]
; X64-NEXT: # xmm2 = -(xmm0 * xmm1) - xmm2
-; X64-NEXT: vaddpd %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
+; X64-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-define <4 x double>@test_int_x86_avx512_mask_vfnmsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_256:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd8]
-; X86-NEXT: vfnmsub132pd %ymm1, %ymm2, %ymm3 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x9e,0xd9]
-; X86-NEXT: # ymm3 = -(ymm3 * ymm1) - ymm2
-; X86-NEXT: vfnmsub213pd %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xae,0xca]
-; X86-NEXT: # ymm1 = -(ymm0 * ymm1) - ymm2
-; X86-NEXT: vaddpd %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_vfnmsub_pd_256:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
-; X64-NEXT: vfnmsub213pd %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xae,0xda]
-; X64-NEXT: # ymm3 = -(ymm0 * ymm3) - ymm2
-; X64-NEXT: vfnmsub132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x9e,0xc1]
-; X64-NEXT: # ymm0 = -(ymm0 * ymm1) - ymm2
-; X64-NEXT: vaddpd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc3]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
+ ret <2 x double> %res
}

declare <4 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)

@@ -6817,57 +6650,20 @@ define <4 x double>@test_int_x86_avx512_mask3_vfnmsub_pd_256(<4 x double> %x0, <
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %ymm2, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xda]
-; X86-NEXT: vfnmsub231pd %ymm1, %ymm0, %ymm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xbe,0xd9]
-; X86-NEXT: # ymm3 = -(ymm0 * ymm1) - ymm3
-; X86-NEXT: vfnmsub213pd %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xae,0xca]
-; X86-NEXT: # ymm1 = -(ymm0 * ymm1) - ymm2
-; X86-NEXT: vaddpd %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
+; X86-NEXT: vfnmsub231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xbe,0xd1]
+; X86-NEXT: # ymm2 = -(ymm0 * ymm1) - ymm2
+; X86-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_pd_256:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
-; X64-NEXT: vfnmsub213pd %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xae,0xda]
-; X64-NEXT: # ymm3 = -(ymm0 * ymm3) - ymm2
; X64-NEXT: vfnmsub231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xbe,0xd1]
; X64-NEXT: # ymm2 = -(ymm0 * ymm1) - ymm2
-; X64-NEXT: vaddpd %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
+; X64-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfnmsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
-}
-
-define <4 x float>@test_int_x86_avx512_mask_vfnmsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_128:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd8]
-; X86-NEXT: vfnmsub132ps %xmm1, %xmm2, %xmm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x9e,0xd9]
-; X86-NEXT: # xmm3 = -(xmm3 * xmm1) - xmm2
-; X86-NEXT: vfnmsub213ps %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xae,0xca]
-; X86-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
-; X86-NEXT: vaddps %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_128:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
-; X64-NEXT: vfnmsub213ps %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xae,0xda]
-; X64-NEXT: # xmm3 = -(xmm0 * xmm3) - xmm2
-; X64-NEXT: vfnmsub132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x9e,0xc1]
-; X64-NEXT: # xmm0 = -(xmm0 * xmm1) - xmm2
-; X64-NEXT: vaddps %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
+ ret <4 x double> %res
}

declare <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)

@@ -6877,57 +6673,20 @@ define <4 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_128(<4 x float> %x0, <4
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %xmm2, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xda]
-; X86-NEXT: vfnmsub231ps %xmm1, %xmm0, %xmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xbe,0xd9]
-; X86-NEXT: # xmm3 = -(xmm0 * xmm1) - xmm3
-; X86-NEXT: vfnmsub213ps %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xae,0xca]
-; X86-NEXT: # xmm1 = -(xmm0 * xmm1) - xmm2
-; X86-NEXT: vaddps %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
+; X86-NEXT: vfnmsub231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xbe,0xd1]
+; X86-NEXT: # xmm2 = -(xmm0 * xmm1) - xmm2
+; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_ps_128:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
-; X64-NEXT: vfnmsub213ps %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xae,0xda]
-; X64-NEXT: # xmm3 = -(xmm0 * xmm3) - xmm2
; X64-NEXT: vfnmsub231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xbe,0xd1]
; X64-NEXT: # xmm2 = -(xmm0 * xmm1) - xmm2
-; X64-NEXT: vaddps %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
+; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}
-
-define <8 x float>@test_int_x86_avx512_mask_vfnmsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_256:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd8]
-; X86-NEXT: vfnmsub132ps %ymm1, %ymm2, %ymm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x9e,0xd9]
-; X86-NEXT: # ymm3 = -(ymm3 * ymm1) - ymm2
-; X86-NEXT: vfnmsub213ps %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xae,0xca]
-; X86-NEXT: # ymm1 = -(ymm0 * ymm1) - ymm2
-; X86-NEXT: vaddps %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_vfnmsub_ps_256:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
-; X64-NEXT: vfnmsub213ps %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xae,0xda]
-; X64-NEXT: # ymm3 = -(ymm0 * ymm3) - ymm2
-; X64-NEXT: vfnmsub132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x9e,0xc1]
-; X64-NEXT: # ymm0 = -(ymm0 * ymm1) - ymm2
-; X64-NEXT: vaddps %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
+ ret <4 x float> %res
}

declare <8 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)

@@ -6937,148 +6696,34 @@ define <8 x float>@test_int_x86_avx512_mask3_vfnmsub_ps_256(<8 x float> %x0, <8
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %ymm2, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
-; X86-NEXT: vfnmsub231ps %ymm1, %ymm0, %ymm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xbe,0xd9]
-; X86-NEXT: # ymm3 = -(ymm0 * ymm1) - ymm3
-; X86-NEXT: vfnmsub213ps %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xae,0xca]
-; X86-NEXT: # ymm1 = -(ymm0 * ymm1) - ymm2
-; X86-NEXT: vaddps %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
+; X86-NEXT: vfnmsub231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xbe,0xd1]
+; X86-NEXT: # ymm2 = -(ymm0 * ymm1) - ymm2
+; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfnmsub_ps_256:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
-; X64-NEXT: vfnmsub213ps %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xae,0xda]
-; X64-NEXT: # ymm3 = -(ymm0 * ymm3) - ymm2
; X64-NEXT: vfnmsub231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xbe,0xd1]
; X64-NEXT: # ymm2 = -(ymm0 * ymm1) - ymm2
-; X64-NEXT: vaddps %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3]
+; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfnmsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
-}
-
-define <2 x double>@test_int_x86_avx512_mask_vfnmadd_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_128:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd8]
-; X86-NEXT: vfnmadd132pd %xmm1, %xmm2, %xmm3 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x9c,0xd9]
-; X86-NEXT: # xmm3 = -(xmm3 * xmm1) + xmm2
-; X86-NEXT: vfnmadd213pd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xac,0xca]
-; X86-NEXT: # xmm1 = -(xmm0 * xmm1) + xmm2
-; X86-NEXT: vaddpd %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_128:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
-; X64-NEXT: vfnmadd213pd %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xac,0xda]
-; X64-NEXT: # xmm3 = -(xmm0 * xmm3) + xmm2
-; X64-NEXT: vfnmadd132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x9c,0xc1]
-; X64-NEXT: # xmm0 = -(xmm0 * xmm1) + xmm2
-; X64-NEXT: vaddpd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc3]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask.vfnmadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-define <4 x double>@test_int_x86_avx512_mask_vfnmadd_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_256:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd8]
-; X86-NEXT: vfnmadd132pd %ymm1, %ymm2, %ymm3 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x9c,0xd9]
-; X86-NEXT: # ymm3 = -(ymm3 * ymm1) + ymm2
-; X86-NEXT: vfnmadd213pd %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xac,0xca]
-; X86-NEXT: # ymm1 = -(ymm0 * ymm1) + ymm2
-; X86-NEXT: vaddpd %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_vfnmadd_pd_256:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
-; X64-NEXT: vfnmadd213pd %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xac,0xda]
-; X64-NEXT: # ymm3 = -(ymm0 * ymm3) + ymm2
-; X64-NEXT: vfnmadd132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x9c,0xc1]
-; X64-NEXT: # ymm0 = -(ymm0 * ymm1) + ymm2
-; X64-NEXT: vaddpd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc3]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask.vfnmadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
+ ret <8 x float> %res
}

-define <4 x float>@test_int_x86_avx512_mask_vfnmadd_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_128:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd8]
-; X86-NEXT: vfnmadd132ps %xmm1, %xmm2, %xmm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x9c,0xd9]
-; X86-NEXT: # xmm3 = -(xmm3 * xmm1) + xmm2
-; X86-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xac,0xca]
-; X86-NEXT: # xmm1 = -(xmm0 * xmm1) + xmm2
-; X86-NEXT: vaddps %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_128:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
-; X64-NEXT: vfnmadd213ps %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xac,0xda]
-; X64-NEXT: # xmm3 = -(xmm0 * xmm3) + xmm2
-; X64-NEXT: vfnmadd132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x9c,0xc1]
-; X64-NEXT: # xmm0 = -(xmm0 * xmm1) + xmm2
-; X64-NEXT: vaddps %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask.vfnmadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}

+declare <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone

-define <8 x float>@test_int_x86_avx512_mask_vfnmadd_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_256:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd8]
-; X86-NEXT: vfnmadd132ps %ymm1, %ymm2, %ymm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x9c,0xd9]
-; X86-NEXT: # ymm3 = -(ymm3 * ymm1) + ymm2
-; X86-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xac,0xca]
-; X86-NEXT: # ymm1 = -(ymm0 * ymm1) + ymm2
-; X86-NEXT: vaddps %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_vfnmadd_ps_256:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
-; X64-NEXT: vfnmadd213ps %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xac,0xda]
-; X64-NEXT: # ymm3 = -(ymm0 * ymm3) + ymm2
-; X64-NEXT: vfnmadd132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x9c,0xc1]
-; X64-NEXT: # ymm0 = -(ymm0 * ymm1) + ymm2
-; X64-NEXT: vaddps %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask.vfnmadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
+define <8 x float> @test_fmaddsub256_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) {
+; CHECK-LABEL: test_fmaddsub256_ps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x75,0xa6,0xc2]
+; CHECK-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+ %res = call <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float> %a, <8 x float> %b, <8 x float> %c, i8 -1)
+ ret <8 x float> %res
}

-declare <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8) nounwind readnone
-
define <8 x float> @test_mask_fmaddsub256_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c, i8 %mask) {
; X86-LABEL: test_mask_fmaddsub256_ps:
; X86: # %bb.0:
@@ -7100,6 +6745,16 @@ define <8 x float> @test_mask_fmaddsub256_ps(<8 x float> %a, <8 x float> %b, <8

declare <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8) nounwind readnone

+define <4 x float> @test_fmaddsub128_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
+; CHECK-LABEL: test_fmaddsub128_ps:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa6,0xc2]
+; CHECK-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+ %res = call <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 -1)
+ ret <4 x float> %res
+}
+
define <4 x float> @test_mask_fmaddsub128_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c, i8 %mask) {
; X86-LABEL: test_mask_fmaddsub128_ps:
; X86: # %bb.0:
@@ -7121,6 +6776,16 @@ define <4 x float> @test_mask_fmaddsub128_ps(<4 x float> %a, <4 x float> %b, <4

declare <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8) nounwind readnone

+define <4 x double> @test_vfmaddsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) {
+; CHECK-LABEL: test_vfmaddsub256_pd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa6,0xc2]
+; CHECK-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+ %res = call <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind
+ ret <4 x double> %res
+}
+
define <4 x double> @test_mask_vfmaddsub256_pd(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) {
; X86-LABEL: test_mask_vfmaddsub256_pd:
; X86: # %bb.0:
@@ -7142,6 +6807,16 @@ define <4 x double> @test_mask_vfmaddsub256_pd(<4 x double> %a0, <4 x double> %a

declare <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8) nounwind readnone

+define <2 x double> @test_vfmaddsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) {
+; CHECK-LABEL: test_vfmaddsub128_pd:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa6,0xc2]
+; CHECK-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
+; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
+ %res = call <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind
+ ret <2 x double> %res
+}
+
define <2 x double> @test_mask_vfmaddsub128_pd(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) {
; X86-LABEL: test_mask_vfmaddsub128_pd:
; X86: # %bb.0:
@@ -7161,35 +6836,6 @@ define <2 x double> @test_mask_vfmaddsub128_pd(<2 x double> %a0, <2 x double> %a
 ret <2 x double> %res
}

-define <2 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_128:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd8]
-; X86-NEXT: vfmaddsub132pd %xmm1, %xmm2, %xmm3 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x96,0xd9]
-; X86-NEXT: # xmm3 = (xmm3 * xmm1) +/- xmm2
-; X86-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xca]
-; X86-NEXT: # xmm1 = (xmm0 * xmm1) +/- xmm2
-; X86-NEXT: vaddpd %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_128:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
-; X64-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xda]
-; X64-NEXT: # xmm3 = (xmm0 * xmm3) +/- xmm2
-; X64-NEXT: vfmaddsub132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x96,0xc1]
-; X64-NEXT: # xmm0 = (xmm0 * xmm1) +/- xmm2
-; X64-NEXT: vaddpd %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0xc3]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
declare <2 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)

define <2 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3) {
@@ -7197,28 +6843,20 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_128(<2 x double> %x0,
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %xmm2, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xda]
-; X86-NEXT: vfmaddsub231pd %xmm1, %xmm0, %xmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb6,0xd9]
-; X86-NEXT: # xmm3 = (xmm0 * xmm1) +/- xmm3
-; X86-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xca]
-; X86-NEXT: # xmm1 = (xmm0 * xmm1) +/- xmm2
-; X86-NEXT: vaddpd %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
+; X86-NEXT: vfmaddsub231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb6,0xd1]
+; X86-NEXT: # xmm2 = (xmm0 * xmm1) +/- xmm2
+; X86-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfmaddsub_pd_128:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
-; X64-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xda]
-; X64-NEXT: # xmm3 = (xmm0 * xmm3) +/- xmm2
; X64-NEXT: vfmaddsub231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb6,0xd1]
; X64-NEXT: # xmm2 = (xmm0 * xmm1) +/- xmm2
-; X64-NEXT: vaddpd %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
+; X64-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <2 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
+ ret <2 x double> %res
}

declare <2 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)

@@ -7228,57 +6866,18 @@ define <2 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_128(<2 x double> %x0,
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
-; X86-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xa6,0xda]
-; X86-NEXT: # xmm3 = (xmm0 * xmm3) +/- xmm2
-; X86-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xca]
-; X86-NEXT: # xmm1 = (xmm0 * xmm1) +/- xmm2
-; X86-NEXT: vaddpd %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
+; X86-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xa6,0xc2]
+; X86-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_maskz_vfmaddsub_pd_128:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
-; X64-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa6,0xda]
-; X64-NEXT: # xmm3 = (xmm0 * xmm3) +/- xmm2
-; X64-NEXT: vfmaddsub213pd %xmm2, %xmm0, %xmm1 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0x89,0xa6,0xca]
-; X64-NEXT: # xmm1 = (xmm0 * xmm1) +/- xmm2
-; X64-NEXT: vaddpd %xmm3, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc3]
+; X64-NEXT: vfmaddsub213pd %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0x89,0xa6,0xc2]
+; X64-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <2 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2 = fadd <2 x double> %res, %res1
- ret <2 x double> %res2
-}
-
-define <4 x double>@test_int_x86_avx512_mask_vfmaddsub_pd_256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_256:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd8]
-; X86-NEXT: vfmaddsub132pd %ymm1, %ymm2, %ymm3 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x96,0xd9]
-; X86-NEXT: # ymm3 = (ymm3 * ymm1) +/- ymm2
-; X86-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xca]
-; X86-NEXT: # ymm1 = (ymm0 * ymm1) +/- ymm2
-; X86-NEXT: vaddpd %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_vfmaddsub_pd_256:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
-; X64-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xda]
-; X64-NEXT: # ymm3 = (ymm0 * ymm3) +/- ymm2
-; X64-NEXT: vfmaddsub132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x96,0xc1]
-; X64-NEXT: # ymm0 = (ymm0 * ymm1) +/- ymm2
-; X64-NEXT: vaddpd %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0xc3]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
+ ret <2 x double> %res
}

declare <4 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)

@@ -7288,28 +6887,20 @@ define <4 x double>@test_int_x86_avx512_mask3_vfmaddsub_pd_256(<4 x double> %x0,
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %ymm2, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xda]
-; X86-NEXT: vfmaddsub231pd %ymm1, %ymm0, %ymm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb6,0xd9]
-; X86-NEXT: # ymm3 = (ymm0 * ymm1) +/- ymm3
-; X86-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xca]
-; X86-NEXT: # ymm1 = (ymm0 * ymm1) +/- ymm2
-; X86-NEXT: vaddpd %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
+; X86-NEXT: vfmaddsub231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb6,0xd1]
+; X86-NEXT: # ymm2 = (ymm0 * ymm1) +/- ymm2
+; X86-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfmaddsub_pd_256:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
-; X64-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xda]
-; X64-NEXT: # ymm3 = (ymm0 * ymm3) +/- ymm2
; X64-NEXT: vfmaddsub231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb6,0xd1]
; X64-NEXT: # ymm2 = (ymm0 * ymm1) +/- ymm2
-; X64-NEXT: vaddpd %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
+; X64-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
+ ret <4 x double> %res
}

declare <4 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)

@@ -7319,57 +6910,18 @@ define <4 x double>@test_int_x86_avx512_maskz_vfmaddsub_pd_256(<4 x double> %x0,
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
-; X86-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm3 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0xa6,0xda]
-; X86-NEXT: # ymm3 = (ymm0 * ymm3) +/- ymm2
-; X86-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xca]
-; X86-NEXT: # ymm1 = (ymm0 * ymm1) +/- ymm2
-; X86-NEXT: vaddpd %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
+; X86-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xa6,0xc2]
+; X86-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_maskz_vfmaddsub_pd_256:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
-; X64-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa6,0xda]
-; X64-NEXT: # ymm3 = (ymm0 * ymm3) +/- ymm2
-; X64-NEXT: vfmaddsub213pd %ymm2, %ymm0, %ymm1 {%k1} {z} # encoding: [0x62,0xf2,0xfd,0xa9,0xa6,0xca]
-; X64-NEXT: # ymm1 = (ymm0 * ymm1) +/- ymm2
-; X64-NEXT: vaddpd %ymm3, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc3]
+; X64-NEXT: vfmaddsub213pd %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0xf5,0xa9,0xa6,0xc2]
+; X64-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.maskz.vfmaddsub.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2 = fadd <4 x double> %res, %res1
- ret <4 x double> %res2
-}
-
-define <4 x float>@test_int_x86_avx512_mask_vfmaddsub_ps_128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_128:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd8]
-; X86-NEXT: vfmaddsub132ps %xmm1, %xmm2, %xmm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x96,0xd9]
-; X86-NEXT: # xmm3 = (xmm3 * xmm1) +/- xmm2
-; X86-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xca]
-; X86-NEXT: # xmm1 = (xmm0 * xmm1) +/- xmm2
-; X86-NEXT: vaddps %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_128:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
-; X64-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xda]
-; X64-NEXT: # xmm3 = (xmm0 * xmm3) +/- xmm2
-; X64-NEXT: vfmaddsub132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x96,0xc1]
-; X64-NEXT: # xmm0 = (xmm0 * xmm1) +/- xmm2
-; X64-NEXT: vaddps %xmm3, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x58,0xc3]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
+ ret <4 x double> %res
}

declare <4 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)

@@ -7379,28 +6931,20 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_128(<4 x float> %x0, <
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %xmm2, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xda]
-; X86-NEXT: vfmaddsub231ps %xmm1, %xmm0, %xmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb6,0xd9]
-; X86-NEXT: # xmm3 = (xmm0 * xmm1) +/- xmm3
-; X86-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xca]
-; X86-NEXT: # xmm1 = (xmm0 * xmm1) +/- xmm2
-; X86-NEXT: vaddps %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
+; X86-NEXT: vfmaddsub231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb6,0xd1]
+; X86-NEXT: # xmm2 = (xmm0 * xmm1) +/- xmm2
+; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ps_128:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
-; X64-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xda]
-; X64-NEXT: # xmm3 = (xmm0 * xmm3) +/- xmm2
; X64-NEXT: vfmaddsub231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb6,0xd1]
; X64-NEXT: # xmm2 = (xmm0 * xmm1) +/- xmm2
-; X64-NEXT: vaddps %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
+; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
+ ret <4 x float> %res
}

declare <4 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)

@@ -7410,57 +6954,18 @@ define <4 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_128(<4 x float> %x0, <
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
-; X86-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm3 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0xa6,0xda]
-; X86-NEXT: # xmm3 = (xmm0 * xmm3) +/- xmm2
-; X86-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xca]
-; X86-NEXT: # xmm1 = (xmm0 * xmm1) +/- xmm2
-; X86-NEXT: vaddps %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
+; X86-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xa6,0xc2]
+; X86-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ps_128:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
-; X64-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa6,0xda]
-; X64-NEXT: # xmm3 = (xmm0 * xmm3) +/- xmm2
-; X64-NEXT: vfmaddsub213ps %xmm2, %xmm0, %xmm1 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0x89,0xa6,0xca]
-; X64-NEXT: # xmm1 = (xmm0 * xmm1) +/- xmm2
-; X64-NEXT: vaddps %xmm3, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x58,0xc3]
+; X64-NEXT: vfmaddsub213ps %xmm2, %xmm1, %xmm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0x89,0xa6,0xc2]
+; X64-NEXT: # xmm0 = (xmm1 * xmm0) +/- xmm2
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2 = fadd <4 x float> %res, %res1
- ret <4 x float> %res2
-}
-
-define <8 x float>@test_int_x86_avx512_mask_vfmaddsub_ps_256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) {
-; X86-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_256:
-; X86: # %bb.0:
-; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
-; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd8]
-; X86-NEXT: vfmaddsub132ps %ymm1, %ymm2, %ymm3 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x96,0xd9]
-; X86-NEXT: # ymm3 = (ymm3 * ymm1) +/- ymm2
-; X86-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xca]
-; X86-NEXT: # ymm1 = (ymm0 * ymm1) +/- ymm2
-; X86-NEXT: vaddps %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
-; X86-NEXT: retl # encoding: [0xc3]
-;
-; X64-LABEL: test_int_x86_avx512_mask_vfmaddsub_ps_256:
-; X64: # %bb.0:
-; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
-; X64-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xda]
-; X64-NEXT: # ymm3 = (ymm0 * ymm3) +/- ymm2
-; X64-NEXT: vfmaddsub132ps %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x29,0x96,0xc1]
-; X64-NEXT: # ymm0 = (ymm0 * ymm1) +/- ymm2
-; X64-NEXT: vaddps %ymm3, %ymm0, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x58,0xc3]
-; X64-NEXT: retq # encoding: [0xc3]
- %res = call <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
+ ret <4 x float> %res
}

declare <8 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)

@@ -7470,28 +6975,20 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmaddsub_ps_256(<8 x float> %x0, <
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %ymm2, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
-; X86-NEXT: vfmaddsub231ps %ymm1, %ymm0, %ymm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb6,0xd9]
-; X86-NEXT: # ymm3 = (ymm0 * ymm1) +/- ymm3
-; X86-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xca]
-; X86-NEXT: # ymm1 = (ymm0 * ymm1) +/- ymm2
-; X86-NEXT: vaddps %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
+; X86-NEXT: vfmaddsub231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb6,0xd1]
+; X86-NEXT: # ymm2 = (ymm0 * ymm1) +/- ymm2
+; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfmaddsub_ps_256:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
-; X64-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xda]
-; X64-NEXT: # ymm3 = (ymm0 * ymm3) +/- ymm2
; X64-NEXT: vfmaddsub231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb6,0xd1]
; X64-NEXT: # ymm2 = (ymm0 * ymm1) +/- ymm2
-; X64-NEXT: vaddps %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3]
+; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
+ ret <8 x float> %res
}

declare <8 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)

@@ -7501,28 +6998,18 @@ define <8 x float>@test_int_x86_avx512_maskz_vfmaddsub_ps_256(<8 x float> %x0, <
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
-; X86-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm3 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0xa6,0xda]
-; X86-NEXT: # ymm3 = (ymm0 * ymm3) +/- ymm2
-; X86-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xca]
-; X86-NEXT: # ymm1 = (ymm0 * ymm1) +/- ymm2
-; X86-NEXT: vaddps %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
+; X86-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xa6,0xc2]
+; X86-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_maskz_vfmaddsub_ps_256:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
-; X64-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa6,0xda]
-; X64-NEXT: # ymm3 = (ymm0 * ymm3) +/- ymm2
-; X64-NEXT: vfmaddsub213ps %ymm2, %ymm0, %ymm1 {%k1} {z} # encoding: [0x62,0xf2,0x7d,0xa9,0xa6,0xca]
-; X64-NEXT: # ymm1 = (ymm0 * ymm1) +/- ymm2
-; X64-NEXT: vaddps %ymm3, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xf4,0x58,0xc3]
+; X64-NEXT: vfmaddsub213ps %ymm2, %ymm1, %ymm0 {%k1} {z} # encoding: [0x62,0xf2,0x75,0xa9,0xa6,0xc2]
+; X64-NEXT: # ymm0 = (ymm1 * ymm0) +/- ymm2
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <8 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3)
- %res1 = call <8 x float> @llvm.x86.avx512.maskz.vfmaddsub.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1)
- %res2 = fadd <8 x float> %res, %res1
- ret <8 x float> %res2
+ ret <8 x float> %res
}

declare <2 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.128(<2 x double>, <2 x double>, <2 x double>, i8)

@@ -7532,28 +7019,20 @@ define <2 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_128(<2 x double> %x0,
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %xmm2, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xda]
-; X86-NEXT: vfmsubadd231pd %xmm1, %xmm0, %xmm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb7,0xd9]
-; X86-NEXT: # xmm3 = (xmm0 * xmm1) -/+ xmm3
-; X86-NEXT: vfmsubadd213pd %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa7,0xca]
-; X86-NEXT: # xmm1 = (xmm0 * xmm1) -/+ xmm2
-; X86-NEXT: vaddpd %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe1,0x58,0xc1]
+; X86-NEXT: vfmsubadd231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb7,0xd1]
+; X86-NEXT: # xmm2 = (xmm0 * xmm1) -/+ xmm2
+; X86-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfmsubadd_pd_128:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xd9]
-; X64-NEXT: vfmsubadd213pd %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf9,0xa7,0xda]
-; X64-NEXT: # xmm3 = (xmm0 * xmm3) -/+ xmm2
; X64-NEXT: vfmsubadd231pd %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x09,0xb7,0xd1]
; X64-NEXT: # xmm2 = (xmm0 * xmm1) -/+ xmm2
-; X64-NEXT: vaddpd %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe9,0x58,0xc3]
+; X64-NEXT: vmovapd %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <2 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 %x3)
- %res1 = call <2 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.128(<2 x double> %x0, <2 x double> %x1, <2 x double> %x2, i8 -1)
- %res2=fadd <2 x double> %res, %res1
- ret <2 x double> %res2
+ ret <2 x double> %res
}

declare <4 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.256(<4 x double>, <4 x double>, <4 x double>, i8)

@@ -7563,28 +7042,20 @@ define <4 x double>@test_int_x86_avx512_mask3_vfmsubadd_pd_256(<4 x double> %x0,
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovapd %ymm2, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xda]
-; X86-NEXT: vfmsubadd231pd %ymm1, %ymm0, %ymm3 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb7,0xd9]
-; X86-NEXT: # ymm3 = (ymm0 * ymm1) -/+ ymm3
-; X86-NEXT: vfmsubadd213pd %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa7,0xca]
-; X86-NEXT: # ymm1 = (ymm0 * ymm1) -/+ ymm2
-; X86-NEXT: vaddpd %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe5,0x58,0xc1]
+; X86-NEXT: vfmsubadd231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb7,0xd1]
+; X86-NEXT: # ymm2 = (ymm0 * ymm1) -/+ ymm2
+; X86-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfmsubadd_pd_256:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovapd %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xd9]
-; X64-NEXT: vfmsubadd213pd %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xfd,0xa7,0xda]
-; X64-NEXT: # ymm3 = (ymm0 * ymm3) -/+ ymm2
; X64-NEXT: vfmsubadd231pd %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0xfd,0x29,0xb7,0xd1]
; X64-NEXT: # ymm2 = (ymm0 * ymm1) -/+ ymm2
-; X64-NEXT: vaddpd %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xed,0x58,0xc3]
+; X64-NEXT: vmovapd %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <4 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 %x3)
- %res1 = call <4 x double> @llvm.x86.avx512.mask3.vfmsubadd.pd.256(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, i8 -1)
- %res2=fadd <4 x double> %res, %res1
- ret <4 x double> %res2
+ ret <4 x double> %res
}

declare <4 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.128(<4 x float>, <4 x float>, <4 x float>, i8)

@@ -7594,28 +7065,20 @@ define <4 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_128(<4 x float> %x0, <
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %xmm2, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xda]
-; X86-NEXT: vfmsubadd231ps %xmm1, %xmm0, %xmm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb7,0xd9]
-; X86-NEXT: # xmm3 = (xmm0 * xmm1) -/+ xmm3
-; X86-NEXT: vfmsubadd213ps %xmm2, %xmm0, %xmm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa7,0xca]
-; X86-NEXT: # xmm1 = (xmm0 * xmm1) -/+ xmm2
-; X86-NEXT: vaddps %xmm1, %xmm3, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe0,0x58,0xc1]
+; X86-NEXT: vfmsubadd231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb7,0xd1]
+; X86-NEXT: # xmm2 = (xmm0 * xmm1) -/+ xmm2
+; X86-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ps_128:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %xmm1, %xmm3 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xd9]
-; X64-NEXT: vfmsubadd213ps %xmm2, %xmm0, %xmm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0xa7,0xda]
-; X64-NEXT: # xmm3 = (xmm0 * xmm3) -/+ xmm2
; X64-NEXT: vfmsubadd231ps %xmm1, %xmm0, %xmm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x09,0xb7,0xd1]
; X64-NEXT: # xmm2 = (xmm0 * xmm1) -/+ xmm2
-; X64-NEXT: vaddps %xmm3, %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xe8,0x58,0xc3]
+; X64-NEXT: vmovaps %xmm2, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0xc2]
; X64-NEXT: retq # encoding: [0xc3]
 %res = call <4 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 %x3)
- %res1 = call <4 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.128(<4 x float> %x0, <4 x float> %x1, <4 x float> %x2, i8 -1)
- %res2=fadd <4 x float> %res, %res1
- ret <4 x float> %res2
+ ret <4 x float> %res
}

declare <8 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.256(<8 x float>, <8 x float>, <8 x float>, i8)

@@ -7625,59 +7088,22 @@ define <8 x float>@test_int_x86_avx512_mask3_vfmsubadd_ps_256(<8 x float> %x0, <
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04]
; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8]
-; X86-NEXT: vmovaps %ymm2, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xda]
-; X86-NEXT: vfmsubadd231ps %ymm1, %ymm0, %ymm3 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb7,0xd9]
-; X86-NEXT: # ymm3 = (ymm0 * ymm1) -/+ ymm3
-; X86-NEXT: vfmsubadd213ps %ymm2, %ymm0, %ymm1 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa7,0xca]
-; X86-NEXT: # ymm1 = (ymm0 * ymm1) -/+ ymm2
-; X86-NEXT: vaddps %ymm1, %ymm3, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xe4,0x58,0xc1]
+; X86-NEXT: vfmsubadd231ps %ymm1, %ymm0, %ymm2 {%k1} # encoding: [0x62,0xf2,0x7d,0x29,0xb7,0xd1]
+; X86-NEXT: # ymm2 = (ymm0 * ymm1) -/+ ymm2
+; X86-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2]
; X86-NEXT: retl # encoding: [0xc3]
;
; X64-LABEL: test_int_x86_avx512_mask3_vfmsubadd_ps_256:
; X64: # %bb.0:
; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf]
-; X64-NEXT: vmovaps %ymm1, %ymm3 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xd9]
-; X64-NEXT: vfmsubadd213ps %ymm2, %ymm0, %ymm3 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0xa7,0xda]
-; X64-NEXT: # ymm3 = (ymm0 * ymm3) -/+ ymm2
; X64-NEXT: vfmsubadd231ps %ymm1, %ymm0, %ymm2 {%k1} #
encoding: [0x62,0xf2,0x7d,0x29,0xb7,0xd1] ; X64-NEXT: # ymm2 = (ymm0 * ymm1) -/+ ymm2 -; X64-NEXT: vaddps %ymm3, %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xec,0x58,0xc3] +; X64-NEXT: vmovaps %ymm2, %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x28,0xc2] ; X64-NEXT: retq # encoding: [0xc3] %res = call <8 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 %x3) - %res1 = call <8 x float> @llvm.x86.avx512.mask3.vfmsubadd.ps.256(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, i8 -1) - %res2=fadd <8 x float> %res, %res1 - ret <8 x float> %res2 -} - - -define <4 x float> @test_mask_vfmadd128_ps_r(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) { -; X86-LABEL: test_mask_vfmadd128_ps_r: -; X86: # %bb.0: -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] -; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1] -; X86-NEXT: # xmm0 = (xmm0 * xmm1) + xmm2 -; X86-NEXT: retl # encoding: [0xc3] -; -; X64-LABEL: test_mask_vfmadd128_ps_r: -; X64: # %bb.0: -; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vfmadd132ps %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0x6d,0x09,0x98,0xc1] -; X64-NEXT: # xmm0 = (xmm0 * xmm1) + xmm2 -; X64-NEXT: retq # encoding: [0xc3] - %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 %mask) nounwind - ret <4 x float> %res + ret <8 x float> %res } -define <4 x float> @test_mask_vfmadd128_ps_rz(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2) { -; CHECK-LABEL: test_mask_vfmadd128_ps_rz: -; CHECK: # %bb.0: -; CHECK-NEXT: vfmadd213ps %xmm2, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0x71,0xa8,0xc2] -; CHECK-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2 -; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] - %res = call <4 x float> @llvm.x86.avx512.mask.vfmadd.ps.128(<4 x float> %a0, <4 x float> %a1, <4 x float> %a2, i8 -1) nounwind - ret <4 x float> %res -} define <4 x float> @test_mask_vfmadd128_ps_rmk(<4 x float> %a0, <4 x float> %a1, <4 x float>* %ptr_a2, i8 %mask) { ; X86-LABEL: test_mask_vfmadd128_ps_rmk: @@ -7851,35 +7277,6 @@ define <4 x float> @test_mask_vfmadd128_ps_rmbza(<4 x float> %a0, <4 x float> %a ret <4 x float> %res } -define <2 x double> @test_mask_vfmadd128_pd_r(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) { -; X86-LABEL: test_mask_vfmadd128_pd_r: -; X86: # %bb.0: -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] -; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1] -; X86-NEXT: # xmm0 = (xmm0 * xmm1) + xmm2 -; X86-NEXT: retl # encoding: [0xc3] -; -; X64-LABEL: test_mask_vfmadd128_pd_r: -; X64: # %bb.0: -; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vfmadd132pd %xmm1, %xmm2, %xmm0 {%k1} # encoding: [0x62,0xf2,0xed,0x09,0x98,0xc1] -; X64-NEXT: # xmm0 = (xmm0 * xmm1) + xmm2 -; X64-NEXT: retq # encoding: [0xc3] - %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 %mask) nounwind - ret <2 x double> %res -} - -define <2 x double> @test_mask_vfmadd128_pd_rz(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2) { -; CHECK-LABEL: test_mask_vfmadd128_pd_rz: -; CHECK: # %bb.0: -; CHECK-NEXT: vfmadd213pd %xmm2, %xmm1, 
%xmm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf1,0xa8,0xc2] -; CHECK-NEXT: # xmm0 = (xmm1 * xmm0) + xmm2 -; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] - %res = call <2 x double> @llvm.x86.avx512.mask.vfmadd.pd.128(<2 x double> %a0, <2 x double> %a1, <2 x double> %a2, i8 -1) nounwind - ret <2 x double> %res -} - define <2 x double> @test_mask_vfmadd128_pd_rmk(<2 x double> %a0, <2 x double> %a1, <2 x double>* %ptr_a2, i8 %mask) { ; X86-LABEL: test_mask_vfmadd128_pd_rmk: ; X86: # %bb.0: @@ -7919,35 +7316,6 @@ define <2 x double> @test_mask_vfmadd128_pd_rmkz(<2 x double> %a0, <2 x double> ret <2 x double> %res } -define <4 x double> @test_mask_vfmadd256_pd_r(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) { -; X86-LABEL: test_mask_vfmadd256_pd_r: -; X86: # %bb.0: -; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax # encoding: [0x0f,0xb6,0x44,0x24,0x04] -; X86-NEXT: kmovw %eax, %k1 # encoding: [0xc5,0xf8,0x92,0xc8] -; X86-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1] -; X86-NEXT: # ymm0 = (ymm0 * ymm1) + ymm2 -; X86-NEXT: retl # encoding: [0xc3] -; -; X64-LABEL: test_mask_vfmadd256_pd_r: -; X64: # %bb.0: -; X64-NEXT: kmovw %edi, %k1 # encoding: [0xc5,0xf8,0x92,0xcf] -; X64-NEXT: vfmadd132pd %ymm1, %ymm2, %ymm0 {%k1} # encoding: [0x62,0xf2,0xed,0x29,0x98,0xc1] -; X64-NEXT: # ymm0 = (ymm0 * ymm1) + ymm2 -; X64-NEXT: retq # encoding: [0xc3] - %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 %mask) nounwind - ret <4 x double> %res -} - -define <4 x double> @test_mask_vfmadd256_pd_rz(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2) { -; CHECK-LABEL: test_mask_vfmadd256_pd_rz: -; CHECK: # %bb.0: -; CHECK-NEXT: vfmadd213pd %ymm2, %ymm1, %ymm0 # EVEX TO VEX Compression encoding: [0xc4,0xe2,0xf5,0xa8,0xc2] -; CHECK-NEXT: # ymm0 = (ymm1 * ymm0) + ymm2 -; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3] - %res = call <4 x double> @llvm.x86.avx512.mask.vfmadd.pd.256(<4 x double> %a0, <4 x double> %a1, <4 x double> %a2, i8 -1) nounwind - ret <4 x double> %res -} - define <4 x double> @test_mask_vfmadd256_pd_rmk(<4 x double> %a0, <4 x double> %a1, <4 x double>* %ptr_a2, i8 %mask) { ; X86-LABEL: test_mask_vfmadd256_pd_rmk: ; X86: # %bb.0: |
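A note on the "+/-" and "-/+" annotations in the CHECK comments above: vfmaddsub subtracts the third operand in the even lanes and adds it in the odd lanes, while vfmsubadd does the reverse. A minimal generic-IR sketch of that lane pattern, with an unfused multiply for clarity (the function name is hypothetical, not from the test file):

; Sketch of vfmaddsub lane behavior on <4 x float>:
;   even lanes: a*b - c, odd lanes: a*b + c
define <4 x float> @fmaddsub_sketch(<4 x float> %a, <4 x float> %b, <4 x float> %c) {
  %mul = fmul <4 x float> %a, %b
  %sub = fsub <4 x float> %mul, %c   ; feeds even result lanes
  %add = fadd <4 x float> %mul, %c   ; feeds odd result lanes
  ; interleave: lanes 0 and 2 come from %sub (indices 0, 2),
  ; lanes 1 and 3 come from %add (indices 5, 7)
  %res = shufflevector <4 x float> %sub, <4 x float> %add, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  ret <4 x float> %res
}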

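For background on the merge-masked calls exercised above (the i8 %x3 variants): a masked FMA intrinsic can be modeled in generic IR as an unmasked llvm.fma followed by a per-lane select on the mask bits, with a passthru operand supplying the disabled lanes. The sketch below assumes that model and a hypothetical function name; it mirrors the "mask" variant, which keeps the first source operand in disabled lanes:

; Merge-masked fmadd expressed with generic IR.
define <8 x float> @masked_fmadd_sketch(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2, i8 %mask) {
  %fma = call <8 x float> @llvm.fma.v8f32(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
  ; i8 and <8 x i1> have the same bit width, so the mask bitcasts directly
  %m = bitcast i8 %mask to <8 x i1>
  ; lanes with a clear mask bit keep the passthru value (%a0 here)
  %res = select <8 x i1> %m, <8 x float> %fma, <8 x float> %a0
  ret <8 x float> %res
}
declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)

Under the same model, the mask3 and maskz variants differ only in the passthru: mask3 ties it to the third operand, and maskz uses zeroinitializer.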

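That model also suggests why the i8 -1 calls deleted above add no coverage: an all-ones mask makes the select condition constant true, so the select folds away and only an unmasked FMA remains, which plain fma tests already cover. Illustrative fold (hypothetical function name):

; With mask = -1, bitcast i8 -1 to <8 x i1> is all-true, so
;   select <8 x i1> <all true>, %fma, %passthru  ==>  %fma
define <8 x float> @allones_mask_sketch(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2) {
  %fma = call <8 x float> @llvm.fma.v8f32(<8 x float> %a0, <8 x float> %a1, <8 x float> %a2)
  ret <8 x float> %fma
}
declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)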