Diffstat (limited to 'llvm/test/CodeGen/RISCV/double-intrinsics.ll')
-rw-r--r--   llvm/test/CodeGen/RISCV/double-intrinsics.ll   55
1 file changed, 30 insertions(+), 25 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/double-intrinsics.ll b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
index b8873883cd5..29327452ff1 100644
--- a/llvm/test/CodeGen/RISCV/double-intrinsics.ll
+++ b/llvm/test/CodeGen/RISCV/double-intrinsics.ll
@@ -111,24 +111,28 @@ define double @sincos_f64(double %a) nounwind {
 ; RV32IFD-NEXT: sw ra, 28(sp)
 ; RV32IFD-NEXT: sw s0, 24(sp)
 ; RV32IFD-NEXT: sw s1, 20(sp)
+; RV32IFD-NEXT: sw s2, 16(sp)
+; RV32IFD-NEXT: sw s3, 12(sp)
 ; RV32IFD-NEXT: mv s0, a1
 ; RV32IFD-NEXT: mv s1, a0
 ; RV32IFD-NEXT: call sin
-; RV32IFD-NEXT: sw a0, 8(sp)
-; RV32IFD-NEXT: sw a1, 12(sp)
-; RV32IFD-NEXT: fld ft0, 8(sp)
-; RV32IFD-NEXT: fsd ft0, 0(sp)
+; RV32IFD-NEXT: mv s2, a0
+; RV32IFD-NEXT: mv s3, a1
 ; RV32IFD-NEXT: mv a0, s1
 ; RV32IFD-NEXT: mv a1, s0
 ; RV32IFD-NEXT: call cos
-; RV32IFD-NEXT: sw a0, 8(sp)
-; RV32IFD-NEXT: sw a1, 12(sp)
-; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: sw a0, 0(sp)
+; RV32IFD-NEXT: sw a1, 4(sp)
+; RV32IFD-NEXT: fld ft0, 0(sp)
+; RV32IFD-NEXT: sw s2, 0(sp)
+; RV32IFD-NEXT: sw s3, 4(sp)
 ; RV32IFD-NEXT: fld ft1, 0(sp)
 ; RV32IFD-NEXT: fadd.d ft0, ft1, ft0
-; RV32IFD-NEXT: fsd ft0, 8(sp)
-; RV32IFD-NEXT: lw a0, 8(sp)
-; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: fsd ft0, 0(sp)
+; RV32IFD-NEXT: lw a0, 0(sp)
+; RV32IFD-NEXT: lw a1, 4(sp)
+; RV32IFD-NEXT: lw s3, 12(sp)
+; RV32IFD-NEXT: lw s2, 16(sp)
 ; RV32IFD-NEXT: lw s1, 20(sp)
 ; RV32IFD-NEXT: lw s0, 24(sp)
 ; RV32IFD-NEXT: lw ra, 28(sp)
@@ -140,16 +144,17 @@ define double @sincos_f64(double %a) nounwind {
 ; RV64IFD-NEXT: addi sp, sp, -32
 ; RV64IFD-NEXT: sd ra, 24(sp)
 ; RV64IFD-NEXT: sd s0, 16(sp)
+; RV64IFD-NEXT: sd s1, 8(sp)
 ; RV64IFD-NEXT: mv s0, a0
 ; RV64IFD-NEXT: call sin
-; RV64IFD-NEXT: fmv.d.x ft0, a0
-; RV64IFD-NEXT: fsd ft0, 8(sp)
+; RV64IFD-NEXT: mv s1, a0
 ; RV64IFD-NEXT: mv a0, s0
 ; RV64IFD-NEXT: call cos
 ; RV64IFD-NEXT: fmv.d.x ft0, a0
-; RV64IFD-NEXT: fld ft1, 8(sp)
+; RV64IFD-NEXT: fmv.d.x ft1, s1
 ; RV64IFD-NEXT: fadd.d ft0, ft1, ft0
 ; RV64IFD-NEXT: fmv.x.d a0, ft0
+; RV64IFD-NEXT: ld s1, 8(sp)
 ; RV64IFD-NEXT: ld s0, 16(sp)
 ; RV64IFD-NEXT: ld ra, 24(sp)
 ; RV64IFD-NEXT: addi sp, sp, 32
@@ -345,17 +350,17 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
 ; RV32IFD-LABEL: fmuladd_f64:
 ; RV32IFD: # %bb.0:
 ; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw a4, 8(sp)
-; RV32IFD-NEXT: sw a5, 12(sp)
-; RV32IFD-NEXT: fld ft0, 8(sp)
 ; RV32IFD-NEXT: sw a2, 8(sp)
 ; RV32IFD-NEXT: sw a3, 12(sp)
-; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
 ; RV32IFD-NEXT: sw a0, 8(sp)
 ; RV32IFD-NEXT: sw a1, 12(sp)
-; RV32IFD-NEXT: fld ft2, 8(sp)
-; RV32IFD-NEXT: fmul.d ft1, ft2, ft1
-; RV32IFD-NEXT: fadd.d ft0, ft1, ft0
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fmul.d ft0, ft1, ft0
+; RV32IFD-NEXT: sw a4, 8(sp)
+; RV32IFD-NEXT: sw a5, 12(sp)
+; RV32IFD-NEXT: fld ft1, 8(sp)
+; RV32IFD-NEXT: fadd.d ft0, ft0, ft1
 ; RV32IFD-NEXT: fsd ft0, 8(sp)
 ; RV32IFD-NEXT: lw a0, 8(sp)
 ; RV32IFD-NEXT: lw a1, 12(sp)
@@ -364,11 +369,11 @@ define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
 ;
 ; RV64IFD-LABEL: fmuladd_f64:
 ; RV64IFD: # %bb.0:
-; RV64IFD-NEXT: fmv.d.x ft0, a2
-; RV64IFD-NEXT: fmv.d.x ft1, a1
-; RV64IFD-NEXT: fmv.d.x ft2, a0
-; RV64IFD-NEXT: fmul.d ft1, ft2, ft1
-; RV64IFD-NEXT: fadd.d ft0, ft1, ft0
+; RV64IFD-NEXT: fmv.d.x ft0, a1
+; RV64IFD-NEXT: fmv.d.x ft1, a0
+; RV64IFD-NEXT: fmul.d ft0, ft1, ft0
+; RV64IFD-NEXT: fmv.d.x ft1, a2
+; RV64IFD-NEXT: fadd.d ft0, ft0, ft1
 ; RV64IFD-NEXT: fmv.x.d a0, ft0
 ; RV64IFD-NEXT: ret
   %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
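
For reference, the hunks above only show the regenerated FileCheck lines; the IR bodies of the two affected test functions fall mostly outside the hunk context (only the trailing @llvm.fmuladd.f64 call is visible). The following is a minimal sketch of what such test functions look like, inferred from the checks; the exact intrinsic declarations and IR bodies are assumptions, not taken verbatim from the file.

declare double @llvm.sin.f64(double)
declare double @llvm.cos.f64(double)
declare double @llvm.fmuladd.f64(double, double, double)

; sin(%a) + cos(%a). In the updated checks the sin result is kept in
; callee-saved registers (s2/s3 on RV32IFD, s1 on RV64IFD) across the
; cos libcall instead of being spilled through the stack.
define double @sincos_f64(double %a) nounwind {
  %1 = call double @llvm.sin.f64(double %a)
  %2 = call double @llvm.cos.f64(double %a)
  %3 = fadd double %1, %2
  ret double %3
}

; %a * %b + %c. The checks above show this lowering to fmul.d followed
; by fadd.d rather than a fused multiply-add.
define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
  %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
  ret double %1
}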