Diffstat (limited to 'llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll')
-rw-r--r--  llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll  80
1 file changed, 80 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
new file mode 100644
index 00000000000..7ad72cca60c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/double-bitmanip-dagcombines.ll
@@ -0,0 +1,80 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32I %s
+; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV32IFD %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
+; RUN: | FileCheck -check-prefix=RV64I %s
+;
+; This file tests cases where simple floating point operations can be
+; profitably handled through bit manipulation if a soft-float ABI is being used
+; (e.g. fneg implemented by XORing the sign bit). This is typically handled in
+; DAGCombiner::visitBITCAST, but this target-independent code may not trigger
+; in cases where we perform custom legalisation (e.g. RV32IFD).
+
+; TODO: Add an appropriate target-specific DAG combine that can handle
+; RISCVISD::SplitF64/BuildPairF64 used for RV32IFD.
+
+define double @fneg(double %a) nounwind {
+; RV32I-LABEL: fneg:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a2, 524288
+; RV32I-NEXT: xor a1, a1, a2
+; RV32I-NEXT: ret
+;
+; RV32IFD-LABEL: fneg:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: fneg.d ft0, ft0
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+;
+; RV64I-LABEL: fneg:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a1, zero, -1
+; RV64I-NEXT: slli a1, a1, 63
+; RV64I-NEXT: xor a0, a0, a1
+; RV64I-NEXT: ret
+ %1 = fneg double %a
+ ret double %1
+}
+
+declare double @llvm.fabs.f64(double)
+
+define double @fabs(double %a) nounwind {
+; RV32I-LABEL: fabs:
+; RV32I: # %bb.0:
+; RV32I-NEXT: lui a2, 524288
+; RV32I-NEXT: addi a2, a2, -1
+; RV32I-NEXT: and a1, a1, a2
+; RV32I-NEXT: ret
+;
+; RV32IFD-LABEL: fabs:
+; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw a0, 8(sp)
+; RV32IFD-NEXT: sw a1, 12(sp)
+; RV32IFD-NEXT: fld ft0, 8(sp)
+; RV32IFD-NEXT: fabs.d ft0, ft0
+; RV32IFD-NEXT: fsd ft0, 8(sp)
+; RV32IFD-NEXT: lw a0, 8(sp)
+; RV32IFD-NEXT: lw a1, 12(sp)
+; RV32IFD-NEXT: addi sp, sp, 16
+; RV32IFD-NEXT: ret
+;
+; RV64I-LABEL: fabs:
+; RV64I: # %bb.0:
+; RV64I-NEXT: addi a1, zero, -1
+; RV64I-NEXT: slli a1, a1, 63
+; RV64I-NEXT: addi a1, a1, -1
+; RV64I-NEXT: and a0, a0, a1
+; RV64I-NEXT: ret
+ %1 = call double @llvm.fabs.f64(double %a)
+ ret double %1
+}
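
For reference, a minimal LLVM IR sketch (not part of the patch, and not the actual DAG combine, which operates on SelectionDAG nodes rather than IR) of the integer-only form these tests expect when a double already lives in integer registers under a soft-float ABI: flip bit 63 for fneg, clear it for fabs. On RV32 the same idea applies to the high word only, as the RV32I check lines above show. The function names below are hypothetical illustrations.

; Hypothetical illustration only: equivalent bit manipulation on the raw
; 64-bit representation of a double held in an integer register.
define i64 @fneg_bits(i64 %a) {
  ; fneg: flip the sign bit (bit 63)
  %r = xor i64 %a, -9223372036854775808   ; 0x8000000000000000
  ret i64 %r
}

define i64 @fabs_bits(i64 %a) {
  ; fabs: clear the sign bit (bit 63)
  %r = and i64 %a, 9223372036854775807    ; 0x7FFFFFFFFFFFFFFF
  ret i64 %r
}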