author     Matt Arsenault <Matthew.Arsenault@amd.com>  2014-08-15 18:42:18 +0000
committer  Matt Arsenault <Matthew.Arsenault@amd.com>  2014-08-15 18:42:18 +0000
commit     13623d0e2877d9273f26a0ba9d059224d6f4e3f1 (patch)
tree       0bb958bd2bed6030da1bb0c4e7fa2ec2af1fbe53 /llvm
parent     a147438e37d009d7015bb27ca656323b82d8ae11 (diff)
R600/SI: Use source modifiers for f64 fneg
llvm-svn: 215748
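What "source modifiers" buys here: SI VALU instructions can negate or take the absolute value of a source operand as part of the instruction encoding, so an f64 fneg can usually fold into its consumer instead of costing a separate operation. A minimal toy sketch of the idea (illustrative standalone C++, not ISA or LLVM code; all names below are made up):

    #include <cmath>

    // Toy model of an SI VOP source operand's modifiers: |src| (abs) and
    // -src (neg) are applied by the hardware before the main operation.
    struct SrcMod {
      bool Abs = false;
      bool Neg = false;
    };

    double applySrcMods(double V, SrcMod M) {
      if (M.Abs) V = std::fabs(V); // '|' modifier
      if (M.Neg) V = -V;           // '-' modifier, applied after abs
      return V;
    }

    // e.g. "V_MUL_F64 dst, src0, -|src1|" computes src0 * -fabs(src1) in a
    // single instruction; this patch lets f64 fneg use that encoding.
    double vMulF64(double Src0, SrcMod M0, double Src1, SrcMod M1) {
      return applySrcMods(Src0, M0) * applySrcMods(Src1, M1);
    }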
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/R600/AMDGPUISelLowering.cpp  |  2
-rw-r--r--  llvm/lib/Target/R600/SIISelLowering.cpp      | 32
-rw-r--r--  llvm/lib/Target/R600/SIInstructions.td       | 10
-rw-r--r--  llvm/test/CodeGen/R600/fneg-fabs.f64.ll      | 89
-rw-r--r--  llvm/test/CodeGen/R600/fneg.f64.ll           | 59
5 files changed, 186 insertions, 6 deletions
diff --git a/llvm/lib/Target/R600/AMDGPUISelLowering.cpp b/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
index 6d7438399fe..7d4aadcc5ca 100644
--- a/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -446,7 +446,7 @@ bool AMDGPUTargetLowering::isFAbsFree(EVT VT) const {
bool AMDGPUTargetLowering::isFNegFree(EVT VT) const {
assert(VT.isFloatingPoint());
- return VT == MVT::f32;
+ return VT == MVT::f32 || VT == MVT::f64;
}
bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
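Returning true for f64 tells DAGCombiner that negation is free, so it no longer pre-expands fneg into integer sign-bit arithmetic (see the comment in the new fneg.f64.ll test below). The two lowerings are bit-identical, which this standalone sketch (plain host C++, not LLVM code) checks:

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <initializer_list>

    // FP negate (foldable into a source modifier) versus the integer XOR of
    // the IEEE-754 sign bit that DAGCombiner would otherwise emit.
    double fnegAsFPOp(double X) { return -X; }

    double fnegAsIntXor(double X) {
      uint64_t Bits;
      std::memcpy(&Bits, &X, sizeof(Bits));
      Bits ^= 1ull << 63; // flip only the sign bit
      double R;
      std::memcpy(&R, &Bits, sizeof(R));
      return R;
    }

    int main() {
      for (double V : {0.0, -0.0, 1.5, -3.25}) {
        uint64_t A, B;
        double FP = fnegAsFPOp(V), IX = fnegAsIntXor(V);
        std::memcpy(&A, &FP, sizeof(A));
        std::memcpy(&B, &IX, sizeof(B));
        assert(A == B); // identical bit patterns either way
      }
      return 0;
    }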
diff --git a/llvm/lib/Target/R600/SIISelLowering.cpp b/llvm/lib/Target/R600/SIISelLowering.cpp
index 508ed2a9a9a..1d5b43f5954 100644
--- a/llvm/lib/Target/R600/SIISelLowering.cpp
+++ b/llvm/lib/Target/R600/SIISelLowering.cpp
@@ -223,10 +223,6 @@ SITargetLowering::SITargetLowering(TargetMachine &TM) :
setOperationAction(ISD::FRINT, MVT::f64, Legal);
}
- // FIXME: These should be removed and handled the same way as f32 fneg. Source
- // modifiers also work for the double instructions.
- setOperationAction(ISD::FNEG, MVT::f64, Expand);
-
setOperationAction(ISD::FDIV, MVT::f32, Custom);
setTargetDAGCombine(ISD::SELECT_CC);
@@ -701,6 +697,7 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
unsigned DestReg = MI->getOperand(0).getReg();
unsigned Reg = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
+ // FIXME: Should use SALU instructions
BuildMI(*BB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), Reg)
.addImm(0x80000000);
BuildMI(*BB, I, DL, TII->get(AMDGPU::V_XOR_B32_e32), DestReg)
@@ -709,6 +706,33 @@ MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
MI->eraseFromParent();
break;
}
+ case AMDGPU::FNEG64_SI: {
+ MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
+ const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
+ getTargetMachine().getSubtargetImpl()->getInstrInfo());
+
+ DebugLoc DL = MI->getDebugLoc();
+ unsigned SrcReg = MI->getOperand(1).getReg();
+ unsigned DestReg = MI->getOperand(0).getReg();
+
+ unsigned TmpReg = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
+ unsigned ImmReg = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
+
+ // FIXME: Should use SALU instructions
+ BuildMI(*BB, I, DL, TII->get(AMDGPU::V_MOV_B32_e32), ImmReg)
+ .addImm(0x80000000);
+ BuildMI(*BB, I, DL, TII->get(AMDGPU::V_XOR_B32_e32), TmpReg)
+ .addReg(SrcReg, 0, AMDGPU::sub1)
+ .addReg(ImmReg);
+
+ BuildMI(*BB, I, DL, TII->get(AMDGPU::REG_SEQUENCE), DestReg)
+ .addReg(SrcReg, 0, AMDGPU::sub0)
+ .addImm(AMDGPU::sub0)
+ .addReg(TmpReg)
+ .addImm(AMDGPU::sub1);
+ MI->eraseFromParent();
+ break;
+ }
case AMDGPU::FCLAMP_SI: {
const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
getTargetMachine().getSubtargetImpl()->getInstrInfo());
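The FNEG64_SI expansion above never touches the low word: it XORs 0x80000000 into the sub1 (high) half and stitches the pair back together with REG_SEQUENCE, passing sub0 through unchanged. A rough standalone model of what the emitted sequence computes (hypothetical names; the real code operates on virtual registers):

    #include <cstdint>

    // Models a 64-bit VGPR pair: Sub0 = low 32 bits, Sub1 = high 32 bits.
    struct VReg64 {
      uint32_t Sub0;
      uint32_t Sub1;
    };

    VReg64 emitFNeg64(VReg64 Src) {
      uint32_t ImmReg = 0x80000000u;       // V_MOV_B32_e32 ImmReg, 0x80000000
      uint32_t TmpReg = Src.Sub1 ^ ImmReg; // V_XOR_B32_e32 TmpReg, Src:sub1, ImmReg
      return {Src.Sub0, TmpReg};           // REG_SEQUENCE Dest, Src:sub0, TmpReg:sub1
    }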
diff --git a/llvm/lib/Target/R600/SIInstructions.td b/llvm/lib/Target/R600/SIInstructions.td
index af8d3b3d080..8d2c212dc15 100644
--- a/llvm/lib/Target/R600/SIInstructions.td
+++ b/llvm/lib/Target/R600/SIInstructions.td
@@ -2328,12 +2328,20 @@ def : Pat <
// TODO: Look into not implementing isFNegFree/isFAbsFree for SI, and possibly
// removing these patterns
-
def : Pat <
(fneg (fabs f32:$src)),
(V_OR_B32_e32 $src, (V_MOV_B32_e32 0x80000000)) /* Set sign bit */
>;
+def : Pat <
+ (fneg (fabs f64:$src)),
+ (f64 (INSERT_SUBREG
+ (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
+ (i32 (EXTRACT_SUBREG f64:$src, sub0)), sub0),
+ (V_OR_B32_e32 (S_MOV_B32 0x80000000),
+ (EXTRACT_SUBREG f64:$src, sub1)), sub1)) // Set sign bit.
+>;
+
class SIUnaryCustomInsertInst<string name, SDPatternOperator node,
ValueType vt,
RegisterClass dstrc,
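For (fneg (fabs f64)) no XOR is needed: unconditionally setting the sign bit with one V_OR_B32 on the high half yields -|x|, and INSERT_SUBREG reuses the low half as-is. A small sketch of that identity (assumes a little-endian host, so Halves[1] is the high word):

    #include <cstdint>
    #include <cstring>

    // -|x| == force the IEEE-754 sign bit on; only the high word changes.
    double fnegFabsViaOr(double X) {
      uint32_t Halves[2];
      std::memcpy(Halves, &X, sizeof(X)); // Halves[1] ~ sub1 (little-endian)
      Halves[1] |= 0x80000000u;           // V_OR_B32 with 0x80000000
      double R;
      std::memcpy(&R, Halves, sizeof(R));
      return R;
    }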
diff --git a/llvm/test/CodeGen/R600/fneg-fabs.f64.ll b/llvm/test/CodeGen/R600/fneg-fabs.f64.ll
new file mode 100644
index 00000000000..42117b5daf7
--- /dev/null
+++ b/llvm/test/CodeGen/R600/fneg-fabs.f64.ll
@@ -0,0 +1,89 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FIXME: Check something here. Currently it seems fabs + fneg aren't folded
+; into 2 modifiers, although theoretically that should work.
+
+; FUNC-LABEL: @fneg_fabs_fadd_f64
+; SI: V_AND_B32_e32 v[[FABS:[0-9]+]], 0x7fffffff, {{v[0-9]+}}
+; SI: V_ADD_F64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, -v{{\[[0-9]+}}:[[FABS]]{{\]}}
+define void @fneg_fabs_fadd_f64(double addrspace(1)* %out, double %x, double %y) {
+ %fabs = call double @llvm.fabs.f64(double %x)
+ %fsub = fsub double -0.000000e+00, %fabs
+ %fadd = fadd double %y, %fsub
+ store double %fadd, double addrspace(1)* %out, align 8
+ ret void
+}
+
+define void @v_fneg_fabs_fadd_f64(double addrspace(1)* %out, double addrspace(1)* %xptr, double addrspace(1)* %yptr) {
+ %x = load double addrspace(1)* %xptr, align 8
+ %y = load double addrspace(1)* %yptr, align 8
+ %fabs = call double @llvm.fabs.f64(double %x)
+ %fsub = fsub double -0.000000e+00, %fabs
+ %fadd = fadd double %y, %fsub
+ store double %fadd, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @fneg_fabs_fmul_f64
+; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -|{{v\[[0-9]+:[0-9]+\]}}|
+define void @fneg_fabs_fmul_f64(double addrspace(1)* %out, double %x, double %y) {
+ %fabs = call double @llvm.fabs.f64(double %x)
+ %fsub = fsub double -0.000000e+00, %fabs
+ %fmul = fmul double %y, %fsub
+ store double %fmul, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @fneg_fabs_free_f64
+define void @fneg_fabs_free_f64(double addrspace(1)* %out, i64 %in) {
+ %bc = bitcast i64 %in to double
+ %fabs = call double @llvm.fabs.f64(double %bc)
+ %fsub = fsub double -0.000000e+00, %fabs
+ store double %fsub, double addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fneg_fabs_fn_free_f64
+; SI: V_OR_B32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
+define void @fneg_fabs_fn_free_f64(double addrspace(1)* %out, i64 %in) {
+ %bc = bitcast i64 %in to double
+ %fabs = call double @fabs(double %bc)
+ %fsub = fsub double -0.000000e+00, %fabs
+ store double %fsub, double addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fneg_fabs_f64
+define void @fneg_fabs_f64(double addrspace(1)* %out, double %in) {
+ %fabs = call double @llvm.fabs.f64(double %in)
+ %fsub = fsub double -0.000000e+00, %fabs
+ store double %fsub, double addrspace(1)* %out, align 8
+ ret void
+}
+
+; FUNC-LABEL: @fneg_fabs_v2f64
+; SI: V_OR_B32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
+define void @fneg_fabs_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %in) {
+ %fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %in)
+ %fsub = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %fabs
+ store <2 x double> %fsub, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fneg_fabs_v4f64
+; SI: V_OR_B32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
+; SI: V_OR_B32_e32 v{{[0-9]+}}, 0x80000000, v{{[0-9]+}}
+define void @fneg_fabs_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %in) {
+ %fabs = call <4 x double> @llvm.fabs.v4f64(<4 x double> %in)
+ %fsub = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %fabs
+ store <4 x double> %fsub, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+declare double @fabs(double) readnone
+declare double @llvm.fabs.f64(double) readnone
+declare <2 x double> @llvm.fabs.v2f64(<2 x double>) readnone
+declare <4 x double> @llvm.fabs.v4f64(<4 x double>) readnone
diff --git a/llvm/test/CodeGen/R600/fneg.f64.ll b/llvm/test/CodeGen/R600/fneg.f64.ll
new file mode 100644
index 00000000000..61d95135a4a
--- /dev/null
+++ b/llvm/test/CodeGen/R600/fneg.f64.ll
@@ -0,0 +1,59 @@
+; RUN: llc -march=r600 -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
+
+; FUNC-LABEL: @fneg_f64
+; SI: V_XOR_B32
+define void @fneg_f64(double addrspace(1)* %out, double %in) {
+ %fneg = fsub double -0.000000e+00, %in
+ store double %fneg, double addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fneg_v2f64
+; SI: V_XOR_B32
+; SI: V_XOR_B32
+define void @fneg_v2f64(<2 x double> addrspace(1)* nocapture %out, <2 x double> %in) {
+ %fneg = fsub <2 x double> <double -0.000000e+00, double -0.000000e+00>, %in
+ store <2 x double> %fneg, <2 x double> addrspace(1)* %out
+ ret void
+}
+
+; FUNC-LABEL: @fneg_v4f64
+; R600: -PV
+; R600: -T
+; R600: -PV
+; R600: -PV
+
+; SI: V_XOR_B32
+; SI: V_XOR_B32
+; SI: V_XOR_B32
+; SI: V_XOR_B32
+define void @fneg_v4f64(<4 x double> addrspace(1)* nocapture %out, <4 x double> %in) {
+ %fneg = fsub <4 x double> <double -0.000000e+00, double -0.000000e+00, double -0.000000e+00, double -0.000000e+00>, %in
+ store <4 x double> %fneg, <4 x double> addrspace(1)* %out
+ ret void
+}
+
+; DAGCombiner will transform:
+; (fneg (f64 bitcast (i64 a))) => (f64 bitcast (xor (i64 a), 0x8000000000000000))
+; unless the target returns true for isFNegFree()
+
+; FUNC-LABEL: @fneg_free_f64
+; FIXME: Unnecessary copy to VGPRs
+; SI: V_ADD_F64 {{v\[[0-9]+:[0-9]+\]}}, {{v\[[0-9]+:[0-9]+\]}}, -{{v\[[0-9]+:[0-9]+\]}}, 0, 0
+define void @fneg_free_f64(double addrspace(1)* %out, i64 %in) {
+ %bc = bitcast i64 %in to double
+ %fsub = fsub double 0.0, %bc
+ store double %fsub, double addrspace(1)* %out
+ ret void
+}
+
+; SI-LABEL: @fneg_fold_f64
+; SI: S_LOAD_DWORDX2 [[NEG_VALUE:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb
+; SI-NOT: XOR
+; SI: V_MUL_F64 {{v\[[0-9]+:[0-9]+\]}}, -[[NEG_VALUE]], {{v\[[0-9]+:[0-9]+\]}}
+define void @fneg_fold_f64(double addrspace(1)* %out, double %in) {
+ %fsub = fsub double -0.0, %in
+ %fmul = fmul double %fsub, %in
+ store double %fmul, double addrspace(1)* %out
+ ret void
+}