author    Justin Lebar <jlebar@google.com>    2017-01-18 00:09:01 +0000
committer Justin Lebar <jlebar@google.com>    2017-01-18 00:09:01 +0000
commit    cc938fc197ce53694667e47d183f0aae2219c6c9 (patch)
tree      cca7da71d94dab6f7e4d75cb1312a7cc44447dc7 /llvm/lib/Target/NVPTX
parent    7dc3d6c3415db5f30d4acafc725a50debdac3bf7 (diff)
[NVPTX] Implement min/max in tablegen, rather than with custom DAGCombine logic.
Summary:
This change also lets us use max.{s,u}16. There's a vague warning in a test
about this maybe being less efficient, but I could not come up with a case
where the resulting SASS (sm_35 or sm_60) was different with or without
max.{s,u}16. It's true that nvcc seems to emit only max.{s,u}32, but even
ptxas 7.0 seems to have no problem generating efficient SASS from max.{s,u}16
(the casts up to i32 and back down to i16 seem to be implicit and nops,
happening via register aliasing). In the absence of evidence, better to have
fewer special cases, emit more straightforward code, etc. In particular, if a
new GPU has 16-bit min/max instructions, we want to be able to use them.

Reviewers: tra

Subscribers: jholewinski, llvm-commits

Differential Revision: https://reviews.llvm.org/D28732

llvm-svn: 292304
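For context, here is a minimal CUDA sketch (the kernel and its parameters are hypothetical, not part of this commit) of the 16-bit pattern this change lets the backend select as max.s16 instead of forcing everything through a 32-bit-only custom combine. Whether C's integer promotions around short are narrowed back to i16 before instruction selection is up to the optimizer, so treat this as an illustration of the shape, not a guaranteed lowering:

// Hypothetical example: an elementwise signed 16-bit max. With SMAX marked
// Legal for MVT::i16, the (a < b ? b : a) select-of-compare can now be
// selected as max.s16 rather than relying on the removed SELECT combine,
// which only handled i32 and i64.
__global__ void max_s16_kernel(const short *a, const short *b, short *out,
                               int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    out[i] = (short)(a[i] < b[i] ? b[i] : a[i]);  // signed 16-bit max
}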
Diffstat (limited to 'llvm/lib/Target/NVPTX')
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp | 80
-rw-r--r--  llvm/lib/Target/NVPTX/NVPTXInstrInfo.td     |  6
2 files changed, 16 insertions, 70 deletions
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 1691f21619b..0fd8d4b2aa7 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -290,15 +290,19 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
// Custom handling for i8 intrinsics
setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
- setOperationAction(ISD::CTLZ, MVT::i16, Legal);
- setOperationAction(ISD::CTLZ, MVT::i32, Legal);
- setOperationAction(ISD::CTLZ, MVT::i64, Legal);
+ for (const auto& Ty : {MVT::i16, MVT::i32, MVT::i64}) {
+ setOperationAction(ISD::SMIN, Ty, Legal);
+ setOperationAction(ISD::SMAX, Ty, Legal);
+ setOperationAction(ISD::UMIN, Ty, Legal);
+ setOperationAction(ISD::UMAX, Ty, Legal);
+
+ setOperationAction(ISD::CTPOP, Ty, Legal);
+ setOperationAction(ISD::CTLZ, Ty, Legal);
+ }
+
setOperationAction(ISD::CTTZ, MVT::i16, Expand);
setOperationAction(ISD::CTTZ, MVT::i32, Expand);
setOperationAction(ISD::CTTZ, MVT::i64, Expand);
- setOperationAction(ISD::CTPOP, MVT::i16, Legal);
- setOperationAction(ISD::CTPOP, MVT::i32, Legal);
- setOperationAction(ISD::CTPOP, MVT::i64, Legal);
// PTX does not directly support SELP of i1, so promote to i32 first
setOperationAction(ISD::SELECT, MVT::i1, Custom);
@@ -313,7 +317,6 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
setTargetDAGCombine(ISD::FADD);
setTargetDAGCombine(ISD::MUL);
setTargetDAGCombine(ISD::SHL);
- setTargetDAGCombine(ISD::SELECT);
setTargetDAGCombine(ISD::SREM);
setTargetDAGCombine(ISD::UREM);
@@ -4159,67 +4162,6 @@ static SDValue PerformANDCombine(SDNode *N,
return SDValue();
}
-static SDValue PerformSELECTCombine(SDNode *N,
- TargetLowering::DAGCombinerInfo &DCI) {
- // Currently this detects patterns for integer min and max and
- // lowers them to PTX-specific intrinsics that enable hardware
- // support.
-
- const SDValue Cond = N->getOperand(0);
- if (Cond.getOpcode() != ISD::SETCC) return SDValue();
-
- const SDValue LHS = Cond.getOperand(0);
- const SDValue RHS = Cond.getOperand(1);
- const SDValue True = N->getOperand(1);
- const SDValue False = N->getOperand(2);
- if (!(LHS == True && RHS == False) && !(LHS == False && RHS == True))
- return SDValue();
-
- const EVT VT = N->getValueType(0);
- if (VT != MVT::i32 && VT != MVT::i64) return SDValue();
-
- const ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
- SDValue Larger; // The larger of LHS and RHS when condition is true.
- switch (CC) {
- case ISD::SETULT:
- case ISD::SETULE:
- case ISD::SETLT:
- case ISD::SETLE:
- Larger = RHS;
- break;
-
- case ISD::SETGT:
- case ISD::SETGE:
- case ISD::SETUGT:
- case ISD::SETUGE:
- Larger = LHS;
- break;
-
- default:
- return SDValue();
- }
- const bool IsMax = (Larger == True);
- const bool IsSigned = ISD::isSignedIntSetCC(CC);
-
- unsigned IntrinsicId;
- if (VT == MVT::i32) {
- if (IsSigned)
- IntrinsicId = IsMax ? Intrinsic::nvvm_max_i : Intrinsic::nvvm_min_i;
- else
- IntrinsicId = IsMax ? Intrinsic::nvvm_max_ui : Intrinsic::nvvm_min_ui;
- } else {
- assert(VT == MVT::i64);
- if (IsSigned)
- IntrinsicId = IsMax ? Intrinsic::nvvm_max_ll : Intrinsic::nvvm_min_ll;
- else
- IntrinsicId = IsMax ? Intrinsic::nvvm_max_ull : Intrinsic::nvvm_min_ull;
- }
-
- SDLoc DL(N);
- return DCI.DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT,
- DCI.DAG.getConstant(IntrinsicId, DL, VT), LHS, RHS);
-}
-
static SDValue PerformREMCombine(SDNode *N,
TargetLowering::DAGCombinerInfo &DCI,
CodeGenOpt::Level OptLevel) {
@@ -4429,8 +4371,6 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
return PerformSHLCombine(N, DCI, OptLevel);
case ISD::AND:
return PerformANDCombine(N, DCI);
- case ISD::SELECT:
- return PerformSELECTCombine(N, DCI);
case ISD::UREM:
case ISD::SREM:
return PerformREMCombine(N, DCI, OptLevel);
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index ac7b7e0b9f4..98e30f995cb 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -529,6 +529,12 @@ defm ABS_16 : ABS<Int16Regs, 15, ".s16">;
defm ABS_32 : ABS<Int32Regs, 31, ".s32">;
defm ABS_64 : ABS<Int64Regs, 63, ".s64">;
+// Integer min/max.
+defm SMAX : I3<"max.s", smax>;
+defm UMAX : I3<"max.u", umax>;
+defm SMIN : I3<"min.s", smin>;
+defm UMIN : I3<"min.u", umin>;
+
//
// Wide multiplication
//
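As a closing illustration, a hedged CUDA sketch (hypothetical kernel, not from this commit) of the 32-bit select-of-compare shape that PerformSELECTCombine used to rewrite into nvvm_min_ui. With UMIN marked Legal, the same shape can reach instruction selection as a plain ISD::UMIN node and be matched by the new "min.u" pattern above:

// Hypothetical example: an elementwise unsigned 32-bit min. The ternary below
// is the select(setcc(a, b, ult), a, b) pattern the deleted combine matched;
// it is now expected to lower through ISD::UMIN to min.u32.
__global__ void min_u32_kernel(const unsigned *a, const unsigned *b,
                               unsigned *out, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    out[i] = a[i] < b[i] ? a[i] : b[i];  // unsigned 32-bit min
}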