author    Matt Arsenault <Matthew.Arsenault@amd.com>  2017-11-07 00:06:32 +0000
committer Matt Arsenault <Matthew.Arsenault@amd.com>  2017-11-07 00:06:32 +0000
commit    6119f80034deca6c53fdeca746e3cb654ebad0c5 (patch)
tree      6e7feeca8b8317ad609ec22e64793d17b3d6c1c2 /llvm/lib
parent    7957b08e87ce314ca644e079a481ac3819f943bc (diff)
AMDGPU: Remove redundant combine
This combine was already done in two places. The generic combiner already has done this since r217610, for adds (with a single use). This one was added in r303641, and added support for handling or as well. r313251 later added support to the generic combine for or.

It also turns out the isOrEquivalentToAdd check is not necessary for this combine.

Additionally, we already reproduce this combine in yet another place in the backend, although in that version multiple uses of the add are still folded if it will allow a fold into the addressing mode. That version needs to be improved to understand ors though, as well as the correct legal offsets for private.

llvm-svn: 317526
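For context, the identity the removed combine relied on is purely algebraic: a left shift distributes over add (modulo 2^n) and over or. The following is a minimal standalone C++ sketch of that identity, illustrative only and not part of the patch; it uses plain uint32_t values rather than SelectionDAG nodes.

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // shl (add x, c2), c1  ==  add (shl x, c1), (c2 << c1)   (mod 2^32)
  // shl (or  x, c2), c1  ==  or  (shl x, c1), (c2 << c1)   (unconditionally)
  const uint32_t c1 = 4, c2 = 0x21;
  for (uint32_t x : {0u, 1u, 0x12345678u, 0xffffffffu}) {
    assert(((x + c2) << c1) == ((x << c1) + (c2 << c1)));
    assert(((x | c2) << c1) == ((x << c1) | (c2 << c1)));
  }
  return 0;
}
```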
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp  38
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h     1
2 files changed, 0 insertions, 39 deletions
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index af22d523cf8..d502b77447d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -128,29 +128,6 @@ EVT AMDGPUTargetLowering::getEquivalentMemType(LLVMContext &Ctx, EVT VT) {
return EVT::getVectorVT(Ctx, MVT::i32, StoreSize / 32);
}
-bool AMDGPUTargetLowering::isOrEquivalentToAdd(SelectionDAG &DAG, SDValue Op)
-{
- assert(Op.getOpcode() == ISD::OR);
-
- SDValue N0 = Op->getOperand(0);
- SDValue N1 = Op->getOperand(1);
- EVT VT = N0.getValueType();
-
- if (VT.isInteger() && !VT.isVector()) {
- KnownBits LHSKnown, RHSKnown;
- DAG.computeKnownBits(N0, LHSKnown);
-
- if (LHSKnown.Zero.getBoolValue()) {
- DAG.computeKnownBits(N1, RHSKnown);
-
- if (!(~RHSKnown.Zero & ~LHSKnown.Zero))
- return true;
- }
- }
-
- return false;
-}
-
unsigned AMDGPUTargetLowering::numBitsUnsigned(SDValue Op, SelectionDAG &DAG) {
KnownBits Known;
EVT VT = Op.getValueType();
@@ -2923,21 +2900,6 @@ SDValue AMDGPUTargetLowering::performShlCombine(SDNode *N,
SDValue Shl = DAG.getNode(ISD::SHL, SL, XVT, X, SDValue(RHS, 0));
return DAG.getZExtOrTrunc(Shl, SL, VT);
}
- case ISD::OR:
- if (!isOrEquivalentToAdd(DAG, LHS))
- break;
- LLVM_FALLTHROUGH;
- case ISD::ADD: {
- // shl (or|add x, c2), c1 => or|add (shl x, c1), (c2 << c1)
- if (ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(LHS->getOperand(1))) {
- SDValue Shl = DAG.getNode(ISD::SHL, SL, VT, LHS->getOperand(0),
- SDValue(RHS, 0));
- SDValue C2V = DAG.getConstant(C2->getAPIntValue() << RHSVal,
- SDLoc(C2), VT);
- return DAG.getNode(LHS->getOpcode(), SL, VT, Shl, C2V);
- }
- break;
- }
}
if (VT != MVT::i64)
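The isOrEquivalentToAdd helper removed above used computeKnownBits to prove that no bit position can be set in both or operands; in that case no carries can occur and or produces the same result as add. The sketch below illustrates that invariant with fully known (constant) operands; the helper name is hypothetical and this is not LLVM API.

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical stand-in for the known-bits test: with constant operands,
// "no possibly-set bit overlaps" reduces to a simple mask check.
static bool orIsEquivalentToAdd(uint32_t a, uint32_t b) {
  return (a & b) == 0; // no carries are generated, so a | b == a + b
}

int main() {
  // Disjoint bits: or and add agree.
  assert(orIsEquivalentToAdd(0xf0, 0x0f));
  assert((0xf0u | 0x0fu) == (0xf0u + 0x0fu));

  // Overlapping bit 3: or and add differ (0x18 vs 0x20).
  assert(!orIsEquivalentToAdd(0x18, 0x08));
  assert((0x18u | 0x08u) != (0x18u + 0x08u));
  return 0;
}
```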
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index dd3cc0a43c7..ba35aeb90ed 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -35,7 +35,6 @@ private:
SDValue getFFBX_U32(SelectionDAG &DAG, SDValue Op, const SDLoc &DL, unsigned Opc) const;
public:
- static bool isOrEquivalentToAdd(SelectionDAG &DAG, SDValue Op);
static unsigned numBitsUnsigned(SDValue Op, SelectionDAG &DAG);
static unsigned numBitsSigned(SDValue Op, SelectionDAG &DAG);