Diffstat (limited to 'llvm/lib/Target')
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp | 16
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.h | 8
-rw-r--r--  llvm/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp | 6
-rw-r--r--  llvm/lib/Target/ARM64/ARM64ISelLowering.cpp | 8
-rw-r--r--  llvm/lib/Target/ARM64/ARM64ISelLowering.h | 8
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp | 6
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.cpp | 24
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.h | 10
-rw-r--r--  llvm/lib/Target/R600/AMDGPUISelLowering.cpp | 28
-rw-r--r--  llvm/lib/Target/R600/AMDGPUISelLowering.h | 10
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.cpp | 6
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.h | 12
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp | 6
-rw-r--r--  llvm/lib/Target/SystemZ/SystemZISelLowering.cpp | 4
-rw-r--r--  llvm/lib/Target/X86/X86ISelDAGToDAG.cpp | 2
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 14
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.h | 12
-rw-r--r--  llvm/lib/Target/X86/X86InstrCompiler.td | 4
-rw-r--r--  llvm/lib/Target/XCore/XCoreISelLowering.cpp | 18
-rw-r--r--  llvm/lib/Target/XCore/XCoreISelLowering.h | 10
20 files changed, 106 insertions, 106 deletions
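This change mechanically renames the SelectionDAG known-bits interface (ComputeMaskedBits → computeKnownBits, computeMaskedBitsForTargetNode → computeKnownBitsForTargetNode) across the targets touched here, without changing behavior. The contract the call sites below rely on: the DAG fills two masks of the value's bit width, KnownZero and KnownOne, and no bit may appear in both (see the asserts in the Sparc hunk). A minimal standalone C++ model of that contract, using plain 32-bit integers instead of llvm::APInt (illustrative names, not the LLVM API):

#include <cassert>
#include <cstdint>

// Standalone model of the known-bits result used throughout this diff.
// Bits set in Zero are proven to be 0, bits set in One are proven to be 1,
// and a bit set in neither mask is simply unknown.
struct KnownBits32 {
  uint32_t Zero = 0;
  uint32_t One = 0;

  void verify() const {
    assert((Zero & One) == 0 && "Bits known to be one AND zero?");
  }
};

// A constant is fully known: its set bits are known one, the rest known zero.
KnownBits32 knownBitsOfConstant(uint32_t C) { return {~C, C}; }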
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index e7ffeee8f92..d5617dd66ee 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -9520,7 +9520,7 @@ ARMTargetLowering::PerformCMOVCombine(SDNode *N, SelectionDAG &DAG) const {
if (Res.getNode()) {
APInt KnownZero, KnownOne;
- DAG.ComputeMaskedBits(SDValue(N,0), KnownZero, KnownOne);
+ DAG.computeKnownBits(SDValue(N,0), KnownZero, KnownOne);
// Capture demanded bits information that would be otherwise lost.
if (KnownZero == 0xfffffffe)
Res = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Res,
@@ -10107,11 +10107,11 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
return true;
}
-void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
+void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) const {
unsigned BitWidth = KnownOne.getBitWidth();
KnownZero = KnownOne = APInt(BitWidth, 0);
switch (Op.getOpcode()) {
@@ -10127,11 +10127,11 @@ void ARMTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
break;
case ARMISD::CMOV: {
// Bits are known zero/one if known on the LHS and RHS.
- DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
+ DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth+1);
if (KnownZero == 0 && KnownOne == 0) return;
APInt KnownZeroRHS, KnownOneRHS;
- DAG.ComputeMaskedBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1);
+ DAG.computeKnownBits(Op.getOperand(1), KnownZeroRHS, KnownOneRHS, Depth+1);
KnownZero &= KnownZeroRHS;
KnownOne &= KnownOneRHS;
return;
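The CMOV case above only keeps facts that hold on both arms of the select: a bit is known in the result only if both operands agree on it. A minimal standalone sketch of that intersection, on plain 32-bit masks (illustrative names, not the LLVM API):

#include <cstdint>

struct KnownBits32 {
  uint32_t Zero; // bits proven to be 0
  uint32_t One;  // bits proven to be 1
};

// ARMISD::CMOV (and similar selects) produce one of their two operands, so
// the result's known bits are the intersection of the operands' known bits.
KnownBits32 knownBitsOfSelect(const KnownBits32 &LHS, const KnownBits32 &RHS) {
  return {LHS.Zero & RHS.Zero, LHS.One & RHS.One};
}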
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index 0175c24b735..c15305c5e60 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -313,10 +313,10 @@ namespace llvm {
SDValue &Offset, ISD::MemIndexedMode &AM,
SelectionDAG &DAG) const override;
- void computeMaskedBitsForTargetNode(const SDValue Op, APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const override;
+ void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) const override;
bool ExpandInlineAsm(CallInst *CI) const override;
diff --git a/llvm/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp b/llvm/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp
index 050663d9812..2e84a268de9 100644
--- a/llvm/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM64/ARM64ISelDAGToDAG.cpp
@@ -1731,14 +1731,14 @@ static bool isBitfieldPositioningOp(SelectionDAG *CurDAG, SDValue Op,
assert(BitWidth == 32 || BitWidth == 64);
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(Op, KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Op, KnownZero, KnownOne);
// Non-zero in the sense that they're not provably zero, which is the key
// point if we want to use this value
uint64_t NonZeroBits = (~KnownZero).getZExtValue();
// Discard a constant AND mask if present. It's safe because the node will
- // already have been factored into the ComputeMaskedBits calculation above.
+ // already have been factored into the computeKnownBits calculation above.
uint64_t AndImm;
if (isOpcWithIntImmediate(Op.getNode(), ISD::AND, AndImm)) {
assert((~APInt(BitWidth, AndImm) & ~KnownZero) == 0);
@@ -1839,7 +1839,7 @@ static bool isBitfieldInsertOpFromOr(SDNode *N, unsigned &Opc, SDValue &Dst,
// AND with imm. Indeed, simplify-demanded-bits may have removed
// the AND instruction because it proves it was useless.
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(OrOpd1Val, KnownZero, KnownOne);
+ CurDAG->computeKnownBits(OrOpd1Val, KnownZero, KnownOne);
// Check if there is enough room for the second operand to appear
// in the first one
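In the bitfield-positioning check above, the "non-zero bits" are simply the complement of KnownZero, and a constant AND mask can be discarded because computeKnownBits has already folded it in, which is what the assert verifies. A standalone sketch of those two facts (illustrative names, not the LLVM API):

#include <cstdint>

// Bits that are not provably zero; only these can contribute to the value.
uint64_t nonZeroBits(uint64_t KnownZero) { return ~KnownZero; }

// An AND with immediate AndImm adds no information when every bit it would
// clear is already known zero, mirroring the assert in the hunk:
// (~AndImm & ~KnownZero) == 0.
bool andMaskAlreadyFolded(uint64_t AndImm, uint64_t KnownZero) {
  return (~AndImm & ~KnownZero) == 0;
}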
diff --git a/llvm/lib/Target/ARM64/ARM64ISelLowering.cpp b/llvm/lib/Target/ARM64/ARM64ISelLowering.cpp
index 546343a0feb..f00e829070a 100644
--- a/llvm/lib/Target/ARM64/ARM64ISelLowering.cpp
+++ b/llvm/lib/Target/ARM64/ARM64ISelLowering.cpp
@@ -562,10 +562,10 @@ EVT ARM64TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
return VT.changeVectorElementTypeToInteger();
}
-/// computeMaskedBitsForTargetNode - Determine which of the bits specified in
+/// computeKnownBitsForTargetNode - Determine which of the bits specified in
/// Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
-void ARM64TargetLowering::computeMaskedBitsForTargetNode(
+void ARM64TargetLowering::computeKnownBitsForTargetNode(
const SDValue Op, APInt &KnownZero, APInt &KnownOne,
const SelectionDAG &DAG, unsigned Depth) const {
switch (Op.getOpcode()) {
@@ -573,8 +573,8 @@ void ARM64TargetLowering::computeMaskedBitsForTargetNode(
break;
case ARM64ISD::CSEL: {
APInt KnownZero2, KnownOne2;
- DAG.ComputeMaskedBits(Op->getOperand(0), KnownZero, KnownOne, Depth + 1);
- DAG.ComputeMaskedBits(Op->getOperand(1), KnownZero2, KnownOne2, Depth + 1);
+ DAG.computeKnownBits(Op->getOperand(0), KnownZero, KnownOne, Depth + 1);
+ DAG.computeKnownBits(Op->getOperand(1), KnownZero2, KnownOne2, Depth + 1);
KnownZero &= KnownZero2;
KnownOne &= KnownOne2;
break;
diff --git a/llvm/lib/Target/ARM64/ARM64ISelLowering.h b/llvm/lib/Target/ARM64/ARM64ISelLowering.h
index 7292e06da77..fd29a51b40e 100644
--- a/llvm/lib/Target/ARM64/ARM64ISelLowering.h
+++ b/llvm/lib/Target/ARM64/ARM64ISelLowering.h
@@ -201,12 +201,12 @@ public:
/// value.
CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg) const;
- /// computeMaskedBitsForTargetNode - Determine which of the bits specified in
+ /// computeKnownBitsForTargetNode - Determine which of the bits specified in
/// Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
- void computeMaskedBitsForTargetNode(const SDValue Op, APInt &KnownZero,
- APInt &KnownOne, const SelectionDAG &DAG,
- unsigned Depth = 0) const override;
+ void computeKnownBitsForTargetNode(const SDValue Op, APInt &KnownZero,
+ APInt &KnownOne, const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
MVT getScalarShiftAmountTy(EVT LHSTy) const override;
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 37b3137bd34..22c835f90b0 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -415,8 +415,8 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
SDLoc dl(N);
APInt LKZ, LKO, RKZ, RKO;
- CurDAG->ComputeMaskedBits(Op0, LKZ, LKO);
- CurDAG->ComputeMaskedBits(Op1, RKZ, RKO);
+ CurDAG->computeKnownBits(Op0, LKZ, LKO);
+ CurDAG->computeKnownBits(Op1, RKZ, RKO);
unsigned TargetMask = LKZ.getZExtValue();
unsigned InsertMask = RKZ.getZExtValue();
@@ -463,7 +463,7 @@ SDNode *PPCDAGToDAGISel::SelectBitfieldInsert(SDNode *N) {
// if we're going to fold the masking with the insert, all bits not
// know to be zero in the mask are known to be one.
APInt MKZ, MKO;
- CurDAG->ComputeMaskedBits(Op1.getOperand(1), MKZ, MKO);
+ CurDAG->computeKnownBits(Op1.getOperand(1), MKZ, MKO);
bool CanFoldMask = InsertMask == MKO.getZExtValue();
unsigned SHOpc = Op1.getOperand(0).getOpcode();
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 2fd5e1061a1..b31a031834d 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1175,12 +1175,12 @@ bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
// disjoint.
APInt LHSKnownZero, LHSKnownOne;
APInt RHSKnownZero, RHSKnownOne;
- DAG.ComputeMaskedBits(N.getOperand(0),
- LHSKnownZero, LHSKnownOne);
+ DAG.computeKnownBits(N.getOperand(0),
+ LHSKnownZero, LHSKnownOne);
if (LHSKnownZero.getBoolValue()) {
- DAG.ComputeMaskedBits(N.getOperand(1),
- RHSKnownZero, RHSKnownOne);
+ DAG.computeKnownBits(N.getOperand(1),
+ RHSKnownZero, RHSKnownOne);
// If all of the bits are known zero on the LHS or RHS, the add won't
// carry.
if (~(LHSKnownZero | RHSKnownZero) == 0) {
@@ -1280,7 +1280,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
// (for better address arithmetic) if the LHS and RHS of the OR are
// provably disjoint.
APInt LHSKnownZero, LHSKnownOne;
- DAG.ComputeMaskedBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
+ DAG.computeKnownBits(N.getOperand(0), LHSKnownZero, LHSKnownOne);
if ((LHSKnownZero.getZExtValue()|~(uint64_t)imm) == ~0ULL) {
// If all of the bits are known zero on the LHS or RHS, the add won't
@@ -7355,8 +7355,8 @@ SDValue PPCTargetLowering::DAGCombineTruncBoolExt(SDNode *N,
// that the high bits are equal.
APInt Op1Zero, Op1One;
APInt Op2Zero, Op2One;
- DAG.ComputeMaskedBits(N->getOperand(0), Op1Zero, Op1One);
- DAG.ComputeMaskedBits(N->getOperand(1), Op2Zero, Op2One);
+ DAG.computeKnownBits(N->getOperand(0), Op1Zero, Op1One);
+ DAG.computeKnownBits(N->getOperand(1), Op2Zero, Op2One);
// We don't really care about what is known about the first bit (if
// anything), so clear it in all masks prior to comparing them.
@@ -8406,11 +8406,11 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
// Inline Assembly Support
//===----------------------------------------------------------------------===//
-void PPCTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
+void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) const {
KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
switch (Op.getOpcode()) {
default: break;
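The PowerPC address-selection hunks use known zeros to prove that an OR behaves like an ADD: if every bit position is known zero on at least one side, the operands cannot both contribute a bit, so the addition cannot carry. A standalone sketch of that disjointness test (illustrative names, not the LLVM API):

#include <cstdint>

// OR can be treated as ADD (handy for address arithmetic) when the operands'
// possibly-set bits are disjoint, i.e. each bit is known zero on some side.
bool orActsLikeAdd(uint64_t LHSKnownZero, uint64_t RHSKnownZero) {
  return ~(LHSKnownZero | RHSKnownZero) == 0;
}

// The immediate form in SelectAddressRegImm is the same idea with a constant
// RHS: every bit not cleared by the immediate must be known zero on the LHS,
// mirroring (LHSKnownZero | ~Imm) == ~0ULL.
bool orWithImmActsLikeAdd(uint64_t LHSKnownZero, uint64_t Imm) {
  return (LHSKnownZero | ~Imm) == ~0ULL;
}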
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 98bdf266438..080ef5d0f76 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -400,11 +400,11 @@ namespace llvm {
unsigned getRegisterByName(const char* RegName, EVT VT) const override;
- void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const override;
+ void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
diff --git a/llvm/lib/Target/R600/AMDGPUISelLowering.cpp b/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
index 2d2f4f4e6d4..185201d1fb6 100644
--- a/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -1223,7 +1223,7 @@ SDValue AMDGPUTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
static bool isU24(SDValue Op, SelectionDAG &DAG) {
APInt KnownZero, KnownOne;
EVT VT = Op.getValueType();
- DAG.ComputeMaskedBits(Op, KnownZero, KnownOne);
+ DAG.computeKnownBits(Op, KnownZero, KnownOne);
return (VT.getSizeInBits() - KnownZero.countLeadingOnes()) <= 24;
}
@@ -1416,22 +1416,22 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
}
}
-static void computeMaskedBitsForMinMax(const SDValue Op0,
- const SDValue Op1,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) {
+static void computeKnownBitsForMinMax(const SDValue Op0,
+ const SDValue Op1,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) {
APInt Op0Zero, Op0One;
APInt Op1Zero, Op1One;
- DAG.ComputeMaskedBits(Op0, Op0Zero, Op0One, Depth);
- DAG.ComputeMaskedBits(Op1, Op1Zero, Op1One, Depth);
+ DAG.computeKnownBits(Op0, Op0Zero, Op0One, Depth);
+ DAG.computeKnownBits(Op1, Op1Zero, Op1One, Depth);
KnownZero = Op0Zero & Op1Zero;
KnownOne = Op0One & Op1One;
}
-void AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
+void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
const SDValue Op,
APInt &KnownZero,
APInt &KnownOne,
@@ -1448,8 +1448,8 @@ void AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
case AMDGPUIntrinsic::AMDGPU_umax:
case AMDGPUIntrinsic::AMDGPU_imin:
case AMDGPUIntrinsic::AMDGPU_umin:
- computeMaskedBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
- KnownZero, KnownOne, DAG, Depth);
+ computeKnownBitsForMinMax(Op.getOperand(1), Op.getOperand(2),
+ KnownZero, KnownOne, DAG, Depth);
break;
default:
break;
@@ -1461,8 +1461,8 @@ void AMDGPUTargetLowering::computeMaskedBitsForTargetNode(
case AMDGPUISD::UMAX:
case AMDGPUISD::SMIN:
case AMDGPUISD::UMIN:
- computeMaskedBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
- KnownZero, KnownOne, DAG, Depth);
+ computeKnownBitsForMinMax(Op.getOperand(0), Op.getOperand(1),
+ KnownZero, KnownOne, DAG, Depth);
break;
default:
break;
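isU24 above asks whether a value provably fits in 24 bits: it does when the top (width − 24) bits are all known zero, i.e. KnownZero has at least that many leading ones. A standalone 32-bit sketch of the same test (illustrative names, not the LLVM API):

#include <cstdint>

// Count how many of the most significant bits of X are set.
unsigned countLeadingOnes32(uint32_t X) {
  return X == 0xffffffffu ? 32u : static_cast<unsigned>(__builtin_clz(~X));
}

// The value fits in an unsigned 24-bit quantity when all bits above bit 23
// are provably zero, mirroring (Width - KnownZero.countLeadingOnes()) <= 24.
bool fitsInU24(uint32_t KnownZero, unsigned Width = 32) {
  return Width - countLeadingOnes32(KnownZero) <= 24;
}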
diff --git a/llvm/lib/Target/R600/AMDGPUISelLowering.h b/llvm/lib/Target/R600/AMDGPUISelLowering.h
index 4a2dad34af0..2552e807be8 100644
--- a/llvm/lib/Target/R600/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/R600/AMDGPUISelLowering.h
@@ -118,11 +118,11 @@ public:
/// \brief Determine which of the bits specified in \p Mask are known to be
/// either zero or one and return them in the \p KnownZero and \p KnownOne
/// bitsets.
- void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const override;
+ void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
// Functions defined in AMDILISelLowering.cpp
public:
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index e733dd8df32..b934c251f10 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1708,7 +1708,7 @@ EVT SparcTargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
/// isMaskedValueZeroForTargetNode - Return true if 'Op & Mask' is known to
/// be zero. Op is expected to be a target specific node. Used by DAG
/// combiner.
-void SparcTargetLowering::computeMaskedBitsForTargetNode
+void SparcTargetLowering::computeKnownBitsForTargetNode
(const SDValue Op,
APInt &KnownZero,
APInt &KnownOne,
@@ -1722,8 +1722,8 @@ void SparcTargetLowering::computeMaskedBitsForTargetNode
case SPISD::SELECT_ICC:
case SPISD::SELECT_XCC:
case SPISD::SELECT_FCC:
- DAG.ComputeMaskedBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
- DAG.ComputeMaskedBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
+ DAG.computeKnownBits(Op.getOperand(1), KnownZero, KnownOne, Depth+1);
+ DAG.computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
assert((KnownZero2 & KnownOne2) == 0 && "Bits known to be one AND zero?");
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.h b/llvm/lib/Target/Sparc/SparcISelLowering.h
index 6fb894026ad..a24cc82eecb 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.h
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -57,14 +57,14 @@ namespace llvm {
SparcTargetLowering(TargetMachine &TM);
SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
- /// computeMaskedBitsForTargetNode - Determine which of the bits specified
+ /// computeKnownBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
- void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const override;
+ void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
diff --git a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
index c2600064744..24f7584ae9c 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelDAGToDAG.cpp
@@ -665,7 +665,7 @@ bool SystemZDAGToDAGISel::detectOrAndInsertion(SDValue &Op,
uint64_t Used = allOnes(Op.getValueType().getSizeInBits());
if (Used != (AndMask | InsertMask)) {
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(Op.getOperand(0), KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Op.getOperand(0), KnownZero, KnownOne);
if (Used != (AndMask | InsertMask | KnownZero.getZExtValue()))
return false;
}
@@ -714,7 +714,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
// been removed from the mask. See if adding them back in makes the
// mask suitable.
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Input, KnownZero, KnownOne);
Mask |= KnownZero.getZExtValue();
if (!refineRxSBGMask(RxSBG, Mask))
return false;
@@ -738,7 +738,7 @@ bool SystemZDAGToDAGISel::expandRxSBG(RxSBGOperands &RxSBG) const {
// been removed from the mask. See if adding them back in makes the
// mask suitable.
APInt KnownZero, KnownOne;
- CurDAG->ComputeMaskedBits(Input, KnownZero, KnownOne);
+ CurDAG->computeKnownBits(Input, KnownZero, KnownOne);
Mask &= ~KnownOne.getZExtValue();
if (!refineRxSBGMask(RxSBG, Mask))
return false;
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index a99647a01fc..6fe1fb9f7d3 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -2125,8 +2125,8 @@ SDValue SystemZTargetLowering::lowerOR(SDValue Op, SelectionDAG &DAG) const {
// Get the known-zero masks for each operand.
SDValue Ops[] = { Op.getOperand(0), Op.getOperand(1) };
APInt KnownZero[2], KnownOne[2];
- DAG.ComputeMaskedBits(Ops[0], KnownZero[0], KnownOne[0]);
- DAG.ComputeMaskedBits(Ops[1], KnownZero[1], KnownOne[1]);
+ DAG.computeKnownBits(Ops[0], KnownZero[0], KnownOne[0]);
+ DAG.computeKnownBits(Ops[1], KnownZero[1], KnownOne[1]);
// See if the upper 32 bits of one operand and the lower 32 bits of the
// other are known zero. They are the low and high operands respectively.
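The SystemZ lowerOR hunk checks the complementary halves of the two operands: if the high 32 bits of one operand and the low 32 bits of the other are known zero, the OR merely glues a low half to a high half. A standalone sketch of that test (illustrative names, not the LLVM API):

#include <cstdint>

// True when one operand supplies only the low 32 bits and the other only the
// high 32 bits (in either order), so (A | B) just pairs the two halves.
bool isLowHighPair(uint64_t KnownZeroA, uint64_t KnownZeroB) {
  const uint64_t High = 0xffffffff00000000ULL;
  const uint64_t Low  = 0x00000000ffffffffULL;
  return ((KnownZeroA & High) == High && (KnownZeroB & Low) == Low) ||
         ((KnownZeroB & High) == High && (KnownZeroA & Low) == Low);
}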
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index c9c227a0a45..03c9620db4a 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -927,7 +927,7 @@ static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
APInt MaskedHighBits =
APInt::getHighBitsSet(X.getSimpleValueType().getSizeInBits(), MaskLZ);
APInt KnownZero, KnownOne;
- DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
+ DAG.computeKnownBits(X, KnownZero, KnownOne);
if (MaskedHighBits != KnownZero) return true;
// We've identified a pattern that can be transformed into a single shift
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index bae3e05d657..6e327aa1326 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -10217,7 +10217,7 @@ SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
unsigned AndBitWidth = And.getValueSizeInBits();
if (BitWidth > AndBitWidth) {
APInt Zeros, Ones;
- DAG.ComputeMaskedBits(Op0, Zeros, Ones);
+ DAG.computeKnownBits(Op0, Zeros, Ones);
if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
return SDValue();
}
@@ -17103,11 +17103,11 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
// X86 Optimization Hooks
//===----------------------------------------------------------------------===//
-void X86TargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
+void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) const {
unsigned BitWidth = KnownZero.getBitWidth();
unsigned Opc = Op.getOpcode();
assert((Opc >= ISD::BUILTIN_OP_END ||
@@ -17973,7 +17973,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
// Another special case: If C was a sign bit, the sub has been
// canonicalized into a xor.
- // FIXME: Would it be better to use ComputeMaskedBits to determine whether
+ // FIXME: Would it be better to use computeKnownBits to determine whether
// it's safe to decanonicalize the xor?
// x s< 0 ? x^C : 0 --> subus x, C
if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 7d8a10c187c..48b14b04fe1 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -618,14 +618,14 @@ namespace llvm {
/// getSetCCResultType - Return the value type to use for ISD::SETCC.
EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
- /// computeMaskedBitsForTargetNode - Determine which of the bits specified
+ /// computeKnownBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
- void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const override;
+ void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
// ComputeNumSignBitsForTargetNode - Determine the number of bits in the
// operation that are sign bits.
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 401849f40ec..34d8fb9e181 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1187,9 +1187,9 @@ def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
APInt KnownZero0, KnownOne0;
- CurDAG->ComputeMaskedBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
+ CurDAG->computeKnownBits(N->getOperand(0), KnownZero0, KnownOne0, 0);
APInt KnownZero1, KnownOne1;
- CurDAG->ComputeMaskedBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
+ CurDAG->computeKnownBits(N->getOperand(1), KnownZero1, KnownOne1, 0);
return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 1c52c70afe7..3cd1b91ae4c 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -434,7 +434,7 @@ lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
APInt KnownZero, KnownOne;
- DAG.ComputeMaskedBits(Value, KnownZero, KnownOne);
+ DAG.computeKnownBits(Value, KnownZero, KnownOne);
return KnownZero.countTrailingOnes() >= 2;
}
@@ -1699,7 +1699,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
- DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
+ DAG.computeKnownBits(N2, KnownZero, KnownOne);
if ((KnownZero & Mask) == Mask) {
SDValue Carry = DAG.getConstant(0, VT);
SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
@@ -1722,7 +1722,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
- DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
+ DAG.computeKnownBits(N2, KnownZero, KnownOne);
if ((KnownZero & Mask) == Mask) {
SDValue Borrow = N2;
SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
@@ -1738,7 +1738,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
APInt KnownZero, KnownOne;
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
VT.getSizeInBits() - 1);
- DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
+ DAG.computeKnownBits(N2, KnownZero, KnownOne);
if ((KnownZero & Mask) == Mask) {
SDValue Borrow = DAG.getConstant(0, VT);
SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
@@ -1860,11 +1860,11 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
return SDValue();
}
-void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth) const {
+void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth) const {
KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
switch (Op.getOpcode()) {
default: break;
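The XCore hunks use known bits in two ways: isWordAligned treats a value as 4-byte aligned when its two low bits are known zero, and the carry/borrow combines only fire when all high bits of the carry operand are known zero, so it is provably 0 or 1. A standalone sketch of both checks (illustrative names, not the LLVM API):

#include <cstdint>

// Word (4-byte) aligned when the two least significant bits are proven zero,
// i.e. KnownZero.countTrailingOnes() >= 2 in the original.
bool isWordAligned(uint32_t KnownZero) {
  return (KnownZero & 0x3u) == 0x3u;
}

// A carry/borrow operand is provably 0 or 1 when every bit above bit 0 is
// known zero; the hunks express this as (KnownZero & Mask) == Mask with Mask
// covering the high (Width - 1) bits.
bool isZeroOrOne(uint32_t KnownZero) {
  const uint32_t HighBits = ~0x1u;
  return (KnownZero & HighBits) == HighBits;
}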
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.h b/llvm/lib/Target/XCore/XCoreISelLowering.h
index 4e662fc4ccb..d28715b7178 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.h
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.h
@@ -183,11 +183,11 @@ namespace llvm {
SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
- void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const override;
+ void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
SDValue
LowerFormalArguments(SDValue Chain,