Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/Lint.cpp                                      |  4
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp                             | 50
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp                  |  9
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp                |  2
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp                   |  2
-rw-r--r--  llvm/lib/Target/ARM/ARMISelLowering.cpp                         |  5
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.cpp                     |  2
-rw-r--r--  llvm/lib/Target/Sparc/SparcISelLowering.cpp                     |  2
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp                         |  4
-rw-r--r--  llvm/lib/Target/XCore/XCoreISelLowering.cpp                     |  2
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp            |  2
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp         |  2
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp |  3
13 files changed, 36 insertions(+), 53 deletions(-)
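
The patch below replaces hand-rolled manipulation of KnownBits::Zero and KnownBits::One with the dedicated helpers resetAll(), setAllZero(), setAllOnes(), isZero(), isAllOnes(), and isUnknown(). The standalone sketch that follows is not the real llvm/Support/KnownBits.h (which stores two APInts); the struct name and the 64-bit masks are illustrative assumptions. It only models the equivalences the hunks rely on: Known.isZero() for Known.Zero.isAllOnesValue(), Known.isAllOnes() for Known.One.isAllOnesValue(), and Known.isUnknown() for both masks being empty.

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    // Minimal standalone model of the KnownBits helpers adopted by this patch.
    struct KnownBitsModel {
      uint64_t Zero = 0; // bit set => that bit of the value is known to be 0
      uint64_t One = 0;  // bit set => that bit of the value is known to be 1
      unsigned BitWidth;

      explicit KnownBitsModel(unsigned BW) : BitWidth(BW) {
        assert(BW >= 1 && BW <= 64 && "model only covers widths up to 64");
      }

      uint64_t mask() const {
        return BitWidth == 64 ? ~0ULL : ((1ULL << BitWidth) - 1);
      }

      // Replaces "Zero.clearAllBits(); One.clearAllBits();" -- forget everything.
      void resetAll() { Zero = 0; One = 0; }
      // Replaces "Zero.setAllBits(); One.clearAllBits();" -- value known to be 0.
      void setAllZero() { Zero = mask(); One = 0; }
      // Replaces "One.setAllBits(); Zero.clearAllBits();" -- value known all-ones.
      void setAllOnes() { One = mask(); Zero = 0; }
      // Queries replacing isAllOnesValue() checks and "Zero == 0 && One == 0".
      bool isZero() const { return Zero == mask(); }
      bool isAllOnes() const { return One == mask(); }
      bool isUnknown() const { return Zero == 0 && One == 0; }
    };

    int main() {
      KnownBitsModel K(8);
      assert(K.isUnknown()); // freshly constructed: nothing known
      K.setAllZero();
      assert(K.isZero() && !K.isAllOnes());
      K.setAllOnes();
      assert(K.isAllOnes() && !K.isZero());
      K.resetAll();
      assert(K.isUnknown());
      std::cout << "KnownBits helper semantics hold\n";
      return 0;
    }
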
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index 59813824644..471ccb62970 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -537,7 +537,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
unsigned BitWidth = V->getType()->getIntegerBitWidth();
KnownBits Known(BitWidth);
computeKnownBits(V, Known, DL, 0, AC, dyn_cast<Instruction>(V), DT);
- return Known.Zero.isAllOnesValue();
+ return Known.isZero();
}
// Per-component check doesn't work with zeroinitializer
@@ -558,7 +558,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
KnownBits Known(BitWidth);
computeKnownBits(Elem, Known, DL);
- if (Known.Zero.isAllOnesValue())
+ if (Known.isZero())
return true;
}
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index e33e0812a08..a7f3ff672ae 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -342,7 +342,6 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
// Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
- Known.One.clearAllBits();
unsigned TrailZ = Known.Zero.countTrailingOnes() +
Known2.Zero.countTrailingOnes();
unsigned LeadZ = std::max(Known.Zero.countLeadingOnes() +
@@ -351,7 +350,7 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
TrailZ = std::min(TrailZ, BitWidth);
LeadZ = std::min(LeadZ, BitWidth);
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setLowBits(TrailZ);
Known.Zero.setHighBits(LeadZ);
@@ -529,15 +528,13 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
if (Arg == V && isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
assert(BitWidth == 1 && "assume operand is not i1?");
- Known.Zero.clearAllBits();
- Known.One.setAllBits();
+ Known.setAllOnes();
return;
}
if (match(Arg, m_Not(m_Specific(V))) &&
isValidAssumeForContext(I, Q.CxtI, Q.DT)) {
assert(BitWidth == 1 && "assume operand is not i1?");
- Known.Zero.setAllBits();
- Known.One.clearAllBits();
+ Known.setAllZero();
return;
}
@@ -719,7 +716,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- if (RHSKnown.One.isAllOnesValue() || RHSKnown.isNonNegative()) {
+ if (RHSKnown.isAllOnes() || RHSKnown.isNonNegative()) {
// We know that the sign bit is zero.
Known.makeNonNegative();
}
@@ -741,7 +738,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- if (RHSKnown.Zero.isAllOnesValue() || RHSKnown.isNegative()) {
+ if (RHSKnown.isZero() || RHSKnown.isNegative()) {
// We know that the sign bit is one.
Known.makeNegative();
}
@@ -776,8 +773,7 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
// behavior, or we might have a bug in the compiler. We can't assert/crash, so
// clear out the known bits, try to warn the user, and hope for the best.
if (Known.Zero.intersects(Known.One)) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
if (Q.ORE) {
auto *CxtI = const_cast<Instruction *>(Q.CxtI);
@@ -813,10 +809,8 @@ static void computeKnownBitsFromShiftOperator(
// If there is conflict between Known.Zero and Known.One, this must be an
// overflowing left shift, so the shift result is undefined. Clear Known
// bits so that other code could propagate this undef.
- if ((Known.Zero & Known.One) != 0) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
- }
+ if ((Known.Zero & Known.One) != 0)
+ Known.resetAll();
return;
}
@@ -826,8 +820,7 @@ static void computeKnownBitsFromShiftOperator(
// If the shift amount could be greater than or equal to the bit-width of the LHS, the
// value could be undef, so we don't know anything about it.
if ((~Known.Zero).uge(BitWidth)) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
return;
}
@@ -839,8 +832,7 @@ static void computeKnownBitsFromShiftOperator(
// It would be more-clearly correct to use the two temporaries for this
// calculation. Reusing the APInts here to prevent unnecessary allocations.
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
// If we know the shifter operand is nonzero, we can sometimes infer more
// known bits. However this is expensive to compute, so be lazy about it and
@@ -886,10 +878,8 @@ static void computeKnownBitsFromShiftOperator(
// return anything we'd like, but we need to make sure the sets of known bits
// stay disjoint (it should be better for some other code to actually
// propagate the undef than to pick a value here using known bits).
- if (Known.Zero.intersects(Known.One)) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
- }
+ if (Known.Zero.intersects(Known.One))
+ Known.resetAll();
}
static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
@@ -924,7 +914,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
m_Value(Y))) ||
match(I->getOperand(1), m_Add(m_Specific(I->getOperand(0)),
m_Value(Y))))) {
- Known2.Zero.clearAllBits(); Known2.One.clearAllBits();
+ Known2.resetAll();
computeKnownBits(Y, Known2, Depth + 1, Q);
if (Known2.One.countTrailingOnes() > 0)
Known.Zero.setBit(0);
@@ -965,8 +955,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
unsigned LeadZ = Known2.Zero.countLeadingOnes();
- Known2.One.clearAllBits();
- Known2.Zero.clearAllBits();
+ Known2.resetAll();
computeKnownBits(I->getOperand(1), Known2, Depth + 1, Q);
unsigned RHSUnknownLeadingOnes = Known2.One.countLeadingZeros();
if (RHSUnknownLeadingOnes != BitWidth)
@@ -1198,8 +1187,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
unsigned Leaders = std::max(Known.Zero.countLeadingOnes(),
Known2.Zero.countLeadingOnes());
- Known.One.clearAllBits();
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setHighBits(Leaders);
break;
}
@@ -1500,8 +1488,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
}
// Null and aggregate-zero are all-zeros.
if (isa<ConstantPointerNull>(V) || isa<ConstantAggregateZero>(V)) {
- Known.One.clearAllBits();
- Known.Zero.setAllBits();
+ Known.setAllZero();
return;
}
// Handle a constant vector by taking the intersection of the known bits of
@@ -1528,8 +1515,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
Constant *Element = CV->getAggregateElement(i);
auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
if (!ElementCI) {
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
return;
}
Elt = ElementCI->getValue();
@@ -1540,7 +1526,7 @@ void computeKnownBits(const Value *V, KnownBits &Known, unsigned Depth,
}
// Start out not knowing anything.
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
// We can't imply anything about undefs.
if (isa<UndefValue>(V))
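
The computeKnownBitsMul hunk above keeps the usual alignment argument: the product has at least TrailZ0 + TrailZ1 trailing zero bits and at least LeadZ0 + LeadZ1 - BitWidth leading zero bits, both clamped to the bit width. A minimal sketch of that arithmetic follows; the function and parameter names are illustrative, not LLVM API.

    #include <algorithm>
    #include <cassert>

    struct MulKnownZeros {
      unsigned TrailZ; // low bits of the product known to be zero
      unsigned LeadZ;  // high bits of the product known to be zero
    };

    // TrailZ0/TrailZ1 and LeadZ0/LeadZ1 are the known-zero runs of the operands.
    MulKnownZeros estimateMulZeros(unsigned TrailZ0, unsigned TrailZ1,
                                   unsigned LeadZ0, unsigned LeadZ1,
                                   unsigned BitWidth) {
      // (2^a * m) * (2^b * n) is a multiple of 2^(a+b).
      unsigned TrailZ = std::min(TrailZ0 + TrailZ1, BitWidth);
      // op0 < 2^(BW-LeadZ0) and op1 < 2^(BW-LeadZ1), so the product is below
      // 2^(2*BW - LeadZ0 - LeadZ1): at least LeadZ0+LeadZ1-BW high bits are zero.
      unsigned LeadZ =
          std::min(std::max(LeadZ0 + LeadZ1, BitWidth) - BitWidth, BitWidth);
      return MulKnownZeros{TrailZ, LeadZ};
    }

    int main() {
      // Two 32-bit operands, each 8-byte aligned (3 trailing zeros) and known
      // to fit in 12 bits (20 leading zeros):
      MulKnownZeros R = estimateMulZeros(3, 3, 20, 20, 32);
      assert(R.TrailZ == 6); // the product is at least 64-byte aligned
      assert(R.LeadZ == 8);  // the product fits in 24 bits
      return 0;
    }
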
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index efcb6158ed3..d605a1dc1c2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -2044,8 +2044,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
if (M < 0) {
// For UNDEF elements, we don't know anything about the common state of
// the shuffle result.
- Known.One.clearAllBits();
- Known.Zero.clearAllBits();
+ Known.resetAll();
DemandedLHS.clearAllBits();
DemandedRHS.clearAllBits();
break;
@@ -2218,14 +2217,13 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
// Also compute a conservative estimate for high known-0 bits.
// More trickiness is possible, but this is sufficient for the
// interesting case of alignment computation.
- Known.One.clearAllBits();
unsigned TrailZ = Known.Zero.countTrailingOnes() +
Known2.Zero.countTrailingOnes();
unsigned LeadZ = std::max(Known.Zero.countLeadingOnes() +
Known2.Zero.countLeadingOnes(),
BitWidth) - BitWidth;
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setLowBits(std::min(TrailZ, BitWidth));
Known.Zero.setHighBits(std::min(LeadZ, BitWidth));
break;
@@ -2598,8 +2596,7 @@ void SelectionDAG::computeKnownBits(SDValue Op, KnownBits &Known,
uint32_t Leaders = std::max(Known.Zero.countLeadingOnes(),
Known2.Zero.countLeadingOnes());
- Known.One.clearAllBits();
- Known.Zero.clearAllBits();
+ Known.resetAll();
Known.Zero.setHighBits(Leaders);
break;
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index cf3eed827af..23f597db140 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -1303,7 +1303,7 @@ void TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
Op.getOpcode() == ISD::INTRINSIC_VOID) &&
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
}
/// This method can be implemented by targets that want to expose additional
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 64e1b8f0d7f..915d1d9e0e6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -3580,7 +3580,7 @@ void AMDGPUTargetLowering::computeKnownBitsForTargetNode(
const SDValue Op, KnownBits &Known,
const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
- Known.Zero.clearAllBits(); Known.One.clearAllBits(); // Don't know anything.
+ Known.resetAll(); // Don't know anything.
KnownBits Known2;
unsigned Opc = Op.getOpcode();
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 932ab9d00e4..f248a829b8e 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -12640,7 +12640,7 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
const SelectionDAG &DAG,
unsigned Depth) const {
unsigned BitWidth = Known.getBitWidth();
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
case ARMISD::ADDC:
@@ -12655,7 +12655,8 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
case ARMISD::CMOV: {
// Bits are known zero/one if known on the LHS and RHS.
DAG.computeKnownBits(Op.getOperand(0), Known, Depth+1);
- if (Known.Zero == 0 && Known.One == 0) return;
+ if (Known.isUnknown())
+ return;
KnownBits KnownRHS;
DAG.computeKnownBits(Op.getOperand(1), KnownRHS, Depth+1);
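
The ARMISD::CMOV hunk above bails out early once nothing is known about the first operand, because the result's known bits are the intersection of what is known about the two arms. A standalone sketch of that merge rule follows; plain 64-bit masks stand in for APInt, and mergeSelectArms is an illustrative name, not the in-tree code.

    #include <cassert>
    #include <cstdint>

    struct Known {
      uint64_t Zero, One; // bit set in Zero/One => that bit is known 0/1
      bool isUnknown() const { return Zero == 0 && One == 0; }
    };

    Known mergeSelectArms(const Known &LHS, const Known &RHS) {
      // Mirrors the early-out above: once one arm contributes no information,
      // the intersection cannot contribute any either.
      if (LHS.isUnknown())
        return LHS;
      return Known{LHS.Zero & RHS.Zero,  // known 0 on both arms
                   LHS.One & RHS.One};   // known 1 on both arms
    }

    int main() {
      Known A{0xF0, 0x0F}; // low nibble known 1, high nibble known 0
      Known B{0xFC, 0x03}; // low two bits known 1, high six bits known 0
      Known M = mergeSelectArms(A, B);
      assert(M.Zero == 0xF0 && M.One == 0x03);
      assert(mergeSelectArms(Known{0, 0}, B).isUnknown());
      return 0;
    }
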
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 483e9b171d5..685f24cb502 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -12031,7 +12031,7 @@ void PPCTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
case PPCISD::LBRX: {
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index c44e371856a..acb34d5baaa 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -1881,7 +1881,7 @@ void SparcTargetLowering::computeKnownBitsForTargetNode
const SelectionDAG &DAG,
unsigned Depth) const {
KnownBits Known2;
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b5d1962a297..a1ade437297 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -26614,7 +26614,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
"Should use MaskedValueIsZero if you don't know whether Op"
" is a target node!");
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Opc) {
default: break;
case X86ISD::ADD:
@@ -26644,7 +26644,7 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
case X86ISD::VSRLI: {
if (auto *ShiftImm = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
if (ShiftImm->getAPIntValue().uge(VT.getScalarSizeInBits())) {
- Known.Zero.setAllBits();
+ Known.setAllZero();
break;
}
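
The X86ISD::VSRLI hunk above relies on the x86 vector logical shifts producing zero, rather than an undefined value, once the immediate count reaches the element width, which is why the result can be marked all-zero. A scalar, single-lane sketch of that behaviour follows; vsrliLane is a hypothetical model, not a target hook.

    #include <cassert>
    #include <cstdint>

    // Hypothetical single-lane model of a 16-bit-element VSRLI: the element is
    // zeroed once the immediate count reaches the element width.
    uint16_t vsrliLane(uint16_t V, unsigned Imm) {
      return Imm >= 16 ? 0 : static_cast<uint16_t>(V >> Imm);
    }

    int main() {
      assert(vsrliLane(0xABCD, 3) == (0xABCD >> 3));
      assert(vsrliLane(0xABCD, 16) == 0);  // oversized shift: every bit known zero
      assert(vsrliLane(0xABCD, 200) == 0);
      return 0;
    }
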
diff --git a/llvm/lib/Target/XCore/XCoreISelLowering.cpp b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
index 4d3ecf25dc3..b8742683a0c 100644
--- a/llvm/lib/Target/XCore/XCoreISelLowering.cpp
+++ b/llvm/lib/Target/XCore/XCoreISelLowering.cpp
@@ -1825,7 +1825,7 @@ void XCoreTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth) const {
- Known.Zero.clearAllBits(); Known.One.clearAllBits();
+ Known.resetAll();
switch (Op.getOpcode()) {
default: break;
case XCoreISD::LADD:
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 4fd90d78a63..6989d67f006 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3619,7 +3619,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// then this one is redundant, and should be removed.
KnownBits Known(1);
computeKnownBits(IIOperand, Known, 0, II);
- if (Known.One.isAllOnesValue())
+ if (Known.isAllOnes())
return eraseInstFromFunction(*II);
// Update the cache of affected values for this assumption (we might be
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index 60970775de6..34ce235b3fe 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -4050,7 +4050,7 @@ Instruction *InstCombiner::foldICmpUsingKnownBits(ICmpInst &I) {
// is set. If the comparison is against zero, then this is a check to see if
// *that* bit is set.
APInt Op0KnownZeroInverted = ~Op0Known.Zero;
- if (~Op1Known.Zero == 0) {
+ if (Op1Known.isZero()) {
// If the LHS is an AND with the same constant, look through it.
Value *LHS = nullptr;
const APInt *LHSC;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
index c42c76addfc..05b01774cd5 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp
@@ -120,8 +120,7 @@ Value *InstCombiner::SimplifyDemandedUseBits(Value *V, APInt DemandedMask,
return nullptr;
}
- Known.Zero.clearAllBits();
- Known.One.clearAllBits();
+ Known.resetAll();
if (DemandedMask == 0) // Not demanding any bits from V.
return UndefValue::get(VTy);
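
The SimplifyDemandedUseBits hunk above ends with the rule that a value none of whose bits are demanded may be replaced by undef. The tiny illustration below shows demanded-bits reasoning independently of the InstCombine code; userOnlyLowByte is a made-up stand-in for a user that demands only the low byte.

    #include <cassert>
    #include <cstdint>

    // Made-up user that demands only the low byte of its operand.
    uint32_t userOnlyLowByte(uint32_t X) { return X & 0xFFu; }

    int main() {
      uint32_t A = 0x12345678u;
      uint32_t B = 0xDEADBE78u; // differs from A only in bits the user never reads
      assert(userOnlyLowByte(A) == userOnlyLowByte(B));
      // With an all-zero demanded mask, no bit is ever read, so any replacement
      // value (in IR, undef) is acceptable -- which is what the hunk above does.
      return 0;
    }
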