-rw-r--r--  llvm/lib/Analysis/ConstantFolding.cpp                    |  7
-rw-r--r--  llvm/lib/Analysis/InstructionSimplify.cpp                | 24
-rw-r--r--  llvm/lib/Analysis/Lint.cpp                               |  8
-rw-r--r--  llvm/lib/Analysis/ScalarEvolution.cpp                    |  8
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp    | 14
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp     |  4
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp     | 16
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp  |  3
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstructionCombining.cpp | 15
-rw-r--r--  llvm/lib/Transforms/Scalar/GuardWidening.cpp             |  4
-rw-r--r--  llvm/lib/Transforms/Utils/Local.cpp                      |  6
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyCFG.cpp                |  3
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp           |  4
13 files changed, 34 insertions(+), 82 deletions(-)
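
This commit migrates callers from the out-parameter form of computeKnownBits to the overload that returns a KnownBits value, so call sites no longer need to look up the bit width and construct the KnownBits object themselves; where the width is still needed, it is read back via Known.getBitWidth(). A minimal sketch of the pattern follows (not taken verbatim from any hunk; V and DL are placeholder names, and the exact argument lists vary per call site below):

    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Value.h"
    #include "llvm/Support/KnownBits.h"
    using namespace llvm;

    static bool isKnownZeroSketch(const Value *V, const DataLayout &DL) {
      // Before this commit, the caller sized and passed the result object:
      //   unsigned BitWidth = DL.getTypeSizeInBits(V->getType()->getScalarType());
      //   KnownBits Known(BitWidth);
      //   computeKnownBits(V, Known, DL);
      //
      // After: the value-returning overload sizes the result internally.
      KnownBits Known = computeKnownBits(V, DL);
      return Known.isZero();
    }
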
diff --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 0ca712bbfe7..79517ec6a3a 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -687,11 +687,8 @@ Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
// bits.
if (Opc == Instruction::And) {
- unsigned BitWidth = DL.getTypeSizeInBits(Op0->getType()->getScalarType());
- KnownBits Known0(BitWidth);
- KnownBits Known1(BitWidth);
- computeKnownBits(Op0, Known0, DL);
- computeKnownBits(Op1, Known1, DL);
+ KnownBits Known0 = computeKnownBits(Op0, DL);
+ KnownBits Known1 = computeKnownBits(Op1, DL);
if ((Known1.One | Known0.Zero).isAllOnesValue()) {
// All the bits of Op0 that the 'and' could be masking are already zero.
return Op0;
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 69aa5b98d28..9572d81e471 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -688,9 +688,7 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
if (isNUW)
return Op0;
- unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
- KnownBits Known(BitWidth);
- computeKnownBits(Op1, Known, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
+ KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
if (Known.Zero.isMaxSignedValue()) {
// Op1 is either 0 or the minimum signed value. If the sub is NSW, then
// Op1 must be 0 because negating the minimum signed value is undefined.
@@ -1309,15 +1307,13 @@ static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
// If any bits in the shift amount make that value greater than or equal to
// the number of bits in the type, the shift is undefined.
- unsigned BitWidth = Op1->getType()->getScalarSizeInBits();
- KnownBits Known(BitWidth);
- computeKnownBits(Op1, Known, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
- if (Known.One.getLimitedValue() >= BitWidth)
+ KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
+ if (Known.One.getLimitedValue() >= Known.getBitWidth())
return UndefValue::get(Op0->getType());
// If all valid bits in the shift amount are known zero, the first operand is
// unchanged.
- unsigned NumValidShiftBits = Log2_32_Ceil(BitWidth);
+ unsigned NumValidShiftBits = Log2_32_Ceil(Known.getBitWidth());
if (Known.countMinTrailingZeros() >= NumValidShiftBits)
return Op0;
@@ -1343,9 +1339,7 @@ static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
// The low bit cannot be shifted out of an exact shift if it is set.
if (isExact) {
- unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
- KnownBits Op0Known(BitWidth);
- computeKnownBits(Op0, Op0Known, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
+ KnownBits Op0Known = computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
if (Op0Known.One[0])
return Op0;
}
@@ -3372,9 +3366,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
if (ICmpInst::isEquality(Pred)) {
const APInt *RHSVal;
if (match(RHS, m_APInt(RHSVal))) {
- unsigned BitWidth = RHSVal->getBitWidth();
- KnownBits LHSKnown(BitWidth);
- computeKnownBits(LHS, LHSKnown, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
+ KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
if (LHSKnown.Zero.intersects(*RHSVal) ||
!LHSKnown.One.isSubsetOf(*RHSVal))
return Pred == ICmpInst::ICMP_EQ ? ConstantInt::getFalse(ITy)
@@ -4684,9 +4676,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
// In general, it is possible for computeKnownBits to determine all bits in a
// value even when the operands are not all constants.
if (!Result && I->getType()->isIntOrIntVectorTy()) {
- unsigned BitWidth = I->getType()->getScalarSizeInBits();
- KnownBits Known(BitWidth);
- computeKnownBits(I, Known, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
+ KnownBits Known = computeKnownBits(I, Q.DL, /*Depth*/ 0, Q.AC, I, Q.DT, ORE);
if (Known.isConstant())
Result = ConstantInt::get(I->getType(), Known.getConstant());
}
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index 471ccb62970..e6391792bc2 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -534,9 +534,7 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
VectorType *VecTy = dyn_cast<VectorType>(V->getType());
if (!VecTy) {
- unsigned BitWidth = V->getType()->getIntegerBitWidth();
- KnownBits Known(BitWidth);
- computeKnownBits(V, Known, DL, 0, AC, dyn_cast<Instruction>(V), DT);
+ KnownBits Known = computeKnownBits(V, DL, 0, AC, dyn_cast<Instruction>(V), DT);
return Known.isZero();
}
@@ -550,14 +548,12 @@ static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
// For a vector, KnownZero will only be true if all values are zero, so check
// this per component
- unsigned BitWidth = VecTy->getElementType()->getIntegerBitWidth();
for (unsigned I = 0, N = VecTy->getNumElements(); I != N; ++I) {
Constant *Elem = C->getAggregateElement(I);
if (isa<UndefValue>(Elem))
return true;
- KnownBits Known(BitWidth);
- computeKnownBits(Elem, Known, DL);
+ KnownBits Known = computeKnownBits(Elem, DL);
if (Known.isZero())
return true;
}
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 78ded8141c0..31175d034a5 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -4648,10 +4648,7 @@ uint32_t ScalarEvolution::GetMinTrailingZerosImpl(const SCEV *S) {
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
- unsigned BitWidth = getTypeSizeInBits(U->getType());
- KnownBits Known(BitWidth);
- computeKnownBits(U->getValue(), Known, getDataLayout(), 0, &AC,
- nullptr, &DT);
+ KnownBits Known = computeKnownBits(U->getValue(), getDataLayout(), 0, &AC, nullptr, &DT);
return Known.countMinTrailingZeros();
}
@@ -4831,8 +4828,7 @@ ScalarEvolution::getRange(const SCEV *S,
const DataLayout &DL = getDataLayout();
if (SignHint == ScalarEvolution::HINT_RANGE_UNSIGNED) {
// For a SCEVUnknown, ask ValueTracking.
- KnownBits Known(BitWidth);
- computeKnownBits(U->getValue(), Known, DL, 0, &AC, nullptr, &DT);
+ KnownBits Known = computeKnownBits(U->getValue(), DL, 0, &AC, nullptr, &DT);
if (Known.One != ~Known.Zero + 1)
ConservativeResult =
ConservativeResult.intersectWith(ConstantRange(Known.One,
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index 733eeb1767a..7204bf51768 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -861,12 +861,9 @@ bool InstCombiner::willNotOverflowSignedSub(const Value *LHS,
ComputeNumSignBits(RHS, 0, &CxtI) > 1)
return true;
- unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
- KnownBits LHSKnown(BitWidth);
- computeKnownBits(LHS, LHSKnown, 0, &CxtI);
+ KnownBits LHSKnown = computeKnownBits(LHS, 0, &CxtI);
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(RHS, RHSKnown, 0, &CxtI);
+ KnownBits RHSKnown = computeKnownBits(RHS, 0, &CxtI);
// Subtraction of two 2's complement numbers having identical signs will
// never overflow.
@@ -1059,9 +1056,7 @@ Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
// If this is a xor that was canonicalized from a sub, turn it back into
// a sub and fuse this add with it.
if (LHS->hasOneUse() && (XorRHS->getValue()+1).isPowerOf2()) {
- IntegerType *IT = cast<IntegerType>(I.getType());
- KnownBits LHSKnown(IT->getBitWidth());
- computeKnownBits(XorLHS, LHSKnown, 0, &I);
+ KnownBits LHSKnown = computeKnownBits(XorLHS, 0, &I);
if ((XorRHS->getValue() | LHSKnown.Zero).isAllOnesValue())
return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
XorLHS);
@@ -1577,8 +1572,7 @@ Instruction *InstCombiner::visitSub(BinaryOperator &I) {
// Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
// zero.
if (Op0C->isMask()) {
- KnownBits RHSKnown(BitWidth);
- computeKnownBits(Op1, RHSKnown, 0, &I);
+ KnownBits RHSKnown = computeKnownBits(Op1, 0, &I);
if ((*Op0C | RHSKnown.Zero).isAllOnesValue())
return BinaryOperator::CreateXor(Op1, Op0);
}
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index face7abcc95..ba9eb59e6fd 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -1378,9 +1378,7 @@ static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombiner &IC) {
if (!IT)
return nullptr;
- unsigned BitWidth = IT->getBitWidth();
- KnownBits Known(BitWidth);
- IC.computeKnownBits(Op0, Known, 0, &II);
+ KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
// Create a mask for bits above (ctlz) or below (cttz) the first known one.
bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
index f4bf5221f6a..766939c56df 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCasts.cpp
@@ -692,8 +692,7 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
// This only works for EQ and NE
ICI->isEquality()) {
// If Op1C some other power of two, convert:
- KnownBits Known(Op1C->getType()->getBitWidth());
- computeKnownBits(ICI->getOperand(0), Known, 0, &CI);
+ KnownBits Known = computeKnownBits(ICI->getOperand(0), 0, &CI);
APInt KnownZeroMask(~Known.Zero);
if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
@@ -737,14 +736,11 @@ Instruction *InstCombiner::transformZExtICmp(ICmpInst *ICI, ZExtInst &CI,
// may lead to additional simplifications.
if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) {
if (IntegerType *ITy = dyn_cast<IntegerType>(CI.getType())) {
- uint32_t BitWidth = ITy->getBitWidth();
Value *LHS = ICI->getOperand(0);
Value *RHS = ICI->getOperand(1);
- KnownBits KnownLHS(BitWidth);
- KnownBits KnownRHS(BitWidth);
- computeKnownBits(LHS, KnownLHS, 0, &CI);
- computeKnownBits(RHS, KnownRHS, 0, &CI);
+ KnownBits KnownLHS = computeKnownBits(LHS, 0, &CI);
+ KnownBits KnownRHS = computeKnownBits(RHS, 0, &CI);
if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) {
APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
@@ -1063,9 +1059,7 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
// the icmp and sext into bitwise/integer operations.
if (ICI->hasOneUse() &&
ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
- unsigned BitWidth = Op1C->getType()->getBitWidth();
- KnownBits Known(BitWidth);
- computeKnownBits(Op0, Known, 0, &CI);
+ KnownBits Known = computeKnownBits(Op0, 0, &CI);
APInt KnownZeroMask(~Known.Zero);
if (KnownZeroMask.isPowerOf2()) {
@@ -1104,7 +1098,7 @@ Instruction *InstCombiner::transformSExtICmp(ICmpInst *ICI, Instruction &CI) {
// Distribute the bit over the whole bit width.
In = Builder->CreateAShr(In, ConstantInt::get(In->getType(),
- BitWidth - 1), "sext");
+ KnownZeroMask.getBitWidth() - 1), "sext");
}
if (CI.getType() == In->getType())
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
index fed67780e0f..5ca0ed25675 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCompares.cpp
@@ -1478,8 +1478,7 @@ Instruction *InstCombiner::foldICmpTruncConstant(ICmpInst &Cmp,
// of the high bits truncated out of x are known.
unsigned DstBits = Trunc->getType()->getScalarSizeInBits(),
SrcBits = X->getType()->getScalarSizeInBits();
- KnownBits Known(SrcBits);
- computeKnownBits(X, Known, 0, &Cmp);
+ KnownBits Known = computeKnownBits(X, 0, &Cmp);
// If all the high bits are known, we can do this xform.
if ((Known.Zero | Known.One).countLeadingOnes() >= SrcBits - DstBits) {
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 7ed9fd566b3..90b024b753c 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2180,8 +2180,7 @@ Instruction *InstCombiner::visitReturnInst(ReturnInst &RI) {
// There might be assume intrinsics dominating this return that completely
// determine the value. If so, constant fold it.
- KnownBits Known(VTy->getPrimitiveSizeInBits());
- computeKnownBits(ResultOp, Known, 0, &RI);
+ KnownBits Known = computeKnownBits(ResultOp, 0, &RI);
if (Known.isConstant())
RI.setOperand(0, Constant::getIntegerValue(VTy, Known.getConstant()));
@@ -2242,9 +2241,7 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
return &SI;
}
- unsigned BitWidth = cast<IntegerType>(Cond->getType())->getBitWidth();
- KnownBits Known(BitWidth);
- computeKnownBits(Cond, Known, 0, &SI);
+ KnownBits Known = computeKnownBits(Cond, 0, &SI);
unsigned LeadingKnownZeros = Known.countMinLeadingZeros();
unsigned LeadingKnownOnes = Known.countMinLeadingOnes();
@@ -2257,12 +2254,12 @@ Instruction *InstCombiner::visitSwitchInst(SwitchInst &SI) {
LeadingKnownOnes, C.getCaseValue()->getValue().countLeadingOnes());
}
- unsigned NewWidth = BitWidth - std::max(LeadingKnownZeros, LeadingKnownOnes);
+ unsigned NewWidth = Known.getBitWidth() - std::max(LeadingKnownZeros, LeadingKnownOnes);
// Shrink the condition operand if the new type is smaller than the old type.
// This may produce a non-standard type for the switch, but that's ok because
// the backend should extend back to a legal type for the target.
- if (NewWidth > 0 && NewWidth < BitWidth) {
+ if (NewWidth > 0 && NewWidth < Known.getBitWidth()) {
IntegerType *Ty = IntegerType::get(SI.getContext(), NewWidth);
Builder->SetInsertPoint(&SI);
Value *NewCond = Builder->CreateTrunc(Cond, Ty, "trunc");
@@ -2841,9 +2838,7 @@ bool InstCombiner::run() {
// a value even when the operands are not all constants.
Type *Ty = I->getType();
if (ExpensiveCombines && !I->use_empty() && Ty->isIntOrIntVectorTy()) {
- unsigned BitWidth = Ty->getScalarSizeInBits();
- KnownBits Known(BitWidth);
- computeKnownBits(I, Known, /*Depth*/0, I);
+ KnownBits Known = computeKnownBits(I, /*Depth*/0, I);
if (Known.isConstant()) {
Constant *C = ConstantInt::get(Ty, Known.getConstant());
DEBUG(dbgs() << "IC: ConstFold (all bits known) to: " << *C <<
diff --git a/llvm/lib/Transforms/Scalar/GuardWidening.cpp b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
index 198d2b2b024..65a2cd95567 100644
--- a/llvm/lib/Transforms/Scalar/GuardWidening.cpp
+++ b/llvm/lib/Transforms/Scalar/GuardWidening.cpp
@@ -537,9 +537,7 @@ bool GuardWideningImpl::parseRangeChecks(
Changed = true;
} else if (match(Check.getBase(),
m_Or(m_Value(OpLHS), m_ConstantInt(OpRHS)))) {
- unsigned BitWidth = OpLHS->getType()->getScalarSizeInBits();
- KnownBits Known(BitWidth);
- computeKnownBits(OpLHS, Known, DL);
+ KnownBits Known = computeKnownBits(OpLHS, DL);
if ((OpRHS->getValue() & Known.Zero) == OpRHS->getValue()) {
Check.setBase(OpLHS);
APInt NewOffset = Check.getOffsetValue() + OpRHS->getValue();
diff --git a/llvm/lib/Transforms/Utils/Local.cpp b/llvm/lib/Transforms/Utils/Local.cpp
index 29f67a8f53b..f28ed7c5caf 100644
--- a/llvm/lib/Transforms/Utils/Local.cpp
+++ b/llvm/lib/Transforms/Utils/Local.cpp
@@ -1037,17 +1037,15 @@ unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
const DominatorTree *DT) {
assert(V->getType()->isPointerTy() &&
"getOrEnforceKnownAlignment expects a pointer!");
- unsigned BitWidth = DL.getPointerTypeSizeInBits(V->getType());
- KnownBits Known(BitWidth);
- computeKnownBits(V, Known, DL, 0, AC, CxtI, DT);
+ KnownBits Known = computeKnownBits(V, DL, 0, AC, CxtI, DT);
unsigned TrailZ = Known.countMinTrailingZeros();
// Avoid trouble with ridiculously large TrailZ values, such as
// those computed from a null pointer.
TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));
- unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
+ unsigned Align = 1u << std::min(Known.getBitWidth() - 1, TrailZ);
// LLVM doesn't support alignments larger than this currently.
Align = std::min(Align, +Value::MaximumAlignment);
diff --git a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
index 27f72fcd8bd..6441cf89f6b 100644
--- a/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyCFG.cpp
@@ -4368,8 +4368,7 @@ static bool EliminateDeadSwitchCases(SwitchInst *SI, AssumptionCache *AC,
const DataLayout &DL) {
Value *Cond = SI->getCondition();
unsigned Bits = Cond->getType()->getIntegerBitWidth();
- KnownBits Known(Bits);
- computeKnownBits(Cond, Known, DL, 0, AC, SI);
+ KnownBits Known = computeKnownBits(Cond, DL, 0, AC, SI);
// We can also eliminate cases by determining that their values are outside of
// the limited range of the condition based on how many significant (non-sign)
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 85c9464b556..49effda5d83 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -466,9 +466,7 @@ Value *LibCallSimplifier::optimizeStringLength(CallInst *CI, IRBuilder<> &B,
}
Value *Offset = GEP->getOperand(2);
- unsigned BitWidth = Offset->getType()->getIntegerBitWidth();
- KnownBits Known(BitWidth);
- computeKnownBits(Offset, Known, DL, 0, nullptr, CI, nullptr);
+ KnownBits Known = computeKnownBits(Offset, DL, 0, nullptr, CI, nullptr);
Known.Zero.flipAllBits();
uint64_t ArrSize =
cast<ArrayType>(GEP->getSourceElementType())->getNumElements();