author     Nuno Lopes <nunoplopes@sapo.pt>    2017-09-09 18:23:11 +0000
committer  Nuno Lopes <nunoplopes@sapo.pt>    2017-09-09 18:23:11 +0000
commit     404f106d71178d3a9f27167e6a060ad577b5fadb (patch)
tree       c77b721b647825ed1de4f0df7f6a51434482fcb4 /llvm/lib
parent     97a56866a2510f8ae3ecfde7b9d9faf444299897 (diff)
Merge isKnownNonNull into isKnownNonZero
It now knows the tricks of both functions. Also, fix a bug that considered allocas of non-zero address space to be always non-null.

Differential Revision: https://reviews.llvm.org/D37628

llvm-svn: 312869
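A minimal sketch of the call-site migration, assuming LLVM-5-era headers; the helper name pointerIsProvablyNonNull is hypothetical, while the isKnownNonZero signature is taken from the call sites in the hunks below:

    #include "llvm/Analysis/AssumptionCache.h"
    #include "llvm/Analysis/ValueTracking.h"
    #include "llvm/IR/Dominators.h"
    #include "llvm/IR/Module.h"

    using namespace llvm;

    // Hypothetical helper illustrating the merged entry point. Passing only
    // V and DL reproduces the old context-insensitive isKnownNonNull(V);
    // threading AC, CtxI, and DT through as well reproduces the old
    // context-sensitive isKnownNonNullAt(V, CtxI, DT).
    static bool pointerIsProvablyNonNull(const Value *V, const Instruction *CtxI,
                                         AssumptionCache *AC, DominatorTree *DT) {
      const DataLayout &DL = CtxI->getModule()->getDataLayout();
      return V->getType()->isPointerTy() &&
             isKnownNonZero(V, DL, /*Depth=*/0, AC, CtxI, DT);
    }

Callers with no context instruction, such as isReturnNonNull in FunctionAttrs.cpp below, simply pass V and DL and rely on the defaulted trailing arguments.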
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/InstructionSimplify.cpp               16
-rw-r--r--  llvm/lib/Analysis/LazyValueInfo.cpp                     12
-rw-r--r--  llvm/lib/Analysis/Loads.cpp                              2
-rw-r--r--  llvm/lib/Analysis/ValueTracking.cpp                    182
-rw-r--r--  llvm/lib/Transforms/IPO/FunctionAttrs.cpp                4
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp     4
-rw-r--r--  llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp   15
7 files changed, 115 insertions, 120 deletions
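The alloca bug fix called out in the commit message is visible in the ValueTracking.cpp hunk below: the removed isKnownNonNull treated every alloca as non-null, while the merged check also consults the alloca address space. Condensed from that hunk (Q.DL is the query's DataLayout):

    // Old (buggy): any alloca was assumed to be non-null, even in address
    // spaces where 0 can be a valid object address.
    //   if (isa<AllocaInst>(V)) return true;

    // New: only address-space-0 allocas are known non-null.
    if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
      return true;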
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index ad3f9c39e48..0fb0b7d2449 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -2063,13 +2063,14 @@ static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
static Constant *
computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
const DominatorTree *DT, CmpInst::Predicate Pred,
- const Instruction *CxtI, Value *LHS, Value *RHS) {
+ AssumptionCache *AC, const Instruction *CxtI,
+ Value *LHS, Value *RHS) {
// First, skip past any trivial no-ops.
LHS = LHS->stripPointerCasts();
RHS = RHS->stripPointerCasts();
// A non-null pointer is not equal to a null pointer.
- if (llvm::isKnownNonNull(LHS) && isa<ConstantPointerNull>(RHS) &&
+ if (llvm::isKnownNonZero(LHS, DL) && isa<ConstantPointerNull>(RHS) &&
(Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
return ConstantInt::get(GetCompareTy(LHS),
!CmpInst::isTrueWhenEqual(Pred));
@@ -2224,9 +2225,11 @@ computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
// cannot be elided. We cannot fold malloc comparison to null. Also, the
// dynamic allocation call could be either of the operands.
Value *MI = nullptr;
- if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT))
+ if (isAllocLikeFn(LHS, TLI) &&
+ llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
MI = LHS;
- else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonNullAt(LHS, CxtI, DT))
+ else if (isAllocLikeFn(RHS, TLI) &&
+ llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
MI = RHS;
// FIXME: We should also fold the compare when the pointer escapes, but the
// compare dominates the pointer escape
@@ -3313,7 +3316,8 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// Simplify comparisons of related pointers using a powerful, recursive
// GEP-walk when we have target data available.
if (LHS->getType()->isPointerTy())
- if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI, LHS, RHS))
+ if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI, LHS,
+ RHS))
return C;
if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
@@ -3321,7 +3325,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
Q.DL.getTypeSizeInBits(CLHS->getType()) &&
Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
Q.DL.getTypeSizeInBits(CRHS->getType()))
- if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.CxtI,
+ if (auto *C = computePointerICmp(Q.DL, Q.TLI, Q.DT, Pred, Q.AC, Q.CxtI,
CLHS->getPointerOperand(),
CRHS->getPointerOperand()))
return C;
diff --git a/llvm/lib/Analysis/LazyValueInfo.cpp b/llvm/lib/Analysis/LazyValueInfo.cpp
index 9be773f13fc..31f1d44653d 100644
--- a/llvm/lib/Analysis/LazyValueInfo.cpp
+++ b/llvm/lib/Analysis/LazyValueInfo.cpp
@@ -817,12 +817,12 @@ bool LazyValueInfoImpl::solveBlockValueImpl(LVILatticeVal &Res,
// definition. We could easily extend this to look through geps, bitcasts,
// and the like to prove non-nullness, but it's not clear that's worth it
// compile time wise. The context-insensitive value walk done inside
- // isKnownNonNull gets most of the profitable cases at much less expense.
+ // isKnownNonZero gets most of the profitable cases at much less expense.
// This does mean that we have a sensitivity to where the defining
// instruction is placed, even if it could legally be hoisted much higher.
// That is unfortunate.
PointerType *PT = dyn_cast<PointerType>(BBI->getType());
- if (PT && isKnownNonNull(BBI)) {
+ if (PT && isKnownNonZero(BBI, DL)) {
Res = LVILatticeVal::getNot(ConstantPointerNull::get(PT));
return true;
}
@@ -901,7 +901,7 @@ bool LazyValueInfoImpl::solveBlockValueNonLocal(LVILatticeVal &BBLV,
// Before giving up, see if we can prove the pointer non-null local to
// this particular block.
if (Val->getType()->isPointerTy() &&
- (isKnownNonNull(Val) || isObjectDereferencedInBlock(Val, BB))) {
+ (isKnownNonZero(Val, DL) || isObjectDereferencedInBlock(Val, BB))) {
PointerType *PTy = cast<PointerType>(Val->getType());
Result = LVILatticeVal::getNot(ConstantPointerNull::get(PTy));
} else {
@@ -1886,17 +1886,17 @@ LazyValueInfo::Tristate
LazyValueInfo::getPredicateAt(unsigned Pred, Value *V, Constant *C,
Instruction *CxtI) {
// Is or is not NonNull are common predicates being queried. If
- // isKnownNonNull can tell us the result of the predicate, we can
+ // isKnownNonZero can tell us the result of the predicate, we can
// return it quickly. But this is only a fastpath, and falling
// through would still be correct.
+ const DataLayout &DL = CxtI->getModule()->getDataLayout();
if (V->getType()->isPointerTy() && C->isNullValue() &&
- isKnownNonNull(V->stripPointerCasts())) {
+ isKnownNonZero(V->stripPointerCasts(), DL)) {
if (Pred == ICmpInst::ICMP_EQ)
return LazyValueInfo::False;
else if (Pred == ICmpInst::ICMP_NE)
return LazyValueInfo::True;
}
- const DataLayout &DL = CxtI->getModule()->getDataLayout();
LVILatticeVal Result = getImpl(PImpl, AC, &DL, DT).getValueAt(V, CxtI);
Tristate Ret = getPredicateResult(Pred, C, Result, DL, TLI);
if (Ret != Unknown)
diff --git a/llvm/lib/Analysis/Loads.cpp b/llvm/lib/Analysis/Loads.cpp
index 591b0fc481d..78b673be8a0 100644
--- a/llvm/lib/Analysis/Loads.cpp
+++ b/llvm/lib/Analysis/Loads.cpp
@@ -72,7 +72,7 @@ static bool isDereferenceableAndAlignedPointer(
V->getPointerDereferenceableBytes(DL, CheckForNonNull));
if (KnownDerefBytes.getBoolValue()) {
if (KnownDerefBytes.uge(Size))
- if (!CheckForNonNull || isKnownNonNullAt(V, CtxI, DT))
+ if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT))
return isAligned(V, Align, DL);
}
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index f47559b850a..a49da3a861e 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1749,6 +1749,58 @@ static bool isGEPKnownNonNull(const GEPOperator *GEP, unsigned Depth,
return false;
}
+static bool isKnownNonNullFromDominatingCondition(const Value *V,
+ const Instruction *CtxI,
+ const DominatorTree *DT) {
+ assert(V->getType()->isPointerTy() && "V must be pointer type");
+ assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
+
+ if (!CtxI || !DT)
+ return false;
+
+ unsigned NumUsesExplored = 0;
+ for (auto *U : V->users()) {
+ // Avoid massive lists
+ if (NumUsesExplored >= DomConditionsMaxUses)
+ break;
+ NumUsesExplored++;
+
+ // If the value is used as an argument to a call or invoke, then argument
+ // attributes may provide an answer about null-ness.
+ if (auto CS = ImmutableCallSite(U))
+ if (auto *CalledFunc = CS.getCalledFunction())
+ for (const Argument &Arg : CalledFunc->args())
+ if (CS.getArgOperand(Arg.getArgNo()) == V &&
+ Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
+ return true;
+
+ // Consider only compare instructions uniquely controlling a branch
+ CmpInst::Predicate Pred;
+ if (!match(const_cast<User *>(U),
+ m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
+ (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
+ continue;
+
+ for (auto *CmpU : U->users()) {
+ if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
+ assert(BI->isConditional() && "uses a comparison!");
+
+ BasicBlock *NonNullSuccessor =
+ BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
+ BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
+ if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
+ return true;
+ } else if (Pred == ICmpInst::ICMP_NE &&
+ match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
+ DT->dominates(cast<Instruction>(CmpU), CtxI)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
/// Does the 'Range' metadata (which must be a valid MD_range operand list)
/// ensure that the value it's attached to is never Value? 'RangeType' is
/// the type of the value described by the range.
@@ -1794,7 +1846,15 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
return true;
}
- return false;
+ // A global variable in address space 0 is non null unless extern weak
+ // or an absolute symbol reference. Other address spaces may have null as a
+ // valid address for a global, so we can't assume anything.
+ if (const GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
+ if (!GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
+ GV->getType()->getAddressSpace() == 0)
+ return true;
+ } else
+ return false;
}
if (auto *I = dyn_cast<Instruction>(V)) {
@@ -1809,14 +1869,36 @@ bool isKnownNonZero(const Value *V, unsigned Depth, const Query &Q) {
}
}
+ // Check for pointer simplifications.
+ if (V->getType()->isPointerTy()) {
+ // Alloca never returns null, malloc might.
+ if (isa<AllocaInst>(V) && Q.DL.getAllocaAddrSpace() == 0)
+ return true;
+
+ // A byval, inalloca, or nonnull argument is never null.
+ if (const Argument *A = dyn_cast<Argument>(V))
+ if (A->hasByValOrInAllocaAttr() || A->hasNonNullAttr())
+ return true;
+
+ // A Load tagged with nonnull metadata is never null.
+ if (const LoadInst *LI = dyn_cast<LoadInst>(V))
+ if (LI->getMetadata(LLVMContext::MD_nonnull))
+ return true;
+
+ if (auto CS = ImmutableCallSite(V))
+ if (CS.isReturnNonNull())
+ return true;
+ }
+
// The remaining tests are all recursive, so bail out if we hit the limit.
if (Depth++ >= MaxDepth)
return false;
- // Check for pointer simplifications.
+ // Check for recursive pointer simplifications.
if (V->getType()->isPointerTy()) {
- if (isKnownNonNullAt(V, Q.CxtI, Q.DT))
+ if (isKnownNonNullFromDominatingCondition(V, Q.CxtI, Q.DT))
return true;
+
if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V))
if (isGEPKnownNonNull(GEP, Depth, Q))
return true;
@@ -3482,100 +3564,6 @@ bool llvm::mayBeMemoryDependent(const Instruction &I) {
return I.mayReadOrWriteMemory() || !isSafeToSpeculativelyExecute(&I);
}
-/// Return true if we know that the specified value is never null.
-bool llvm::isKnownNonNull(const Value *V) {
- assert(V->getType()->isPointerTy() && "V must be pointer type");
-
- // Alloca never returns null, malloc might.
- if (isa<AllocaInst>(V)) return true;
-
- // A byval, inalloca, or nonnull argument is never null.
- if (const Argument *A = dyn_cast<Argument>(V))
- return A->hasByValOrInAllocaAttr() || A->hasNonNullAttr();
-
- // A global variable in address space 0 is non null unless extern weak
- // or an absolute symbol reference. Other address spaces may have null as a
- // valid address for a global, so we can't assume anything.
- if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
- return !GV->isAbsoluteSymbolRef() && !GV->hasExternalWeakLinkage() &&
- GV->getType()->getAddressSpace() == 0;
-
- // A Load tagged with nonnull metadata is never null.
- if (const LoadInst *LI = dyn_cast<LoadInst>(V))
- return LI->getMetadata(LLVMContext::MD_nonnull);
-
- if (auto CS = ImmutableCallSite(V))
- if (CS.isReturnNonNull())
- return true;
-
- return false;
-}
-
-static bool isKnownNonNullFromDominatingCondition(const Value *V,
- const Instruction *CtxI,
- const DominatorTree *DT) {
- assert(V->getType()->isPointerTy() && "V must be pointer type");
- assert(!isa<ConstantData>(V) && "Did not expect ConstantPointerNull");
- assert(CtxI && "Context instruction required for analysis");
- assert(DT && "Dominator tree required for analysis");
-
- unsigned NumUsesExplored = 0;
- for (auto *U : V->users()) {
- // Avoid massive lists
- if (NumUsesExplored >= DomConditionsMaxUses)
- break;
- NumUsesExplored++;
-
- // If the value is used as an argument to a call or invoke, then argument
- // attributes may provide an answer about null-ness.
- if (auto CS = ImmutableCallSite(U))
- if (auto *CalledFunc = CS.getCalledFunction())
- for (const Argument &Arg : CalledFunc->args())
- if (CS.getArgOperand(Arg.getArgNo()) == V &&
- Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
- return true;
-
- // Consider only compare instructions uniquely controlling a branch
- CmpInst::Predicate Pred;
- if (!match(const_cast<User *>(U),
- m_c_ICmp(Pred, m_Specific(V), m_Zero())) ||
- (Pred != ICmpInst::ICMP_EQ && Pred != ICmpInst::ICMP_NE))
- continue;
-
- for (auto *CmpU : U->users()) {
- if (const BranchInst *BI = dyn_cast<BranchInst>(CmpU)) {
- assert(BI->isConditional() && "uses a comparison!");
-
- BasicBlock *NonNullSuccessor =
- BI->getSuccessor(Pred == ICmpInst::ICMP_EQ ? 1 : 0);
- BasicBlockEdge Edge(BI->getParent(), NonNullSuccessor);
- if (Edge.isSingleEdge() && DT->dominates(Edge, CtxI->getParent()))
- return true;
- } else if (Pred == ICmpInst::ICMP_NE &&
- match(CmpU, m_Intrinsic<Intrinsic::experimental_guard>()) &&
- DT->dominates(cast<Instruction>(CmpU), CtxI)) {
- return true;
- }
- }
- }
-
- return false;
-}
-
-bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
- const DominatorTree *DT) {
- if (isa<ConstantPointerNull>(V) || isa<UndefValue>(V))
- return false;
-
- if (isKnownNonNull(V))
- return true;
-
- if (!CtxI || !DT)
- return false;
-
- return ::isKnownNonNullFromDominatingCondition(V, CtxI, DT);
-}
-
OverflowResult llvm::computeOverflowForUnsignedMul(const Value *LHS,
const Value *RHS,
const DataLayout &DL,
diff --git a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
index 813a4b6e283..92810c7d6cb 100644
--- a/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionAttrs.cpp
@@ -884,11 +884,13 @@ static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
FlowsToReturn.insert(Ret->getReturnValue());
+ auto &DL = F->getParent()->getDataLayout();
+
for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
Value *RetVal = FlowsToReturn[i];
// If this value is locally known to be non-null, we're good
- if (isKnownNonNull(RetVal))
+ if (isKnownNonZero(RetVal, DL))
continue;
// Otherwise, we need to look upwards since we can't make any local
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 6be0e4e3fd1..2012934dbb9 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -3743,7 +3743,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
// isKnownNonNull -> nonnull attribute
- if (isKnownNonNullAt(DerivedPtr, II, &DT))
+ if (isKnownNonZero(DerivedPtr, DL, 0, &AC, II, &DT))
II->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
}
@@ -3932,7 +3932,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
for (Value *V : CS.args()) {
if (V->getType()->isPointerTy() &&
!CS.paramHasAttr(ArgNo, Attribute::NonNull) &&
- isKnownNonNullAt(V, CS.getInstruction(), &DT))
+ isKnownNonZero(V, DL, 0, &AC, CS.getInstruction(), &DT))
ArgNos.push_back(ArgNo);
ArgNo++;
}
diff --git a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index 1a3ab3b89af..ac28f590b01 100644
--- a/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/llvm/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -338,8 +338,8 @@ static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {
/// and thus must be phi-ed with undef. We fall back to the standard alloca
/// promotion algorithm in that case.
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
- LargeBlockInfo &LBI, DominatorTree &DT,
- AssumptionCache *AC) {
+ LargeBlockInfo &LBI, const DataLayout &DL,
+ DominatorTree &DT, AssumptionCache *AC) {
StoreInst *OnlyStore = Info.OnlyStore;
bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
BasicBlock *StoreBB = OnlyStore->getParent();
@@ -395,7 +395,7 @@ static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
// that information when we erase this Load. So we preserve
// it with an assume.
if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
- !llvm::isKnownNonNullAt(ReplVal, LI, &DT))
+ !llvm::isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
addAssumeNonNull(AC, LI);
LI->replaceAllUsesWith(ReplVal);
@@ -442,6 +442,7 @@ static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
/// }
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
LargeBlockInfo &LBI,
+ const DataLayout &DL,
DominatorTree &DT,
AssumptionCache *AC) {
// The trickiest case to handle is when we have large blocks. Because of this,
@@ -490,7 +491,7 @@ static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
// information when we erase it. So we preserve it with an assume.
Value *ReplVal = std::prev(I)->second->getOperand(0);
if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
- !llvm::isKnownNonNullAt(ReplVal, LI, &DT))
+ !llvm::isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
addAssumeNonNull(AC, LI);
LI->replaceAllUsesWith(ReplVal);
@@ -560,7 +561,7 @@ void PromoteMem2Reg::run() {
// If there is only a single store to this value, replace any loads of
// it that are directly dominated by the definition with the value stored.
if (Info.DefiningBlocks.size() == 1) {
- if (rewriteSingleStoreAlloca(AI, Info, LBI, DT, AC)) {
+ if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
// The alloca has been processed, move on.
RemoveFromAllocasList(AllocaNum);
++NumSingleStore;
@@ -571,7 +572,7 @@ void PromoteMem2Reg::run() {
// If the alloca is only read and written in one basic block, just perform a
// linear sweep over the block to eliminate it.
if (Info.OnlyUsedInOneBlock &&
- promoteSingleBlockAlloca(AI, Info, LBI, DT, AC)) {
+ promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
// The alloca has been processed, move on.
RemoveFromAllocasList(AllocaNum);
continue;
@@ -931,7 +932,7 @@ NextIteration:
// that information when we erase this Load. So we preserve
// it with an assume.
if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
- !llvm::isKnownNonNullAt(V, LI, &DT))
+ !llvm::isKnownNonZero(V, SQ.DL, 0, AC, LI, &DT))
addAssumeNonNull(AC, LI);
// Anything using the load now uses the current value.