Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Analysis/Lint.cpp                                 | 10
-rw-r--r--  llvm/lib/CodeGen/CodeGenPrepare.cpp                        |  4
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp      | 64
-rw-r--r--  llvm/lib/IR/Attributes.cpp                                 |  5
-rw-r--r--  llvm/lib/IR/AutoUpgrade.cpp                                | 55
-rw-r--r--  llvm/lib/IR/IRBuilder.cpp                                  | 25
-rw-r--r--  llvm/lib/IR/Verifier.cpp                                   |  2
-rw-r--r--  llvm/lib/Target/AArch64/AArch64FastISel.cpp                |  7
-rw-r--r--  llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp             |  6
-rw-r--r--  llvm/lib/Target/ARM/ARMFastISel.cpp                        |  7
-rw-r--r--  llvm/lib/Target/Mips/MipsFastISel.cpp                      |  4
-rw-r--r--  llvm/lib/Target/X86/X86FastISel.cpp                        |  4
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp       | 29
-rw-r--r--  llvm/lib/Transforms/InstCombine/InstCombineInternal.h      |  2
-rw-r--r--  llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp  | 23
-rw-r--r--  llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp    | 17
-rw-r--r--  llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp    | 17
-rw-r--r--  llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp        |  2
-rw-r--r--  llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp          |  6
-rw-r--r--  llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp             | 31
-rw-r--r--  llvm/lib/Transforms/Scalar/SROA.cpp                        | 32
-rw-r--r--  llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp        | 25
-rw-r--r--  llvm/lib/Transforms/Utils/InlineFunction.cpp               |  2
-rw-r--r--  llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp             | 21
24 files changed, 152 insertions, 248 deletions
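
This commit restores the single-alignment form of the memory intrinsics: the
i32 alignment argument returns as operand 3, applying to both the destination
and the source, and the separate getDestAlignment()/getSrcAlignment()
accessors give way to the old getAlignment()/setAlignment(). As a minimal
sketch of the restored form (the pointer/length mangling shown is assumed to
match this era's convention):

    declare void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len,
                                            i32 %align, i1 %isvolatile)

    ; e.g. a 16-byte copy where both pointers are known 4-byte aligned:
    call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 16, i32 4, i1 false)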
diff --git a/llvm/lib/Analysis/Lint.cpp b/llvm/lib/Analysis/Lint.cpp
index 5fb5b8c2e9a..2dfb09c95ad 100644
--- a/llvm/lib/Analysis/Lint.cpp
+++ b/llvm/lib/Analysis/Lint.cpp
@@ -284,9 +284,9 @@ void Lint::visitCallSite(CallSite CS) {
MemCpyInst *MCI = cast<MemCpyInst>(&I);
// TODO: If the size is known, use it.
visitMemoryReference(I, MCI->getDest(), MemoryLocation::UnknownSize,
- MCI->getDestAlignment(), nullptr, MemRef::Write);
+ MCI->getAlignment(), nullptr, MemRef::Write);
visitMemoryReference(I, MCI->getSource(), MemoryLocation::UnknownSize,
- MCI->getSrcAlignment(), nullptr, MemRef::Read);
+ MCI->getAlignment(), nullptr, MemRef::Read);
// Check that the memcpy arguments don't overlap. The AliasAnalysis API
// isn't expressive enough for what we really want to do. Known partial
@@ -306,16 +306,16 @@ void Lint::visitCallSite(CallSite CS) {
MemMoveInst *MMI = cast<MemMoveInst>(&I);
// TODO: If the size is known, use it.
visitMemoryReference(I, MMI->getDest(), MemoryLocation::UnknownSize,
- MMI->getDestAlignment(), nullptr, MemRef::Write);
+ MMI->getAlignment(), nullptr, MemRef::Write);
visitMemoryReference(I, MMI->getSource(), MemoryLocation::UnknownSize,
- MMI->getSrcAlignment(), nullptr, MemRef::Read);
+ MMI->getAlignment(), nullptr, MemRef::Read);
break;
}
case Intrinsic::memset: {
MemSetInst *MSI = cast<MemSetInst>(&I);
// TODO: If the size is known, use it.
visitMemoryReference(I, MSI->getDest(), MemoryLocation::UnknownSize,
- MSI->getDestAlignment(), nullptr, MemRef::Write);
+ MSI->getAlignment(), nullptr, MemRef::Write);
break;
}
diff --git a/llvm/lib/CodeGen/CodeGenPrepare.cpp b/llvm/lib/CodeGen/CodeGenPrepare.cpp
index d0ff84fa6f3..de5c68f7767 100644
--- a/llvm/lib/CodeGen/CodeGenPrepare.cpp
+++ b/llvm/lib/CodeGen/CodeGenPrepare.cpp
@@ -1665,8 +1665,8 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool& ModifiedDT) {
unsigned Align = getKnownAlignment(MI->getDest(), *DL);
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI))
Align = std::min(Align, getKnownAlignment(MTI->getSource(), *DL));
- if (Align > MI->getDestAlignment())
- MI->setDestAlignment(Align);
+ if (Align > MI->getAlignment())
+ MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), Align));
}
}
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 9d9b5dbb7d2..de0c0fba5f7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -4365,73 +4365,69 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
case Intrinsic::longjmp:
return &"_longjmp"[!TLI.usesUnderscoreLongJmp()];
case Intrinsic::memcpy: {
- const MemCpyInst &MemCpyI = cast<MemCpyInst>(I);
// FIXME: this definition of "user defined address space" is x86-specific
// Assert for address < 256 since we support only user defined address
// spaces.
- assert(MemCpyI.getDestAddressSpace() < 256 &&
- MemCpyI.getSourceAddressSpace() < 256 &&
+ assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
+ < 256 &&
+ cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
+ < 256 &&
"Unknown address space");
- SDValue Op1 = getValue(MemCpyI.getDest());
- SDValue Op2 = getValue(MemCpyI.getSource());
- SDValue Op3 = getValue(MemCpyI.getLength());
- // FIXME: Support passing different dest/src alignments to the memcpy
- // DAG node.
- unsigned Align = std::min(MemCpyI.getDestAlignment(),
- MemCpyI.getSrcAlignment());
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ SDValue Op3 = getValue(I.getArgOperand(2));
+ unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
if (!Align)
Align = 1; // @llvm.memcpy defines 0 and 1 to both mean no alignment.
- bool isVol = MemCpyI.isVolatile();
+ bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
SDValue MC = DAG.getMemcpy(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
false, isTC,
- MachinePointerInfo(MemCpyI.getDest()),
- MachinePointerInfo(MemCpyI.getSource()));
+ MachinePointerInfo(I.getArgOperand(0)),
+ MachinePointerInfo(I.getArgOperand(1)));
updateDAGForMaybeTailCall(MC);
return nullptr;
}
case Intrinsic::memset: {
- const MemSetInst &MemSetI = cast<MemSetInst>(I);
// FIXME: this definition of "user defined address space" is x86-specific
// Assert for address < 256 since we support only user defined address
// spaces.
- assert(MemSetI.getDestAddressSpace() < 256 &&
+ assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
+ < 256 &&
"Unknown address space");
- SDValue Op1 = getValue(MemSetI.getDest());
- SDValue Op2 = getValue(MemSetI.getValue());
- SDValue Op3 = getValue(MemSetI.getLength());
- unsigned Align = MemSetI.getDestAlignment();
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ SDValue Op3 = getValue(I.getArgOperand(2));
+ unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
if (!Align)
Align = 1; // @llvm.memset defines 0 and 1 to both mean no alignment.
- bool isVol = MemSetI.isVolatile();
+ bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
SDValue MS = DAG.getMemset(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
- isTC, MachinePointerInfo(MemSetI.getDest()));
+ isTC, MachinePointerInfo(I.getArgOperand(0)));
updateDAGForMaybeTailCall(MS);
return nullptr;
}
case Intrinsic::memmove: {
- const MemMoveInst &MemMoveI = cast<MemMoveInst>(I);
// FIXME: this definition of "user defined address space" is x86-specific
// Assert for address < 256 since we support only user defined address
// spaces.
- assert(MemMoveI.getDestAddressSpace() < 256 &&
- MemMoveI.getSourceAddressSpace() < 256 &&
+ assert(cast<PointerType>(I.getArgOperand(0)->getType())->getAddressSpace()
+ < 256 &&
+ cast<PointerType>(I.getArgOperand(1)->getType())->getAddressSpace()
+ < 256 &&
"Unknown address space");
- SDValue Op1 = getValue(MemMoveI.getDest());
- SDValue Op2 = getValue(MemMoveI.getSource());
- SDValue Op3 = getValue(MemMoveI.getLength());
- // FIXME: Support passing different dest/src alignments to the memcpy
- // DAG node.
- unsigned Align = std::min(MemMoveI.getDestAlignment(),
- MemMoveI.getSrcAlignment());
+ SDValue Op1 = getValue(I.getArgOperand(0));
+ SDValue Op2 = getValue(I.getArgOperand(1));
+ SDValue Op3 = getValue(I.getArgOperand(2));
+ unsigned Align = cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
if (!Align)
Align = 1; // @llvm.memmove defines 0 and 1 to both mean no alignment.
- bool isVol = MemMoveI.isVolatile();
+ bool isVol = cast<ConstantInt>(I.getArgOperand(4))->getZExtValue();
bool isTC = I.isTailCall() && isInTailCallPosition(&I, DAG.getTarget());
SDValue MM = DAG.getMemmove(getRoot(), sdl, Op1, Op2, Op3, Align, isVol,
- isTC, MachinePointerInfo(MemMoveI.getDest()),
- MachinePointerInfo(MemMoveI.getSource()));
+ isTC, MachinePointerInfo(I.getArgOperand(0)),
+ MachinePointerInfo(I.getArgOperand(1)));
updateDAGForMaybeTailCall(MM);
return nullptr;
}
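
For quick reference, the operand layout that the lowering above hard-codes
(restating what the code reads back out of the call):

    ; operand 0: destination pointer
    ; operand 1: source pointer (memcpy/memmove) or fill value (memset)
    ; operand 2: length
    ; operand 3: alignment, an i32 constant (0 and 1 both mean unaligned)
    ; operand 4: isvolatile, an i1 constant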
diff --git a/llvm/lib/IR/Attributes.cpp b/llvm/lib/IR/Attributes.cpp
index fe09c47fb48..bdefe5917fe 100644
--- a/llvm/lib/IR/Attributes.cpp
+++ b/llvm/lib/IR/Attributes.cpp
@@ -830,6 +830,11 @@ AttributeSet AttributeSet::removeAttributes(LLVMContext &C, unsigned Index,
if (!pImpl) return AttributeSet();
if (!Attrs.pImpl) return *this;
+ // FIXME it is not obvious how this should work for alignment.
+ // For now, say we can't pass in alignment, which no current use does.
+ assert(!Attrs.hasAttribute(Index, Attribute::Alignment) &&
+ "Attempt to change alignment!");
+
// Add the attribute slots before the one we're trying to add.
SmallVector<AttributeSet, 4> AttrSet;
uint64_t NumAttrs = pImpl->getNumAttributes();
diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index c83313fa654..12c354c89b2 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -144,36 +144,6 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
}
break;
}
- case 'm': {
- if (Name.startswith("memcpy.") && F->arg_size() == 5) {
- F->setName(Name + ".old");
- // Get the types of dest, src, and len.
- ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
- NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memcpy,
- ParamTypes);
- return true;
- }
- if (Name.startswith("memmove.") && F->arg_size() == 5) {
- F->setName(Name + ".old");
- // Get the types of dest, src, and len.
- ArrayRef<Type *> ParamTypes = F->getFunctionType()->params().slice(0, 3);
- NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memmove,
- ParamTypes);
- return true;
- }
- if (Name.startswith("memset.") && F->arg_size() == 5) {
- F->setName(Name + ".old");
- // Get the types of dest and len.
- Type *ParamTypes[2] = {
- F->getFunctionType()->getParamType(0),
- F->getFunctionType()->getParamType(2)
- };
- NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::memset,
- ParamTypes);
- return true;
- }
- break;
- }
case 'o':
// We only need to change the name to match the mangling including the
@@ -757,31 +727,6 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
CI->eraseFromParent();
return;
- case Intrinsic::memcpy:
- case Intrinsic::memmove:
- case Intrinsic::memset: {
- // Remove alignment argument (3), and add alignment attributes to the
- // dest/src pointers.
- Value *Args[4] = {
- CI->getArgOperand(0),
- CI->getArgOperand(1),
- CI->getArgOperand(2),
- CI->getArgOperand(4)
- };
- auto *MemCI = cast<MemIntrinsic>(Builder.CreateCall(NewFn, Args, Name));
-
- // All mem intrinsics support dest alignment.
- const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
- MemCI->setDestAlignment(Align->getZExtValue());
-
- // Memcpy/Memmove also support source alignment.
- if (auto *MemTransferI = dyn_cast<MemTransferInst>(MemCI))
- MemTransferI->setSrcAlignment(Align->getZExtValue());
- CI->replaceAllUsesWith(MemCI);
- CI->eraseFromParent();
- return;
- }
-
case Intrinsic::objectsize:
CI->replaceAllUsesWith(Builder.CreateCall(
NewFn, {CI->getArgOperand(0), CI->getArgOperand(1)}, Name));
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index b07f15515ad..44741293633 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -15,7 +15,6 @@
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
-#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Statepoint.h"
@@ -80,11 +79,11 @@ static InvokeInst *createInvokeHelper(Value *Invokee, BasicBlock *NormalDest,
}
CallInst *IRBuilderBase::
-CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned DstAlign,
+CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align,
bool isVolatile, MDNode *TBAATag, MDNode *ScopeTag,
MDNode *NoAliasTag) {
Ptr = getCastedInt8PtrValue(Ptr);
- Value *Ops[] = { Ptr, Val, Size, getInt1(isVolatile) };
+ Value *Ops[] = { Ptr, Val, Size, getInt32(Align), getInt1(isVolatile) };
Type *Tys[] = { Ptr->getType(), Size->getType() };
Module *M = BB->getParent()->getParent();
Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
@@ -100,21 +99,18 @@ CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned DstAlign,
if (NoAliasTag)
CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
-
- cast<MemSetInst>(CI)->setDestAlignment(DstAlign);
return CI;
}
CallInst *IRBuilderBase::
-CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned DstAlign,
- IntegerAlignment SrcAlign,
+CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned Align,
bool isVolatile, MDNode *TBAATag, MDNode *TBAAStructTag,
MDNode *ScopeTag, MDNode *NoAliasTag) {
Dst = getCastedInt8PtrValue(Dst);
Src = getCastedInt8PtrValue(Src);
- Value *Ops[] = { Dst, Src, Size, getInt1(isVolatile) };
+ Value *Ops[] = { Dst, Src, Size, getInt32(Align), getInt1(isVolatile) };
Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
Module *M = BB->getParent()->getParent();
Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys);
@@ -134,23 +130,18 @@ CreateMemCpy(Value *Dst, Value *Src, Value *Size, unsigned DstAlign,
if (NoAliasTag)
CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
-
- auto *MCI = cast<MemCpyInst>(CI);
- MCI->setDestAlignment(DstAlign);
- MCI->setSrcAlignment(SrcAlign);
return CI;
}
CallInst *IRBuilderBase::
-CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned DstAlign,
- IntegerAlignment SrcAlign,
+CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned Align,
bool isVolatile, MDNode *TBAATag, MDNode *ScopeTag,
MDNode *NoAliasTag) {
Dst = getCastedInt8PtrValue(Dst);
Src = getCastedInt8PtrValue(Src);
- Value *Ops[] = { Dst, Src, Size, getInt1(isVolatile) };
+ Value *Ops[] = { Dst, Src, Size, getInt32(Align), getInt1(isVolatile) };
Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
Module *M = BB->getParent()->getParent();
Value *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);
@@ -166,10 +157,6 @@ CreateMemMove(Value *Dst, Value *Src, Value *Size, unsigned DstAlign,
if (NoAliasTag)
CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
-
- auto *MMI = cast<MemMoveInst>(CI);
- MMI->setDestAlignment(DstAlign);
- MMI->setSrcAlignment(SrcAlign);
return CI;
}
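
A minimal sketch of calling the restored builders; the surrounding values
(InsertPt, Dst, Src, Len) are hypothetical:

    IRBuilder<> B(InsertPt);
    // One alignment argument now covers both sides of a transfer:
    B.CreateMemCpy(Dst, Src, B.getInt64(Len), /*Align=*/8);
    B.CreateMemMove(Dst, Src, B.getInt64(Len), /*Align=*/8);
    B.CreateMemSet(Dst, B.getInt8(0), B.getInt64(Len), /*Align=*/8);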
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 86e30620dec..cf7b4cac342 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -3511,7 +3511,7 @@ void Verifier::visitIntrinsicCallSite(Intrinsic::ID ID, CallSite CS) {
const APInt &AlignVal = AlignCI->getValue();
Assert(AlignCI->isZero() || AlignVal.isPowerOf2(),
"alignment argument of memory intrinsics must be a power of 2", CS);
- Assert(isa<ConstantInt>(CS.getArgOperand(3)),
+ Assert(isa<ConstantInt>(CS.getArgOperand(4)),
"isvolatile argument of memory intrinsics must be a constant int",
CS);
break;
diff --git a/llvm/lib/Target/AArch64/AArch64FastISel.cpp b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
index efab048e00a..284f5263f90 100644
--- a/llvm/lib/Target/AArch64/AArch64FastISel.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FastISel.cpp
@@ -3379,8 +3379,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// Small memcpy's are common enough that we want to do them without a call
// if possible.
uint64_t Len = cast<ConstantInt>(MTI->getLength())->getZExtValue();
- unsigned Alignment = std::min(MTI->getDestAlignment(),
- MTI->getSrcAlignment());
+ unsigned Alignment = MTI->getAlignment();
if (isMemCpySmall(Len, Alignment)) {
Address Dest, Src;
if (!computeAddress(MTI->getRawDest(), Dest) ||
@@ -3400,7 +3399,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
return false;
const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
- return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 1);
+ return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
}
case Intrinsic::memset: {
const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -3416,7 +3415,7 @@ bool AArch64FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
// address spaces.
return false;
- return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
+ return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
}
case Intrinsic::sin:
case Intrinsic::cos:
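
The getNumArgOperands() - 2 adjustments here (and in the Mips and X86 changes
below) reflect lowering to the libc entry points, which take only the first
three intrinsic operands; the trailing alignment and isvolatile operands must
not be forwarded:

    void *memcpy(void *dst, const void *src, size_t n);
    void *memmove(void *dst, const void *src, size_t n);
    void *memset(void *s, int c, size_t n);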
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index d8238f73e20..87d50d58705 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -400,15 +400,15 @@ void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
case Intrinsic::memcpy: {
MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(),
- MemCpy->getLength(), MemCpy->getDestAlignment(),
- MemCpy->getSrcAlignment(), MemCpy->isVolatile());
+ MemCpy->getLength(), MemCpy->getAlignment(),
+ MemCpy->isVolatile());
Intr->eraseFromParent();
continue;
}
case Intrinsic::memset: {
MemSetInst *MemSet = cast<MemSetInst>(Intr);
Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
- MemSet->getLength(), MemSet->getDestAlignment(),
+ MemSet->getLength(), MemSet->getAlignment(),
MemSet->isVolatile());
Intr->eraseFromParent();
continue;
diff --git a/llvm/lib/Target/ARM/ARMFastISel.cpp b/llvm/lib/Target/ARM/ARMFastISel.cpp
index ce928289408..175107450fc 100644
--- a/llvm/lib/Target/ARM/ARMFastISel.cpp
+++ b/llvm/lib/Target/ARM/ARMFastISel.cpp
@@ -2328,8 +2328,8 @@ bool ARMFastISel::SelectCall(const Instruction *I,
for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
i != e; ++i) {
// If we're lowering a memory intrinsic instead of a regular call, skip the
- // last argument, which shouldn't be passed to the underlying function.
- if (IntrMemName && e-i <= 1)
+ // last two arguments, which shouldn't be passed to the underlying function.
+ if (IntrMemName && e-i <= 2)
break;
ISD::ArgFlagsTy Flags;
@@ -2527,8 +2527,7 @@ bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
!ARMComputeAddress(MTI.getRawSource(), Src))
return false;
- unsigned Alignment = std::min(MTI.getDestAlignment(),
- MTI.getSrcAlignment());
+ unsigned Alignment = MTI.getAlignment();
if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
return true;
}
diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index f5bf36dea5e..e9eaf810637 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -1403,7 +1403,7 @@ bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
if (!MTI->getLength()->getType()->isIntegerTy(32))
return false;
const char *IntrMemName = isa<MemCpyInst>(II) ? "memcpy" : "memmove";
- return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 1);
+ return lowerCallTo(II, IntrMemName, II->getNumArgOperands() - 2);
}
case Intrinsic::memset: {
const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -1412,7 +1412,7 @@ bool MipsFastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
return false;
if (!MSI->getLength()->getType()->isIntegerTy(32))
return false;
- return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
+ return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
}
}
return false;
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
index 886e6226e43..914fd04ad6b 100644
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -2409,7 +2409,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
if (MCI->getSourceAddressSpace() > 255 || MCI->getDestAddressSpace() > 255)
return false;
- return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 1);
+ return lowerCallTo(II, "memcpy", II->getNumArgOperands() - 2);
}
case Intrinsic::memset: {
const MemSetInst *MSI = cast<MemSetInst>(II);
@@ -2424,7 +2424,7 @@ bool X86FastISel::fastLowerIntrinsicCall(const IntrinsicInst *II) {
if (MSI->getDestAddressSpace() > 255)
return false;
- return lowerCallTo(II, "memset", II->getNumArgOperands() - 1);
+ return lowerCallTo(II, "memset", II->getNumArgOperands() - 2);
}
case Intrinsic::stackprotector: {
// Emit code to store the stack guard onto the stack.
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 2bc96cce42c..cde26cc24c2 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -60,18 +60,14 @@ static Type *reduceToSingleValueType(Type *T) {
return T;
}
-Instruction *InstCombiner::SimplifyMemTransfer(MemTransferInst *MI) {
+Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
unsigned DstAlign = getKnownAlignment(MI->getArgOperand(0), DL, MI, AC, DT);
unsigned SrcAlign = getKnownAlignment(MI->getArgOperand(1), DL, MI, AC, DT);
- unsigned CopyDestAlign = MI->getDestAlignment();
- unsigned CopySrcAlign = MI->getSrcAlignment();
+ unsigned MinAlign = std::min(DstAlign, SrcAlign);
+ unsigned CopyAlign = MI->getAlignment();
- if (CopyDestAlign < DstAlign) {
- MI->setDestAlignment(DstAlign);
- return MI;
- }
- if (CopySrcAlign < SrcAlign) {
- MI->setSrcAlignment(SrcAlign);
+ if (CopyAlign < MinAlign) {
+ MI->setAlignment(ConstantInt::get(MI->getAlignmentType(), MinAlign, false));
return MI;
}
@@ -139,8 +135,8 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemTransferInst *MI) {
// If the memcpy/memmove provides better alignment info than we can
// infer, use it.
- SrcAlign = std::max(SrcAlign, CopySrcAlign);
- DstAlign = std::max(DstAlign, CopyDestAlign);
+ SrcAlign = std::max(SrcAlign, CopyAlign);
+ DstAlign = std::max(DstAlign, CopyAlign);
Value *Src = Builder->CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
Value *Dest = Builder->CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
@@ -160,8 +156,9 @@ Instruction *InstCombiner::SimplifyMemTransfer(MemTransferInst *MI) {
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
unsigned Alignment = getKnownAlignment(MI->getDest(), DL, MI, AC, DT);
- if (MI->getDestAlignment() < Alignment) {
- MI->setDestAlignment(Alignment);
+ if (MI->getAlignment() < Alignment) {
+ MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
+ Alignment, false));
return MI;
}
@@ -171,7 +168,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
return nullptr;
uint64_t Len = LenC->getLimitedValue();
- Alignment = MI->getDestAlignment();
+ Alignment = MI->getAlignment();
assert(Len && "0-sized memory setting should be removed already.");
// memset(s,c,n) -> store s, c (for n=1,2,4,8)
@@ -746,8 +743,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
// If we can determine a pointer alignment that is bigger than currently
// set, update the alignment.
- if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
- if (Instruction *I = SimplifyMemTransfer(MTI))
+ if (isa<MemTransferInst>(MI)) {
+ if (Instruction *I = SimplifyMemTransfer(MI))
return I;
} else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
if (Instruction *I = SimplifyMemSet(MSI))
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 4b167021307..1bb3ad6c534 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -558,7 +558,7 @@ private:
Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
Instruction *MatchBSwap(BinaryOperator &I);
bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
- Instruction *SimplifyMemTransfer(MemTransferInst *MI);
+ Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
Instruction *SimplifyMemSet(MemSetInst *MI);
Value *EvaluateInDifferentType(Value *V, Type *Ty, bool isSigned);
diff --git a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
index 2a9749d65c2..21ef3207e89 100644
--- a/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp
@@ -1356,21 +1356,20 @@ void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
Value *LenShadow = IRB.CreateMul(
I.getLength(),
ConstantInt::get(I.getLength()->getType(), DFSF.DFS.ShadowWidth / 8));
- Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
- DestShadow = IRB.CreateBitCast(DestShadow, Int8Ptr);
- SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
- auto *MTI = cast<MemTransferInst>(IRB.CreateCall(I.getCalledValue(),
- { DestShadow, SrcShadow,
- LenShadow,
- I.getVolatileCst() }));
-
+ Value *AlignShadow;
if (ClPreserveAlignment) {
- MTI->setDestAlignment(I.getDestAlignment() * (DFSF.DFS.ShadowWidth / 8));
- MTI->setSrcAlignment(I.getSrcAlignment() * (DFSF.DFS.ShadowWidth / 8));
+ AlignShadow = IRB.CreateMul(I.getAlignmentCst(),
+ ConstantInt::get(I.getAlignmentCst()->getType(),
+ DFSF.DFS.ShadowWidth / 8));
} else {
- MTI->setDestAlignment(DFSF.DFS.ShadowWidth / 8);
- MTI->setSrcAlignment(DFSF.DFS.ShadowWidth / 8);
+ AlignShadow = ConstantInt::get(I.getAlignmentCst()->getType(),
+ DFSF.DFS.ShadowWidth / 8);
}
+ Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
+ DestShadow = IRB.CreateBitCast(DestShadow, Int8Ptr);
+ SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
+ IRB.CreateCall(I.getCalledValue(), {DestShadow, SrcShadow, LenShadow,
+ AlignShadow, I.getVolatileCst()});
}
void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 836995fa3af..218e3e96c23 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1117,7 +1117,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
unsigned CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
Value *Cpy = EntryIRB.CreateMemCpy(
getShadowPtr(V, EntryIRB.getInt8Ty(), EntryIRB), Base, Size,
- CopyAlign, CopyAlign);
+ CopyAlign);
DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
(void)Cpy;
}
@@ -2482,7 +2482,7 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
unsigned Alignment = std::min(ParamAlignment, kShadowTLSAlignment);
Store = IRB.CreateMemCpy(ArgShadowBase,
getShadowPtr(A, Type::getInt8Ty(*MS.C), IRB),
- Size, Alignment, Alignment);
+ Size, Alignment);
} else {
Size = DL.getTypeAllocSize(A->getType());
if (ArgOffset + Size > kParamTLSSize) break;
@@ -2834,7 +2834,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
Value *Base = getShadowPtrForVAArgument(RealTy, IRB, OverflowOffset);
OverflowOffset += RoundUpToAlignment(ArgSize, 8);
IRB.CreateMemCpy(Base, MSV.getShadowPtr(A, IRB.getInt8Ty(), IRB),
- ArgSize, kShadowTLSAlignment, kShadowTLSAlignment);
+ ArgSize, kShadowTLSAlignment);
} else {
ArgKind AK = classifyArgument(A);
if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
@@ -2912,7 +2912,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
VAArgOverflowSize);
VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
- IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8, 8);
+ IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
}
// Instrument va_start.
@@ -2931,7 +2931,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
Value *RegSaveAreaShadowPtr =
MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy,
- AMD64FpEndOffset, 16, 16);
+ AMD64FpEndOffset, 16);
Value *OverflowArgAreaPtrPtr =
IRB.CreateIntToPtr(
@@ -2943,8 +2943,7 @@ struct VarArgAMD64Helper : public VarArgHelper {
MSV.getShadowPtr(OverflowArgAreaPtr, IRB.getInt8Ty(), IRB);
Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
AMD64FpEndOffset);
- IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize,
- 16, 16);
+ IRB.CreateMemCpy(OverflowArgAreaShadowPtr, SrcPtr, VAArgOverflowSize, 16);
}
}
};
@@ -3030,7 +3029,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
// If there is a va_start in this function, make a backup copy of
// va_arg_tls somewhere in the function entry block.
VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
- IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8, 8);
+ IRB.CreateMemCpy(VAArgTLSCopy, MS.VAArgTLS, CopySize, 8);
}
// Instrument va_start.
@@ -3045,7 +3044,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
Value *RegSaveAreaPtr = IRB.CreateLoad(RegSaveAreaPtrPtr);
Value *RegSaveAreaShadowPtr =
MSV.getShadowPtr(RegSaveAreaPtr, IRB.getInt8Ty(), IRB);
- IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8, 8);
+ IRB.CreateMemCpy(RegSaveAreaShadowPtr, VAArgTLSCopy, CopySize, 8);
}
}
};
diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
index f041a296684..a13e552cbd0 100644
--- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
+++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp
@@ -347,8 +347,6 @@ bool AlignmentFromAssumptions::processAssumption(CallInst *ACall) {
// instruction, but only for one operand, save it. If we reach the
// other operand through another assumption later, then we may
// change the alignment at that point.
- // FIXME: The above statement is no longer true. Fix the code below
- // to be able to reason about different dest/src alignments.
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
unsigned NewSrcAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
MTI->getSource(), SE);
@@ -378,23 +376,20 @@ bool AlignmentFromAssumptions::processAssumption(CallInst *ACall) {
if (AltSrcAlignment <= std::max(NewDestAlignment, AltDestAlignment))
NewAlignment = std::max(NewAlignment, AltSrcAlignment);
- if (NewAlignment > MTI->getDestAlignment()) {
- MTI->setDestAlignment(NewAlignment);
- ++NumMemIntAlignChanged;
- }
-
- if (NewAlignment > MTI->getSrcAlignment()) {
- MTI->setSrcAlignment(NewAlignment);
+ if (NewAlignment > MI->getAlignment()) {
+ MI->setAlignment(ConstantInt::get(Type::getInt32Ty(
+ MI->getParent()->getContext()), NewAlignment));
++NumMemIntAlignChanged;
}
NewDestAlignments.insert(std::make_pair(MTI, NewDestAlignment));
NewSrcAlignments.insert(std::make_pair(MTI, NewSrcAlignment));
- } else if (NewDestAlignment > MI->getDestAlignment()) {
+ } else if (NewDestAlignment > MI->getAlignment()) {
assert((!isa<MemIntrinsic>(MI) || isa<MemSetInst>(MI)) &&
"Unknown memory intrinsic");
- MI->setDestAlignment(NewDestAlignment);
+ MI->setAlignment(ConstantInt::get(Type::getInt32Ty(
+ MI->getParent()->getContext()), NewDestAlignment));
++NumMemIntAlignChanged;
}
}
diff --git a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
index b0e6d19c505..36ad0a5f7b9 100644
--- a/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/DeadStoreElimination.cpp
@@ -611,7 +611,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
// as any store/memset/memcpy is likely using vector instructions so
// shortening it to not vector size is likely to be slower
MemIntrinsic* DepIntrinsic = cast<MemIntrinsic>(DepWrite);
- unsigned DepWriteAlign = DepIntrinsic->getDestAlignment();
+ unsigned DepWriteAlign = DepIntrinsic->getAlignment();
if (llvm::isPowerOf2_64(InstWriteOffset) ||
((DepWriteAlign != 0) && InstWriteOffset % DepWriteAlign == 0)) {
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index aed43b39d64..c2fb8cd49b7 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -414,8 +414,8 @@ bool LoopIdiomRecognize::processLoopMemSet(MemSetInst *MSI,
return false;
return processLoopStridedStore(Pointer, (unsigned)SizeInBytes,
- MSI->getDestAlignment(), MSI->getValue(), MSI,
- Ev, BECount, /*NegStride=*/false);
+ MSI->getAlignment(), MSI->getValue(), MSI, Ev,
+ BECount, /*NegStride=*/false);
}
/// mayLoopAccessLocation - Return true if the specified loop might access the
@@ -700,7 +700,7 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(
CallInst *NewCall =
Builder.CreateMemCpy(StoreBasePtr, LoadBasePtr, NumBytes,
- SI->getAlignment(), LI->getAlignment());
+ std::min(SI->getAlignment(), LI->getAlignment()));
NewCall->setDebugLoc(SI->getDebugLoc());
DEBUG(dbgs() << " Formed memcpy: " << *NewCall << "\n"
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 2db2b802f08..f80b07bf219 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -229,8 +229,7 @@ public:
void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
- addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(),
- MSI);
+ addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
}
void addRange(int64_t Start, int64_t Size, Value *Ptr,
@@ -820,17 +819,20 @@ bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep) {
// If all checks passed, then we can transform M.
+ // Make sure to use the lesser of the alignment of the source and the dest
+ // since we're changing where we're reading from, but don't want to increase
+ // the alignment past what can be read from or written to.
// TODO: Is this worth it if we're creating a less aligned memcpy? For
// example we could be moving from movaps -> movq on x86.
+ unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());
+
IRBuilder<> Builder(M);
if (UseMemMove)
Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
- M->getDestAlignment(), MDep->getSrcAlignment(),
- M->isVolatile());
+ Align, M->isVolatile());
else
Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
- M->getDestAlignment(), MDep->getSrcAlignment(),
- M->isVolatile());
+ Align, M->isVolatile());
// Remove the instruction we're replacing.
MD->removeInstruction(M);
@@ -876,7 +878,7 @@ bool MemCpyOpt::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
// If Dest is aligned, and SrcSize is constant, use the minimum alignment
// of the sum.
const unsigned DestAlign =
- std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
+ std::max(MemSet->getAlignment(), MemCpy->getAlignment());
if (DestAlign > 1)
if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
@@ -933,7 +935,7 @@ bool MemCpyOpt::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
IRBuilder<> Builder(MemCpy);
Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
- CopySize, MemCpy->getDestAlignment());
+ CopySize, MemCpy->getAlignment());
return true;
}
@@ -959,7 +961,7 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
IRBuilder<> Builder(M);
Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
- M->getDestAlignment(), false);
+ M->getAlignment(), false);
MD->removeInstruction(M);
M->eraseFromParent();
++NumCpyToSet;
@@ -988,11 +990,8 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
// d) memcpy from a just-memset'd source can be turned into memset.
if (DepInfo.isClobber()) {
if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
- // FIXME: Can we pass in either of dest/src alignment here instead of
- // convervatively taking the minimum?
- unsigned Align = std::min(M->getDestAlignment(), M->getSrcAlignment());
if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
- CopySize->getZExtValue(), Align,
+ CopySize->getZExtValue(), M->getAlignment(),
C)) {
MD->removeInstruction(M);
M->eraseFromParent();
@@ -1109,11 +1108,7 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
*CS->getParent()->getParent());
DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
- // FIXME: Can we use either of dest/src alignment here instead of
- // convervatively taking the minimum?
- unsigned MinAlign = std::min(MDep->getDestAlignment(),
- MDep->getSrcAlignment());
- if (MinAlign < ByValAlign &&
+ if (MDep->getAlignment() < ByValAlign &&
getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
CS.getInstruction(), &AC, &DT) < ByValAlign)
return false;
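
A worked instance of the min-alignment comment added in the hunks above, with
values assumed purely for illustration: if MDep was created with align 16 but
M with align 4, the forwarded copy may only claim the weaker guarantee:

    unsigned Align = std::min(16u, 4u); // == 4; never overstate alignment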
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 223c531b7d7..bfd48ff15f6 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2618,7 +2618,8 @@ private:
assert(!IsSplit);
assert(NewBeginOffset == BeginOffset);
II.setDest(getNewAllocaSlicePtr(IRB, OldPtr->getType()));
- II.setDestAlignment(getSliceAlign());
+ Type *CstTy = II.getAlignmentCst()->getType();
+ II.setAlignment(ConstantInt::get(CstTy, getSliceAlign()));
deleteIfTriviallyDead(OldPtr);
return false;
@@ -2734,16 +2735,15 @@ private:
// update both source and dest of a single call.
if (!IsSplittable) {
Value *AdjustedPtr = getNewAllocaSlicePtr(IRB, OldPtr->getType());
- if (IsDest) {
+ if (IsDest)
II.setDest(AdjustedPtr);
-
- if (II.getDestAlignment() > SliceAlign)
- II.setDestAlignment(MinAlign(II.getDestAlignment(), SliceAlign));
- } else {
+ else
II.setSource(AdjustedPtr);
- if (II.getSrcAlignment() > SliceAlign)
- II.setSrcAlignment(MinAlign(II.getSrcAlignment(), SliceAlign));
+ if (II.getAlignment() > SliceAlign) {
+ Type *CstTy = II.getAlignmentCst()->getType();
+ II.setAlignment(
+ ConstantInt::get(CstTy, MinAlign(II.getAlignment(), SliceAlign)));
}
DEBUG(dbgs() << " to: " << II << "\n");
@@ -2796,10 +2796,8 @@ private:
// Compute the relative offset for the other pointer within the transfer.
unsigned IntPtrWidth = DL.getPointerSizeInBits(OtherAS);
APInt OtherOffset(IntPtrWidth, NewBeginOffset - BeginOffset);
- unsigned OtherDestAlign = MinAlign(II.getDestAlignment() ? II.getDestAlignment() : 1,
- OtherOffset.zextOrTrunc(64).getZExtValue());
- unsigned OtherSrcAlign = MinAlign(II.getSrcAlignment() ? II.getSrcAlignment() : 1,
- OtherOffset.zextOrTrunc(64).getZExtValue());
+ unsigned OtherAlign = MinAlign(II.getAlignment() ? II.getAlignment() : 1,
+ OtherOffset.zextOrTrunc(64).getZExtValue());
if (EmitMemCpy) {
// Compute the other pointer, folding as much as possible to produce
@@ -2811,11 +2809,9 @@ private:
Type *SizeTy = II.getLength()->getType();
Constant *Size = ConstantInt::get(SizeTy, NewEndOffset - NewBeginOffset);
- CallInst *New = IRB.CreateMemCpy(IsDest ? OurPtr : OtherPtr,
- IsDest ? OtherPtr : OurPtr, Size,
- MinAlign(SliceAlign, OtherDestAlign),
- MinAlign(SliceAlign, OtherSrcAlign),
- II.isVolatile());
+ CallInst *New = IRB.CreateMemCpy(
+ IsDest ? OurPtr : OtherPtr, IsDest ? OtherPtr : OurPtr, Size,
+ MinAlign(SliceAlign, OtherAlign), II.isVolatile());
(void)New;
DEBUG(dbgs() << " to: " << *New << "\n");
return false;
@@ -2847,7 +2843,7 @@ private:
Value *SrcPtr = getAdjustedPtr(IRB, DL, OtherPtr, OtherOffset, OtherPtrTy,
OtherPtr->getName() + ".");
- unsigned SrcAlign = OtherSrcAlign;
+ unsigned SrcAlign = OtherAlign;
Value *DstPtr = &NewAI;
unsigned DstAlign = SliceAlign;
if (!IsDest) {
diff --git a/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 679e241d971..114d22ddf2e 100644
--- a/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -716,7 +716,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
SrcPtr = Builder.CreateBitCast(SrcPtr, AIPTy);
LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
- SrcVal->setAlignment(MTI->getSrcAlignment());
+ SrcVal->setAlignment(MTI->getAlignment());
Builder.CreateStore(SrcVal, NewAI);
} else if (GetUnderlyingObject(MTI->getDest(), DL, 0) != OrigAI) {
// Src must be OrigAI, change this to be a load from NewAI then a store
@@ -733,7 +733,7 @@ void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), AIPTy);
StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
- NewStore->setAlignment(MTI->getDestAlignment());
+ NewStore->setAlignment(MTI->getAlignment());
} else {
// Noop transfer. Src == Dst
}
@@ -2182,8 +2182,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// that doesn't have anything to do with the alloca that we are promoting. For
// memset, this Value* stays null.
Value *OtherPtr = nullptr;
- unsigned DestMemAlignment = MI->getDestAlignment();
- unsigned SrcMemAlignment = 0;
+ unsigned MemAlignment = MI->getAlignment();
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcopy
if (Inst == MTI->getRawDest())
OtherPtr = MTI->getRawSource();
@@ -2191,7 +2190,6 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
assert(Inst == MTI->getRawSource());
OtherPtr = MTI->getRawDest();
}
- SrcMemAlignment = MTI->getSrcAlignment();
}
// If there is an other pointer, we want to convert it to the same pointer
@@ -2237,8 +2235,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// If this is a memcpy/memmove, emit a GEP of the other element address.
Value *OtherElt = nullptr;
- unsigned OtherDestEltAlign = DestMemAlignment;
- unsigned OtherSrcEltAlign = SrcMemAlignment;
+ unsigned OtherEltAlign = MemAlignment;
if (OtherPtr) {
Value *Idx[2] = { Zero,
@@ -2261,8 +2258,7 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
// mem intrinsic and the alignment of the element. If the alignment of
// the memcpy (f.e.) is 32 but the element is at a 4-byte offset, then the
// known alignment is just 4 bytes.
- OtherDestEltAlign = (unsigned)MinAlign(OtherDestEltAlign, EltOffset);
- OtherSrcEltAlign = (unsigned)MinAlign(OtherSrcEltAlign, EltOffset);
+ OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
}
Value *EltPtr = NewElts[i];
@@ -2273,13 +2269,12 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
if (isa<MemTransferInst>(MI)) {
if (SROADest) {
// From Other to Alloca.
- Value *Elt = new LoadInst(OtherElt, "tmp", false,
- OtherSrcEltAlign, MI);
+ Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
new StoreInst(Elt, EltPtr, MI);
} else {
// From Alloca to Other.
Value *Elt = new LoadInst(EltPtr, "tmp", MI);
- new StoreInst(Elt, OtherElt, false, OtherDestEltAlign, MI);
+ new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
}
continue;
}
@@ -2342,11 +2337,9 @@ SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
Value *Src = SROADest ? OtherElt : EltPtr; // Src ptr
if (isa<MemCpyInst>(MI))
- Builder.CreateMemCpy(Dst, Src, EltSize, OtherDestEltAlign,
- OtherSrcEltAlign, MI->isVolatile());
+ Builder.CreateMemCpy(Dst, Src, EltSize, OtherEltAlign,MI->isVolatile());
else
- Builder.CreateMemMove(Dst, Src, EltSize, OtherDestEltAlign,
- OtherSrcEltAlign, MI->isVolatile());
+ Builder.CreateMemMove(Dst, Src, EltSize,OtherEltAlign,MI->isVolatile());
}
}
DeadInsts.push_back(MI);
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 7cb7c3ab54b..a31131bd4ac 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -851,7 +851,7 @@ static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
// Always generate a memcpy of alignment 1 here because we don't know
// the alignment of the src pointer. Other optimizations can infer
// better alignment.
- Builder.CreateMemCpy(Dst, Src, Size, /*DestAlign=*/1, /*SrcAlign=*/1);
+ Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}
/// When inlining a call site that has a byval argument,
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 72abd0b3329..91c56c2e231 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -238,7 +238,7 @@ Value *LibCallSimplifier::emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len,
// concatenation for us. Make a memcpy to copy the nul byte with align = 1.
B.CreateMemCpy(CpyDst, Src,
ConstantInt::get(DL.getIntPtrType(Src->getContext()), Len + 1),
- 1, 1);
+ 1);
return Dst;
}
@@ -471,8 +471,7 @@ Value *LibCallSimplifier::optimizeStrCpy(CallInst *CI, IRBuilder<> &B) {
// We have enough information to now generate the memcpy call to do the
// copy for us. Make a memcpy to copy the nul byte with align = 1.
B.CreateMemCpy(Dst, Src,
- ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len), 1,
- 1);
+ ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len), 1);
return Dst;
}
@@ -499,7 +498,7 @@ Value *LibCallSimplifier::optimizeStpCpy(CallInst *CI, IRBuilder<> &B) {
// We have enough information to now generate the memcpy call to do the
// copy for us. Make a memcpy to copy the nul byte with align = 1.
- B.CreateMemCpy(Dst, Src, LenV, 1, 1);
+ B.CreateMemCpy(Dst, Src, LenV, 1);
return DstEnd;
}
@@ -539,7 +538,7 @@ Value *LibCallSimplifier::optimizeStrNCpy(CallInst *CI, IRBuilder<> &B) {
Type *PT = Callee->getFunctionType()->getParamType(0);
// strncpy(x, s, c) -> memcpy(x, s, c, 1) [s and c are constant]
- B.CreateMemCpy(Dst, Src, ConstantInt::get(DL.getIntPtrType(PT), Len), 1, 1);
+ B.CreateMemCpy(Dst, Src, ConstantInt::get(DL.getIntPtrType(PT), Len), 1);
return Dst;
}
@@ -918,7 +917,7 @@ Value *LibCallSimplifier::optimizeMemCpy(CallInst *CI, IRBuilder<> &B) {
// memcpy(x, y, n) -> llvm.memcpy(x, y, n, 1)
B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), 1, 1);
+ CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
}
@@ -930,7 +929,7 @@ Value *LibCallSimplifier::optimizeMemMove(CallInst *CI, IRBuilder<> &B) {
// memmove(x, y, n) -> llvm.memmove(x, y, n, 1)
B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), 1, 1);
+ CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
}
@@ -1766,7 +1765,7 @@ Value *LibCallSimplifier::optimizeSPrintFString(CallInst *CI, IRBuilder<> &B) {
B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
ConstantInt::get(DL.getIntPtrType(CI->getContext()),
FormatStr.size() + 1),
- 1, 1); // Copy the null byte.
+ 1); // Copy the null byte.
return ConstantInt::get(CI->getType(), FormatStr.size());
}
@@ -1800,7 +1799,7 @@ Value *LibCallSimplifier::optimizeSPrintFString(CallInst *CI, IRBuilder<> &B) {
return nullptr;
Value *IncLen =
B.CreateAdd(Len, ConstantInt::get(Len->getType(), 1), "leninc");
- B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(2), IncLen, 1, 1);
+ B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(2), IncLen, 1);
// The sprintf result is the unincremented number of bytes in the string.
return B.CreateIntCast(Len, CI->getType(), false);
@@ -2337,7 +2336,7 @@ Value *FortifiedLibCallSimplifier::optimizeMemCpyChk(CallInst *CI, IRBuilder<> &
if (isFortifiedCallFoldable(CI, 3, 2, false)) {
B.CreateMemCpy(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), 1, 1);
+ CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
}
return nullptr;
@@ -2351,7 +2350,7 @@ Value *FortifiedLibCallSimplifier::optimizeMemMoveChk(CallInst *CI, IRBuilder<>
if (isFortifiedCallFoldable(CI, 3, 2, false)) {
B.CreateMemMove(CI->getArgOperand(0), CI->getArgOperand(1),
- CI->getArgOperand(2), 1, 1);
+ CI->getArgOperand(2), 1);
return CI->getArgOperand(0);
}
return nullptr;
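
All of the SimplifyLibCalls sites above pass an alignment of 1, i.e. no
alignment is claimed beyond byte access; e.g. the memcpy fold emits (a sketch
with assumed SSA names):

    call void @llvm.memcpy.p0i8.p0i8.i64(i8* %x, i8* %y, i64 %n, i32 1, i1 false)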