Diffstat (limited to 'llvm/lib'):

 llvm/lib/CodeGen/SafeStack.cpp                            |  4
 llvm/lib/IR/Core.cpp                                      |  8
 llvm/lib/IR/Function.cpp                                  |  5
 llvm/lib/IR/Instructions.cpp                              | 18
 llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp            |  8
 llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp   | 10
 llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp  |  7
 llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp   | 67
 llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp         |  6
 llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp         |  4
 llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp            | 36
 llvm/lib/Transforms/Utils/InlineFunction.cpp              |  3
 llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp            | 59
13 files changed, 119 insertions, 116 deletions
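Editor's note (not part of the patch): the recurring pattern in the hunks below is a migration of IRBuilder memory-intrinsic call sites from raw unsigned alignments, where 0 meant "unknown" and 1 meant "1-byte alignment", to the typed MaybeAlign/Align overloads. The sketch below shows the before/after shape of a typical call site, assuming the IRBuilder and Support/Alignment APIs of this era; the helper name emitCopy and its parameters are hypothetical.

// Illustrative sketch only: old integer-alignment style vs. the new
// MaybeAlign/Align style this commit adopts.
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Hypothetical helper: emit a copy of Size bytes from Src to Dst.
static void emitCopy(IRBuilder<> &B, Value *Dst, Value *Src, Value *Size,
                     unsigned SrcAlignOrZero /* 0 means "unknown" */) {
  // Old style (unsigned alignments):
  //   B.CreateMemCpy(Dst, 1, Src, SrcAlignOrZero, Size);
  // New style: MaybeAlign(0) collapses to "no known alignment", while
  // Align::None() is an explicitly known 1-byte alignment.
  B.CreateMemCpy(Dst, Align::None(), Src, MaybeAlign(SrcAlignOrZero), Size);
}

Because MaybeAlign(0) is empty rather than a valid alignment, call sites that previously passed 0 now read MaybeAlign(...) instead of Align(...), which is why both spellings appear in the diff.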
diff --git a/llvm/lib/CodeGen/SafeStack.cpp b/llvm/lib/CodeGen/SafeStack.cpp
index 80c66ccd781..8aa488e6391 100644
--- a/llvm/lib/CodeGen/SafeStack.cpp
+++ b/llvm/lib/CodeGen/SafeStack.cpp
@@ -563,7 +563,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack(
   for (Argument *Arg : ByValArguments) {
     unsigned Offset = SSL.getObjectOffset(Arg);
-    unsigned Align = SSL.getObjectAlignment(Arg);
+    MaybeAlign Align(SSL.getObjectAlignment(Arg));
     Type *Ty = Arg->getType()->getPointerElementType();

     uint64_t Size = DL.getTypeStoreSize(Ty);
@@ -580,7 +580,7 @@ Value *SafeStack::moveStaticAllocasToUnsafeStack(
                     DIExpression::ApplyOffset, -Offset);
     Arg->replaceAllUsesWith(NewArg);
     IRB.SetInsertPoint(cast<Instruction>(NewArg)->getNextNode());
-    IRB.CreateMemCpy(Off, Align, Arg, Arg->getParamAlignment(), Size);
+    IRB.CreateMemCpy(Off, Align, Arg, Arg->getParamAlign(), Size);
   }

   // Allocate space for every unsafe static AllocaInst on the unsafe stack.
diff --git a/llvm/lib/IR/Core.cpp b/llvm/lib/IR/Core.cpp
index 84e279b8bbc..63b98f26ba1 100644
--- a/llvm/lib/IR/Core.cpp
+++ b/llvm/lib/IR/Core.cpp
@@ -3450,8 +3450,8 @@ LLVMValueRef LLVMBuildMemCpy(LLVMBuilderRef B,
                              LLVMValueRef Dst, unsigned DstAlign,
                              LLVMValueRef Src, unsigned SrcAlign,
                              LLVMValueRef Size) {
-  return wrap(unwrap(B)->CreateMemCpy(unwrap(Dst), DstAlign,
-                                      unwrap(Src), SrcAlign,
+  return wrap(unwrap(B)->CreateMemCpy(unwrap(Dst), MaybeAlign(DstAlign),
+                                      unwrap(Src), MaybeAlign(SrcAlign),
                                       unwrap(Size)));
 }

@@ -3459,8 +3459,8 @@ LLVMValueRef LLVMBuildMemMove(LLVMBuilderRef B,
                               LLVMValueRef Dst, unsigned DstAlign,
                               LLVMValueRef Src, unsigned SrcAlign,
                               LLVMValueRef Size) {
-  return wrap(unwrap(B)->CreateMemMove(unwrap(Dst), DstAlign,
-                                       unwrap(Src), SrcAlign,
+  return wrap(unwrap(B)->CreateMemMove(unwrap(Dst), MaybeAlign(DstAlign),
+                                       unwrap(Src), MaybeAlign(SrcAlign),
                                        unwrap(Size)));
 }

diff --git a/llvm/lib/IR/Function.cpp b/llvm/lib/IR/Function.cpp
index fcf893bd4a3..54612250b0d 100644
--- a/llvm/lib/IR/Function.cpp
+++ b/llvm/lib/IR/Function.cpp
@@ -126,6 +126,11 @@ unsigned Argument::getParamAlignment() const {
   return getParent()->getParamAlignment(getArgNo());
 }

+MaybeAlign Argument::getParamAlign() const {
+  assert(getType()->isPointerTy() && "Only pointers have alignments");
+  return getParent()->getParamAlign(getArgNo());
+}
+
 Type *Argument::getParamByValType() const {
   assert(getType()->isPointerTy() && "Only pointers have byval types");
   return getParent()->getParamByValType(getArgNo());
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 2a49a758a46..c264277fa53 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1342,11 +1342,7 @@ void LoadInst::setAlignment(MaybeAlign Align) {
          "Alignment is greater than MaximumAlignment!");
   setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
                              (encode(Align) << 1));
-  if (Align)
-    assert(getAlignment() == Align->value() &&
-           "Alignment representation error!");
-  else
-    assert(getAlignment() == 0 && "Alignment representation error!");
+  assert(getAlign() == Align && "Alignment representation error!");
 }

 //===----------------------------------------------------------------------===//
@@ -1416,16 +1412,12 @@ StoreInst::StoreInst(Value *val, Value *addr, bool isVolatile, MaybeAlign Align,
   AssertOK();
 }

-void StoreInst::setAlignment(MaybeAlign Align) {
-  assert((!Align || *Align <= MaximumAlignment) &&
+void StoreInst::setAlignment(MaybeAlign Alignment) {
+  assert((!Alignment || *Alignment <= MaximumAlignment) &&
          "Alignment is greater than MaximumAlignment!");
   setInstructionSubclassData((getSubclassDataFromInstruction() & ~(31 << 1)) |
-                             (encode(Align) << 1));
-  if (Align)
-    assert(getAlignment() == Align->value() &&
-           "Alignment representation error!");
-  else
-    assert(getAlignment() == 0 && "Alignment representation error!");
+                             (encode(Alignment) << 1));
+  assert(getAlign() == Alignment && "Alignment representation error!");
 }

 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
index 0b5eb4b2dfc..14958a180ce 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
@@ -884,16 +884,16 @@ bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
       continue;
     case Intrinsic::memcpy: {
       MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
-      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlignment(),
-                           MemCpy->getRawSource(), MemCpy->getSourceAlignment(),
+      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlign(),
+                           MemCpy->getRawSource(), MemCpy->getSourceAlign(),
                            MemCpy->getLength(), MemCpy->isVolatile());
       Intr->eraseFromParent();
       continue;
     }
     case Intrinsic::memmove: {
       MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
-      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlignment(),
-                            MemMove->getRawSource(), MemMove->getSourceAlignment(),
+      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlign(),
+                            MemMove->getRawSource(), MemMove->getSourceAlign(),
                             MemMove->getLength(), MemMove->isVolatile());
       Intr->eraseFromParent();
       continue;
diff --git a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
index 3692edf5502..0a8ef4c5e53 100644
--- a/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp
@@ -2275,14 +2275,12 @@ CleanupAndExit:
                  : CondBuilder.CreateBitCast(LoadBasePtr, Int32PtrTy);
       NewCall = CondBuilder.CreateCall(Fn, {Op0, Op1, NumWords});
     } else {
-      NewCall = CondBuilder.CreateMemMove(StoreBasePtr, SI->getAlignment(),
-                                          LoadBasePtr, LI->getAlignment(),
-                                          NumBytes);
+      NewCall = CondBuilder.CreateMemMove(
+          StoreBasePtr, SI->getAlign(), LoadBasePtr, LI->getAlign(), NumBytes);
     }
   } else {
-    NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlignment(),
-                                   LoadBasePtr, LI->getAlignment(),
-                                   NumBytes);
+    NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlign(), LoadBasePtr,
+                                   LI->getAlign(), NumBytes);
     // Okay, the memcpy has been formed. Zap the original store and
     // anything that feeds into it.
     RecursivelyDeleteTriviallyDeadInstructions(SI, TLI);
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index c7e708127a4..79c119489a6 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -2901,15 +2901,14 @@ void FunctionStackPoisoner::copyArgsPassedByValToAllocas() {
   for (Argument &Arg : F.args()) {
     if (Arg.hasByValAttr()) {
       Type *Ty = Arg.getType()->getPointerElementType();
-      unsigned Alignment = Arg.getParamAlignment();
-      if (Alignment == 0)
-        Alignment = DL.getABITypeAlignment(Ty);
+      const Align Alignment =
+          DL.getValueOrABITypeAlignment(Arg.getParamAlign(), Ty);

       AllocaInst *AI = IRB.CreateAlloca(
           Ty, nullptr,
           (Arg.hasName() ? Arg.getName() : "Arg" + Twine(Arg.getArgNo())) +
               ".byval");
-      AI->setAlignment(Align(Alignment));
+      AI->setAlignment(Alignment);
       Arg.replaceAllUsesWith(AI);

       uint64_t AllocSize = DL.getTypeAllocSize(Ty);
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index 09d5a49e203..645bcad9ee6 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -1634,8 +1634,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
                                  Size, ArgAlign);
         } else {
           const Align CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
-          Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign.value(),
-                                             Base, CopyAlign.value(), Size);
+          Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
+                                             CopyAlign, Size);
           LLVM_DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
           (void)Cpy;
         }
@@ -3327,9 +3327,8 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
                                    /*isStore*/ false)
                 .first;

-        Store = IRB.CreateMemCpy(ArgShadowBase,
-                                 Alignment ? Alignment->value() : 0, AShadowPtr,
-                                 Alignment ? Alignment->value() : 0, Size);
+        Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
+                                 Alignment, Size);
         // TODO(glider): need to copy origins.
       } else {
         Size = DL.getTypeAllocSize(A->getType());
@@ -3826,11 +3825,11 @@ struct VarArgAMD64Helper : public VarArgHelper {
             MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
                                    /*isStore*/ false);

-        IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment.value(), ShadowPtr,
-                         kShadowTLSAlignment.value(), ArgSize);
+        IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
+                         kShadowTLSAlignment, ArgSize);
         if (MS.TrackOrigins)
-          IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment.value(), OriginPtr,
-                           kShadowTLSAlignment.value(), ArgSize);
+          IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
+                           kShadowTLSAlignment, ArgSize);
       } else {
         ArgKind AK = classifyArgument(A);
         if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
@@ -3953,10 +3952,11 @@ struct VarArgAMD64Helper : public VarArgHelper {
           IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
                         VAArgOverflowSize);
       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-      IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
+      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
       if (MS.TrackOrigins) {
         VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-        IRB.CreateMemCpy(VAArgTLSOriginCopy, 8, MS.VAArgOriginTLS, 8, CopySize);
+        IRB.CreateMemCpy(VAArgTLSOriginCopy, Align(8), MS.VAArgOriginTLS,
+                         Align(8), CopySize);
       }
     }

@@ -3979,12 +3979,11 @@ struct VarArgAMD64Helper : public VarArgHelper {
       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
           MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                  Alignment, /*isStore*/ true);
-      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment.value(), VAArgTLSCopy,
-                       Alignment.value(), AMD64FpEndOffset);
+      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
+                       AMD64FpEndOffset);
       if (MS.TrackOrigins)
-        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment.value(),
-                         VAArgTLSOriginCopy, Alignment.value(),
-                         AMD64FpEndOffset);
+        IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
+                         Alignment, AMD64FpEndOffset);
       Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
       Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
           IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
@@ -3998,13 +3997,13 @@ struct VarArgAMD64Helper : public VarArgHelper {
                                  Alignment, /*isStore*/ true);
       Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
                                              AMD64FpEndOffset);
-      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment.value(), SrcPtr,
-                       Alignment.value(), VAArgOverflowSize);
+      IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
+                       VAArgOverflowSize);
       if (MS.TrackOrigins) {
         SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
                                         AMD64FpEndOffset);
-        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment.value(), SrcPtr,
-                         Alignment.value(), VAArgOverflowSize);
+        IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
+                         VAArgOverflowSize);
       }
     }
   }
@@ -4102,7 +4101,7 @@ struct VarArgMIPS64Helper : public VarArgHelper {
       // If there is a va_start in this function, make a backup copy of
       // va_arg_tls somewhere in the function entry block.
       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-      IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
+      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
     }

     // Instrument va_start.
@@ -4122,8 +4121,8 @@ struct VarArgMIPS64Helper : public VarArgHelper {
       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
-      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment.value(), VAArgTLSCopy,
-                       Alignment.value(), CopySize);
+      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
+                       CopySize);
     }
   }
 };
@@ -4294,7 +4293,7 @@ struct VarArgAArch64Helper : public VarArgHelper {
           IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
                         VAArgOverflowSize);
       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-      IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
+      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
     }

     Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
@@ -4352,7 +4351,8 @@ struct VarArgAArch64Helper : public VarArgHelper {
                               GrRegSaveAreaShadowPtrOff);
     Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);

-    IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, 8, GrSrcPtr, 8, GrCopySize);
+    IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
+                     GrCopySize);

     // Again, but for FP/SIMD values.
     Value *VrRegSaveAreaShadowPtrOff =
@@ -4370,7 +4370,8 @@ struct VarArgAArch64Helper : public VarArgHelper {
                               VrRegSaveAreaShadowPtrOff);
     Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);

-    IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, 8, VrSrcPtr, 8, VrCopySize);
+    IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
+                     VrCopySize);

     // And finally for remaining arguments.
     Value *StackSaveAreaShadowPtr =
@@ -4382,8 +4383,8 @@ struct VarArgAArch64Helper : public VarArgHelper {
         IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
                               IRB.getInt32(AArch64VAEndOffset));

-    IRB.CreateMemCpy(StackSaveAreaShadowPtr, 16, StackSrcPtr, 16,
-                     VAArgOverflowSize);
+    IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
+                     Align(16), VAArgOverflowSize);
     }
   }
 };
@@ -4443,8 +4444,8 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
             MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
                                    /*isStore*/ false);

-        IRB.CreateMemCpy(Base, kShadowTLSAlignment.value(), AShadowPtr,
-                         kShadowTLSAlignment.value(), ArgSize);
+        IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
+                         kShadowTLSAlignment, ArgSize);
       }
     }
     VAArgOffset += alignTo(ArgSize, 8);
@@ -4541,7 +4542,7 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
       // If there is a va_start in this function, make a backup copy of
       // va_arg_tls somewhere in the function entry block.
       VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
-      IRB.CreateMemCpy(VAArgTLSCopy, 8, MS.VAArgTLS, 8, CopySize);
+      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
     }

     // Instrument va_start.
@@ -4561,8 +4562,8 @@ struct VarArgPowerPC64Helper : public VarArgHelper {
       std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
-      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment.value(), VAArgTLSCopy,
-                       Alignment.value(), CopySize);
+      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
+                       CopySize);
     }
   }
 };
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index 89765d7d955..dfb1b6bfb73 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -808,15 +808,13 @@ static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
     if (isa<MemCpyInst>(MTI)) {
       MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
-      B.CreateMemCpy(Dest, MTI->getDestAlignment(),
-                     Src, MTI->getSourceAlignment(),
+      B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                      MTI->getLength(),
                      false, // isVolatile
                      TBAA, TBAAStruct, ScopeMD, NoAliasMD);
     } else {
       assert(isa<MemMoveInst>(MTI));
-      B.CreateMemMove(Dest, MTI->getDestAlignment(),
-                      Src, MTI->getSourceAlignment(),
+      B.CreateMemMove(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                       MTI->getLength(),
                       false, // isVolatile
                       TBAA, ScopeMD, NoAliasMD);
diff --git a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index b1b87f81000..b77843d7cd7 100644
--- a/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -1084,8 +1084,8 @@ bool LoopIdiomRecognize::processLoopStoreOfLoopLoad(StoreInst *SI,
   // If the load or store are atomic, then they must necessarily be unordered
   // by previous checks.
   if (!SI->isAtomic() && !LI->isAtomic())
-    NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlignment(),
-                                   LoadBasePtr, LI->getAlignment(), NumBytes);
+    NewCall = Builder.CreateMemCpy(StoreBasePtr, SI->getAlign(), LoadBasePtr,
+                                   LI->getAlign(), NumBytes);
   else {
     // We cannot allow unaligned ops for unordered load/store, so reject
     // anything where the alignment isn't at least the element size.
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 89979076d19..c24fa40860e 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -413,25 +413,21 @@ Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
   return AMemSet;
 }

-static unsigned findStoreAlignment(const DataLayout &DL, const StoreInst *SI) {
-  unsigned StoreAlign = SI->getAlignment();
-  if (!StoreAlign)
-    StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
-  return StoreAlign;
+static Align findStoreAlignment(const DataLayout &DL, const StoreInst *SI) {
+  return DL.getValueOrABITypeAlignment(MaybeAlign(SI->getAlignment()),
+                                       SI->getOperand(0)->getType());
 }

-static unsigned findLoadAlignment(const DataLayout &DL, const LoadInst *LI) {
-  unsigned LoadAlign = LI->getAlignment();
-  if (!LoadAlign)
-    LoadAlign = DL.getABITypeAlignment(LI->getType());
-  return LoadAlign;
+static Align findLoadAlignment(const DataLayout &DL, const LoadInst *LI) {
+  return DL.getValueOrABITypeAlignment(MaybeAlign(LI->getAlignment()),
+                                       LI->getType());
 }

-static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
-                                    const LoadInst *LI) {
-  unsigned StoreAlign = findStoreAlignment(DL, SI);
-  unsigned LoadAlign = findLoadAlignment(DL, LI);
-  return MinAlign(StoreAlign, LoadAlign);
+static Align findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
+                                 const LoadInst *LI) {
+  Align StoreAlign = findStoreAlignment(DL, SI);
+  Align LoadAlign = findLoadAlignment(DL, LI);
+  return commonAlignment(StoreAlign, LoadAlign);
 }

 // This method try to lift a store instruction before position P.
@@ -646,7 +642,7 @@ bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
             LI, SI->getPointerOperand()->stripPointerCasts(),
             LI->getPointerOperand()->stripPointerCasts(),
             DL.getTypeStoreSize(SI->getOperand(0)->getType()),
-            findCommonAlignment(DL, SI, LI), C);
+            findCommonAlignment(DL, SI, LI).value(), C);
         if (changed) {
           MD->removeInstruction(SI);
           SI->eraseFromParent();
@@ -978,12 +974,12 @@ bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
   // example we could be moving from movaps -> movq on x86.
   IRBuilder<> Builder(M);
   if (UseMemMove)
-    Builder.CreateMemMove(M->getRawDest(), M->getDestAlignment(),
-                          MDep->getRawSource(), MDep->getSourceAlignment(),
+    Builder.CreateMemMove(M->getRawDest(), M->getDestAlign(),
+                          MDep->getRawSource(), MDep->getSourceAlign(),
                           M->getLength(), M->isVolatile());
   else
-    Builder.CreateMemCpy(M->getRawDest(), M->getDestAlignment(),
-                         MDep->getRawSource(), MDep->getSourceAlignment(),
+    Builder.CreateMemCpy(M->getRawDest(), M->getDestAlign(),
+                         MDep->getRawSource(), MDep->getSourceAlign(),
                          M->getLength(), M->isVolatile());

   // Remove the instruction we're replacing.
diff --git a/llvm/lib/Transforms/Utils/InlineFunction.cpp b/llvm/lib/Transforms/Utils/InlineFunction.cpp
index 770dad0e1b0..401a0e8acb0 100644
--- a/llvm/lib/Transforms/Utils/InlineFunction.cpp
+++ b/llvm/lib/Transforms/Utils/InlineFunction.cpp
@@ -1254,7 +1254,8 @@ static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
   // Always generate a memcpy of alignment 1 here because we don't know
   // the alignment of the src pointer. Other optimizations can infer
   // better alignment.
-  Builder.CreateMemCpy(Dst, /*DstAlign*/1, Src, /*SrcAlign*/1, Size);
+  Builder.CreateMemCpy(Dst, /*DstAlign*/ Align::None(), Src,
+                       /*SrcAlign*/ Align::None(), Size);
 }

 /// When inlining a call site that has a byval argument,
diff --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index 7213c0ca72d..6c096008b9e 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -288,8 +288,9 @@ Value *LibCallSimplifier::emitStrLenMemCpy(Value *Src, Value *Dst, uint64_t Len,
   // We have enough information to now generate the memcpy call to do the
   // concatenation for us.  Make a memcpy to copy the nul byte with align = 1.
-  B.CreateMemCpy(CpyDst, 1, Src, 1,
-                 ConstantInt::get(DL.getIntPtrType(Src->getContext()), Len + 1));
+  B.CreateMemCpy(
+      CpyDst, Align::None(), Src, Align::None(),
+      ConstantInt::get(DL.getIntPtrType(Src->getContext()), Len + 1));
   return Dst;
 }

@@ -561,7 +562,7 @@ Value *LibCallSimplifier::optimizeStrCpy(CallInst *CI, IRBuilder<> &B) {
   // We have enough information to now generate the memcpy call to do the
   // copy for us.  Make a memcpy to copy the nul byte with align = 1.
   CallInst *NewCI =
-      B.CreateMemCpy(Dst, 1, Src, 1,
+      B.CreateMemCpy(Dst, Align::None(), Src, Align::None(),
                      ConstantInt::get(DL.getIntPtrType(CI->getContext()), Len));
   NewCI->setAttributes(CI->getAttributes());
   return Dst;
@@ -589,7 +590,8 @@ Value *LibCallSimplifier::optimizeStpCpy(CallInst *CI, IRBuilder<> &B) {

   // We have enough information to now generate the memcpy call to do the
   // copy for us.  Make a memcpy to copy the nul byte with align = 1.
-  CallInst *NewCI = B.CreateMemCpy(Dst, 1, Src, 1, LenV);
+  CallInst *NewCI =
+      B.CreateMemCpy(Dst, Align::None(), Src, Align::None(), LenV);
   NewCI->setAttributes(CI->getAttributes());
   return DstEnd;
 }
@@ -637,7 +639,8 @@ Value *LibCallSimplifier::optimizeStrNCpy(CallInst *CI, IRBuilder<> &B) {
   Type *PT = Callee->getFunctionType()->getParamType(0);
   // strncpy(x, s, c) -> memcpy(align 1 x, align 1 s, c) [s and c are constant]
-  CallInst *NewCI = B.CreateMemCpy(Dst, 1, Src, 1, ConstantInt::get(DL.getIntPtrType(PT), Len));
+  CallInst *NewCI = B.CreateMemCpy(Dst, Align::None(), Src, Align::None(),
+                                   ConstantInt::get(DL.getIntPtrType(PT), Len));
   NewCI->setAttributes(CI->getAttributes());
   return Dst;
 }
@@ -1113,8 +1116,8 @@ Value *LibCallSimplifier::optimizeMemCpy(CallInst *CI, IRBuilder<> &B) {
     return nullptr;

   // memcpy(x, y, n) -> llvm.memcpy(align 1 x, align 1 y, n)
-  CallInst *NewCI =
-      B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(1), 1, Size);
+  CallInst *NewCI = B.CreateMemCpy(CI->getArgOperand(0), Align::None(),
+                                   CI->getArgOperand(1), Align::None(), Size);
   NewCI->setAttributes(CI->getAttributes());
   return CI->getArgOperand(0);
 }
@@ -1143,7 +1146,8 @@ Value *LibCallSimplifier::optimizeMemCCpy(CallInst *CI, IRBuilder<> &B) {
   size_t Pos = SrcStr.find(StopChar->getSExtValue() & 0xFF);
   if (Pos == StringRef::npos) {
     if (N->getZExtValue() <= SrcStr.size()) {
-      B.CreateMemCpy(Dst, 1, Src, 1, CI->getArgOperand(3));
+      B.CreateMemCpy(Dst, Align::None(), Src, Align::None(),
+                     CI->getArgOperand(3));
       return Constant::getNullValue(CI->getType());
     }
     return nullptr;
@@ -1152,7 +1156,7 @@ Value *LibCallSimplifier::optimizeMemCCpy(CallInst *CI, IRBuilder<> &B) {
   Value *NewN = ConstantInt::get(N->getType(),
                                  std::min(uint64_t(Pos + 1), N->getZExtValue()));
   // memccpy -> llvm.memcpy
-  B.CreateMemCpy(Dst, 1, Src, 1, NewN);
+  B.CreateMemCpy(Dst, Align::None(), Src, Align::None(), NewN);
   return Pos + 1 <= N->getZExtValue()
              ? B.CreateInBoundsGEP(B.getInt8Ty(), Dst, NewN)
              : Constant::getNullValue(CI->getType());
@@ -1162,7 +1166,8 @@ Value *LibCallSimplifier::optimizeMemPCpy(CallInst *CI, IRBuilder<> &B) {
   Value *Dst = CI->getArgOperand(0);
   Value *N = CI->getArgOperand(2);
   // mempcpy(x, y, n) -> llvm.memcpy(align 1 x, align 1 y, n), x + n
-  CallInst *NewCI = B.CreateMemCpy(Dst, 1, CI->getArgOperand(1), 1, N);
+  CallInst *NewCI = B.CreateMemCpy(Dst, Align::None(), CI->getArgOperand(1),
+                                   Align::None(), N);
   NewCI->setAttributes(CI->getAttributes());
   return B.CreateInBoundsGEP(B.getInt8Ty(), Dst, N);
 }
@@ -1174,8 +1179,8 @@ Value *LibCallSimplifier::optimizeMemMove(CallInst *CI, IRBuilder<> &B) {
     return nullptr;

   // memmove(x, y, n) -> llvm.memmove(align 1 x, align 1 y, n)
-  CallInst *NewCI =
-      B.CreateMemMove(CI->getArgOperand(0), 1, CI->getArgOperand(1), 1, Size);
+  CallInst *NewCI = B.CreateMemMove(CI->getArgOperand(0), Align::None(),
+                                    CI->getArgOperand(1), Align::None(), Size);
   NewCI->setAttributes(CI->getAttributes());
   return CI->getArgOperand(0);
 }
@@ -2466,9 +2471,11 @@ Value *LibCallSimplifier::optimizeSPrintFString(CallInst *CI, IRBuilder<> &B) {
       return nullptr; // we found a format specifier, bail out.

     // sprintf(str, fmt) -> llvm.memcpy(align 1 str, align 1 fmt, strlen(fmt)+1)
-    B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(1), 1,
-                   ConstantInt::get(DL.getIntPtrType(CI->getContext()),
-                                    FormatStr.size() + 1)); // Copy the null byte.
+    B.CreateMemCpy(
+        CI->getArgOperand(0), Align::None(), CI->getArgOperand(1),
+        Align::None(),
+        ConstantInt::get(DL.getIntPtrType(CI->getContext()),
+                         FormatStr.size() + 1)); // Copy the null byte.
     return ConstantInt::get(CI->getType(), FormatStr.size());
   }
@@ -2503,7 +2510,8 @@ Value *LibCallSimplifier::optimizeSPrintFString(CallInst *CI, IRBuilder<> &B) {
       return nullptr;
     Value *IncLen =
         B.CreateAdd(Len, ConstantInt::get(Len->getType(), 1), "leninc");
-    B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(2), 1, IncLen);
+    B.CreateMemCpy(CI->getArgOperand(0), Align::None(), CI->getArgOperand(2),
+                   Align::None(), IncLen);

     // The sprintf result is the unincremented number of bytes in the string.
     return B.CreateIntCast(Len, CI->getType(), false);
@@ -2574,7 +2582,8 @@ Value *LibCallSimplifier::optimizeSnPrintFString(CallInst *CI, IRBuilder<> &B) {
     // snprintf(dst, size, fmt) -> llvm.memcpy(align 1 dst, align 1 fmt,
     // strlen(fmt)+1)
     B.CreateMemCpy(
-        CI->getArgOperand(0), 1, CI->getArgOperand(2), 1,
+        CI->getArgOperand(0), Align::None(), CI->getArgOperand(2),
+        Align::None(),
         ConstantInt::get(DL.getIntPtrType(CI->getContext()),
                          FormatStr.size() + 1)); // Copy the null byte.
     return ConstantInt::get(CI->getType(), FormatStr.size());
@@ -2615,7 +2624,8 @@ Value *LibCallSimplifier::optimizeSnPrintFString(CallInst *CI, IRBuilder<> &B) {
     else if (N < Str.size() + 1)
       return nullptr;

-    B.CreateMemCpy(CI->getArgOperand(0), 1, CI->getArgOperand(3), 1,
+    B.CreateMemCpy(CI->getArgOperand(0), Align::None(), CI->getArgOperand(3),
+                   Align::None(),
                    ConstantInt::get(CI->getType(), Str.size() + 1));

     // The snprintf result is the unincremented number of bytes in the string.
@@ -2833,7 +2843,8 @@ Value *LibCallSimplifier::optimizePuts(CallInst *CI, IRBuilder<> &B) {

 Value *LibCallSimplifier::optimizeBCopy(CallInst *CI, IRBuilder<> &B) {
   // bcopy(src, dst, n) -> llvm.memmove(dst, src, n)
-  return B.CreateMemMove(CI->getArgOperand(1), 1, CI->getArgOperand(0), 1,
+  return B.CreateMemMove(CI->getArgOperand(1), Align::None(),
+                         CI->getArgOperand(0), Align::None(),
                          CI->getArgOperand(2));
 }
@@ -3266,8 +3277,9 @@ FortifiedLibCallSimplifier::isFortifiedCallFoldable(CallInst *CI,
 Value *FortifiedLibCallSimplifier::optimizeMemCpyChk(CallInst *CI,
                                                      IRBuilder<> &B) {
   if (isFortifiedCallFoldable(CI, 3, 2)) {
-    CallInst *NewCI = B.CreateMemCpy(
-        CI->getArgOperand(0), 1, CI->getArgOperand(1), 1, CI->getArgOperand(2));
+    CallInst *NewCI = B.CreateMemCpy(CI->getArgOperand(0), Align::None(),
+                                     CI->getArgOperand(1), Align::None(),
+                                     CI->getArgOperand(2));
     NewCI->setAttributes(CI->getAttributes());
     return CI->getArgOperand(0);
   }
@@ -3277,8 +3289,9 @@ Value *FortifiedLibCallSimplifier::optimizeMemCpyChk(CallInst *CI,
 Value *FortifiedLibCallSimplifier::optimizeMemMoveChk(CallInst *CI,
                                                       IRBuilder<> &B) {
   if (isFortifiedCallFoldable(CI, 3, 2)) {
-    CallInst *NewCI = B.CreateMemMove(
-        CI->getArgOperand(0), 1, CI->getArgOperand(1), 1, CI->getArgOperand(2));
+    CallInst *NewCI = B.CreateMemMove(CI->getArgOperand(0), Align::None(),
+                                      CI->getArgOperand(1), Align::None(),
+                                      CI->getArgOperand(2));
     NewCI->setAttributes(CI->getAttributes());
     return CI->getArgOperand(0);
   }
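Editor's note (not part of the patch): several hunks above (AddressSanitizer.cpp, MemCpyOptimizer.cpp) replace the manual "alignment 0 means fall back to the ABI alignment" dance with DataLayout::getValueOrABITypeAlignment. A minimal sketch of that fallback, assuming the DataLayout API shown in those hunks; the helper name storeAlignment is hypothetical and mirrors findStoreAlignment() above.

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/Alignment.h"

using namespace llvm;

// Hypothetical helper: a StoreInst with no explicit alignment
// (getAlignment() == 0, i.e. MaybeAlign() is empty) falls back to the ABI
// alignment of the stored type, so the result is always a definite Align.
static Align storeAlignment(const DataLayout &DL, const StoreInst *SI) {
  return DL.getValueOrABITypeAlignment(MaybeAlign(SI->getAlignment()),
                                       SI->getOperand(0)->getType());
}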