Diffstat (limited to 'llvm/lib/Transforms/InstCombine')
4 files changed, 23 insertions, 18 deletions
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index 6a0bd29069a..80eb51be84e 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -171,7 +171,7 @@ Instruction *InstCombiner::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
   Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
   Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
-  LoadInst *L = Builder.CreateLoad(Src);
+  LoadInst *L = Builder.CreateLoad(IntType, Src);
   // Alignment from the mem intrinsic will be better, so use it.
   L->setAlignment(CopySrcAlign);
   if (CopyMD)
@@ -1182,7 +1182,8 @@ static Value *simplifyMaskedLoad(const IntrinsicInst &II,
   if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
     Value *LoadPtr = II.getArgOperand(0);
     unsigned Alignment = cast<ConstantInt>(II.getArgOperand(1))->getZExtValue();
-    return Builder.CreateAlignedLoad(LoadPtr, Alignment, "unmaskedload");
+    return Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
+                                     "unmaskedload");
   }
 
   return nullptr;
@@ -1499,7 +1500,7 @@ static Value *simplifyNeonVld1(const IntrinsicInst &II,
   auto *BCastInst = Builder.CreateBitCast(II.getArgOperand(0),
                                           PointerType::get(II.getType(), 0));
-  return Builder.CreateAlignedLoad(BCastInst, Alignment);
+  return Builder.CreateAlignedLoad(II.getType(), BCastInst, Alignment);
 }
 
 // Returns true iff the 2 intrinsics have the same operands, limiting the
@@ -2300,7 +2301,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
                                    &DT) >= 16) {
       Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
-      return new LoadInst(Ptr);
+      return new LoadInst(II->getType(), Ptr);
     }
     break;
   case Intrinsic::ppc_vsx_lxvw4x:
@@ -2308,7 +2309,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
     // Turn PPC VSX loads into normal loads.
     Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                        PointerType::getUnqual(II->getType()));
-    return new LoadInst(Ptr, Twine(""), false, 1);
+    return new LoadInst(II->getType(), Ptr, Twine(""), false, 1);
   }
   case Intrinsic::ppc_altivec_stvx:
   case Intrinsic::ppc_altivec_stvxl:
@@ -2336,7 +2337,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
                                   II->getType()->getVectorNumElements());
     Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                        PointerType::getUnqual(VTy));
-    Value *Load = Builder.CreateLoad(Ptr);
+    Value *Load = Builder.CreateLoad(VTy, Ptr);
     return new FPExtInst(Load, II->getType());
   }
   break;
@@ -2346,7 +2347,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
                                    &DT) >= 32) {
       Value *Ptr = Builder.CreateBitCast(II->getArgOperand(0),
                                          PointerType::getUnqual(II->getType()));
-      return new LoadInst(Ptr);
+      return new LoadInst(II->getType(), Ptr);
     }
     break;
   case Intrinsic::ppc_qpx_qvstfs:
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index a7d86080451..a7750695fef 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -298,7 +298,7 @@ void PointerReplacer::replace(Instruction *I) {
   if (auto *LT = dyn_cast<LoadInst>(I)) {
     auto *V = getReplacement(LT->getPointerOperand());
     assert(V && "Operand not replaced");
-    auto *NewI = new LoadInst(V);
+    auto *NewI = new LoadInst(I->getType(), V);
     NewI->takeName(LT);
     IC.InsertNewInstWith(NewI, *LT);
     IC.replaceInstUsesWith(*LT, NewI);
@@ -465,7 +465,7 @@ static LoadInst *combineLoadToNewType(InstCombiner &IC, LoadInst &LI, Type *NewTy,
     NewPtr = IC.Builder.CreateBitCast(Ptr, NewTy->getPointerTo(AS));
 
   LoadInst *NewLoad = IC.Builder.CreateAlignedLoad(
-      NewPtr, LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
+      NewTy, NewPtr, LI.getAlignment(), LI.isVolatile(), LI.getName() + Suffix);
   NewLoad->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
   MDBuilder MDB(NewLoad->getContext());
   for (const auto &MDPair : MD) {
@@ -724,7 +724,8 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
       auto *Ptr = IC.Builder.CreateInBoundsGEP(ST, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
       auto EltAlign = MinAlign(Align, SL->getElementOffset(i));
-      auto *L = IC.Builder.CreateAlignedLoad(Ptr, EltAlign, Name + ".unpack");
+      auto *L = IC.Builder.CreateAlignedLoad(ST->getElementType(i), Ptr,
+                                             EltAlign, Name + ".unpack");
       // Propagate AA metadata. It'll still be valid on the narrowed load.
       AAMDNodes AAMD;
       LI.getAAMetadata(AAMD);
@@ -774,8 +775,8 @@ static Instruction *unpackLoadToAggregate(InstCombiner &IC, LoadInst &LI) {
       };
       auto *Ptr = IC.Builder.CreateInBoundsGEP(AT, Addr, makeArrayRef(Indices),
                                                Name + ".elt");
-      auto *L = IC.Builder.CreateAlignedLoad(Ptr, MinAlign(Align, Offset),
-                                             Name + ".unpack");
+      auto *L = IC.Builder.CreateAlignedLoad(
+          AT->getElementType(), Ptr, MinAlign(Align, Offset), Name + ".unpack");
       AAMDNodes AAMD;
       LI.getAAMetadata(AAMD);
       L->setAAMetadata(AAMD);
@@ -1065,10 +1066,12 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
     unsigned Align = LI.getAlignment();
     if (isSafeToLoadUnconditionally(SI->getOperand(1), Align, DL, SI) &&
         isSafeToLoadUnconditionally(SI->getOperand(2), Align, DL, SI)) {
-      LoadInst *V1 = Builder.CreateLoad(SI->getOperand(1),
-                                        SI->getOperand(1)->getName()+".val");
-      LoadInst *V2 = Builder.CreateLoad(SI->getOperand(2),
-                                        SI->getOperand(2)->getName()+".val");
+      LoadInst *V1 =
+          Builder.CreateLoad(LI.getType(), SI->getOperand(1),
+                             SI->getOperand(1)->getName() + ".val");
+      LoadInst *V2 =
+          Builder.CreateLoad(LI.getType(), SI->getOperand(2),
+                             SI->getOperand(2)->getName() + ".val");
       assert(LI.isUnordered() && "implied by above");
       V1->setAlignment(Align);
       V1->setAtomic(LI.getOrdering(), LI.getSyncScopeID());
diff --git a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
index 0ef78214457..e217adec7ed 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombinePHI.cpp
@@ -595,7 +595,8 @@ Instruction *InstCombiner::FoldPHIArgLoadIntoPHI(PHINode &PN) {
   Value *InVal = FirstLI->getOperand(0);
   NewPN->addIncoming(InVal, PN.getIncomingBlock(0));
 
-  LoadInst *NewLI = new LoadInst(NewPN, "", isVolatile, LoadAlignment);
+  LoadInst *NewLI =
+      new LoadInst(FirstLI->getType(), NewPN, "", isVolatile, LoadAlignment);
 
   unsigned KnownIDs[] = {
     LLVMContext::MD_tbaa,
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index b30b3eeadcc..90a54a71a4c 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2686,7 +2686,7 @@ Instruction *InstCombiner::visitExtractValueInst(ExtractValueInst &EV) {
         Builder.SetInsertPoint(L);
         Value *GEP = Builder.CreateInBoundsGEP(L->getType(),
                                                L->getPointerOperand(), Indices);
-        Instruction *NL = Builder.CreateLoad(GEP);
+        Instruction *NL = Builder.CreateLoad(EV.getType(), GEP);
         // Whatever aliasing information we had for the orignal load must also
         // hold for the smaller load, so propagate the annotations.
         AAMDNodes Nodes;
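Every hunk in this patch makes the same mechanical substitution: the type of the loaded value is passed explicitly to IRBuilder's CreateLoad/CreateAlignedLoad and to the LoadInst constructor, instead of being derived from the pointee type of the pointer operand. This is groundwork for opaque pointer types, where a pointer no longer carries a pointee type to infer from. A minimal sketch of the before/after idiom, assuming only an IRBuilder<> and a pointer whose element type the caller already knows (emitLoadOfInt32 is a hypothetical helper for illustration, not code from this patch):

    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    // Hypothetical helper showing the migrated load-creation idiom.
    static Value *emitLoadOfInt32(IRBuilder<> &Builder, Value *Ptr) {
      Type *Int32Ty = Builder.getInt32Ty();
      // Old idiom: the result type was inferred from Ptr's pointee type.
      //   LoadInst *L = Builder.CreateLoad(Ptr);
      // New idiom: the caller states the type it expects to load.
      LoadInst *L = Builder.CreateLoad(Int32Ty, Ptr, "val");
      return L;
    }

The raw new LoadInst(...) constructor forms gain the same leading Type * parameter, which is exactly what each hunk above threads through (IntType, II.getType(), VTy, NewTy, the aggregate element types, and so on).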