| author | Nate Begeman <natebegeman@mac.com> | 2005-11-05 09:21:28 +0000 |
|---|---|---|
| committer | Nate Begeman <natebegeman@mac.com> | 2005-11-05 09:21:28 +0000 |
| commit | 848622f87f00b81bdfef2aed8c26f80e38217713 (patch) | |
| tree | cef0c6b2b461e20d131912b80cd6b21e5f895520 /llvm/lib/Transforms | |
| parent | 75fe59c4ead311d292bec997ff6a08c00e644b6d (diff) | |
| download | bcm5719-llvm-848622f87f00b81bdfef2aed8c26f80e38217713.tar.gz bcm5719-llvm-848622f87f00b81bdfef2aed8c26f80e38217713.zip | |
Add support for alignment of allocation instructions.
Add support for specifying alignment and size of setjmp jmpbufs.
No targets currently do anything with this information, nor is it preserved
in the bytecode representation. That's coming up next.
llvm-svn: 24196
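
For reference, the allocation-instruction constructors gain an alignment operand ahead of the name, as the hunks below show. A minimal sketch of how a pass might use the new overloads, assuming the 2005-era headers (`llvm/Instructions.h`) and treating an alignment of 0 as "use the target default"; the helper name is hypothetical:

```cpp
#include "llvm/Instructions.h"
#include "llvm/Type.h"
using namespace llvm;

// Hypothetical helper: create a stack allocation with an explicit
// alignment. The (Ty, ArraySize, Align, Name, InsertBefore) operand
// order matches the call sites updated in this patch.
static AllocaInst *makeAlignedAlloca(const Type *Ty, unsigned Align,
                                     Instruction *InsertPt) {
  // ArraySize of 0 allocates a single element; Align of 0 defers to
  // whatever the target considers natural for Ty.
  return new AllocaInst(Ty, /*ArraySize*/0, Align, "tmp", InsertPt);
}
```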
Diffstat (limited to 'llvm/lib/Transforms')
| -rw-r--r-- | llvm/lib/Transforms/IPO/GlobalOpt.cpp | 3 |
| -rw-r--r-- | llvm/lib/Transforms/Scalar/InstructionCombining.cpp | 8 |
| -rw-r--r-- | llvm/lib/Transforms/Scalar/LowerInvoke.cpp | 21 |
| -rw-r--r-- | llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp | 5 |
4 files changed, 21 insertions, 16 deletions
diff --git a/llvm/lib/Transforms/IPO/GlobalOpt.cpp b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
index 987703c8679..c9c8835e190 100644
--- a/llvm/lib/Transforms/IPO/GlobalOpt.cpp
+++ b/llvm/lib/Transforms/IPO/GlobalOpt.cpp
@@ -678,7 +678,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
                                         (unsigned)NElements->getRawValue());
   MallocInst *NewMI =
     new MallocInst(NewTy, Constant::getNullValue(Type::UIntTy),
-                   MI->getName(), MI);
+                   MI->getAlignment(), MI->getName(), MI);
   std::vector<Value*> Indices;
   Indices.push_back(Constant::getNullValue(Type::IntTy));
   Indices.push_back(Indices[0]);
@@ -950,6 +950,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
     DEBUG(std::cerr << "LOCALIZING GLOBAL: " << *GV);
     Instruction* FirstI = GS.AccessingFunction->getEntryBlock().begin();
     const Type* ElemTy = GV->getType()->getElementType();
+    // FIXME: Pass Global's alignment when globals have alignment
     AllocaInst* Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), FirstI);
     if (!isa<UndefValue>(GV->getInitializer()))
       new StoreInst(GV->getInitializer(), Alloca, FirstI);
diff --git a/llvm/lib/Transforms/Scalar/InstructionCombining.cpp b/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
index 1164fb2e03e..6934fe27a49 100644
--- a/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/Scalar/InstructionCombining.cpp
@@ -3936,9 +3936,9 @@ Instruction *InstCombiner::PromoteCastOfAllocation(CastInst &CI,
   std::string Name = AI.getName(); AI.setName("");
   AllocationInst *New;
   if (isa<MallocInst>(AI))
-    New = new MallocInst(CastElTy, Amt, Name);
+    New = new MallocInst(CastElTy, Amt, AI.getAlignment(), Name);
   else
-    New = new AllocaInst(CastElTy, Amt, Name);
+    New = new AllocaInst(CastElTy, Amt, AI.getAlignment(), Name);
   InsertNewInstBefore(New, AI);
 
   // If the allocation has multiple uses, insert a cast and change all things
@@ -5266,10 +5266,10 @@ Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
 
     // Create and insert the replacement instruction...
     if (isa<MallocInst>(AI))
-      New = new MallocInst(NewTy, 0, AI.getName());
+      New = new MallocInst(NewTy, 0, AI.getAlignment(), AI.getName());
     else {
       assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
-      New = new AllocaInst(NewTy, 0, AI.getName());
+      New = new AllocaInst(NewTy, 0, AI.getAlignment(), AI.getName());
     }
 
     InsertNewInstBefore(New, AI);
diff --git a/llvm/lib/Transforms/Scalar/LowerInvoke.cpp b/llvm/lib/Transforms/Scalar/LowerInvoke.cpp
index 7039a4b7e1b..be706c3ef8e 100644
--- a/llvm/lib/Transforms/Scalar/LowerInvoke.cpp
+++ b/llvm/lib/Transforms/Scalar/LowerInvoke.cpp
@@ -67,6 +67,8 @@ namespace {
     GlobalVariable *JBListHead;
     Function *SetJmpFn, *LongJmpFn;
   public:
+    LowerInvoke(unsigned Size = 200, unsigned Align = 0) : JumpBufSize(Size),
+      JumpBufAlign(Align) {}
     bool doInitialization(Module &M);
     bool runOnFunction(Function &F);
 
@@ -78,6 +80,9 @@ namespace {
     void rewriteExpensiveInvoke(InvokeInst *II, unsigned InvokeNo,
                                 AllocaInst *InvokeNum, SwitchInst *CatchSwitch);
     bool insertExpensiveEHSupport(Function &F);
+
+    unsigned JumpBufSize;
+    unsigned JumpBufAlign;
   };
 
   RegisterOpt<LowerInvoke>
@@ -87,7 +92,10 @@ namespace {
 const PassInfo *llvm::LowerInvokePassID = X.getPassInfo();
 
 // Public Interface To the LowerInvoke pass.
-FunctionPass *llvm::createLowerInvokePass() { return new LowerInvoke(); }
+FunctionPass *llvm::createLowerInvokePass(unsigned JumpBufSize,
+                                          unsigned JumpBufAlign) {
+  return new LowerInvoke(JumpBufSize, JumpBufAlign);
+}
 
 // doInitialization - Make sure that there is a prototype for abort in the
 // current module.
@@ -95,13 +103,8 @@ bool LowerInvoke::doInitialization(Module &M) {
   const Type *VoidPtrTy = PointerType::get(Type::SByteTy);
   AbortMessage = 0;
   if (ExpensiveEHSupport) {
-    // Insert a type for the linked list of jump buffers.  Unfortunately, we
-    // don't know the size of the target's setjmp buffer, so we make a guess.
-    // If this guess turns out to be too small, bad stuff could happen.
-    unsigned JmpBufSize = 200;  // PPC has 192 words
-    assert(sizeof(jmp_buf) <= JmpBufSize*sizeof(void*) &&
-       "LowerInvoke doesn't know about targets with jmp_buf size > 200 words!");
-    const Type *JmpBufTy = ArrayType::get(VoidPtrTy, JmpBufSize);
+    // Insert a type for the linked list of jump buffers.
+    const Type *JmpBufTy = ArrayType::get(VoidPtrTy, JumpBufSize);
 
     { // The type is recursive, so use a type holder.
       std::vector<const Type*> Elements;
@@ -441,7 +444,7 @@ bool LowerInvoke::insertExpensiveEHSupport(Function &F) {
   // that needs to be restored on all exits from the function.  This is an
   // alloca because the value needs to be live across invokes.
   AllocaInst *JmpBuf =
-    new AllocaInst(JBLinkTy, 0, "jblink", F.begin()->begin());
+    new AllocaInst(JBLinkTy, 0, JumpBufAlign, "jblink", F.begin()->begin());
 
   std::vector<Value*> Idx;
   Idx.push_back(Constant::getNullValue(Type::IntTy));
diff --git a/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 4a6aee391cc..cc03d0ee683 100644
--- a/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/llvm/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -159,7 +159,8 @@ bool SROA::performScalarRepl(Function &F) {
     if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
       ElementAllocas.reserve(ST->getNumContainedTypes());
       for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
-        AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
+        AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
+                                        AI->getAlignment(),
                                         AI->getName() + "." + utostr(i), AI);
         ElementAllocas.push_back(NA);
         WorkList.push_back(NA);  // Add to worklist for recursive processing
@@ -169,7 +170,7 @@ bool SROA::performScalarRepl(Function &F) {
       ElementAllocas.reserve(AT->getNumElements());
       const Type *ElTy = AT->getElementType();
       for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
-        AllocaInst *NA = new AllocaInst(ElTy, 0,
+        AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                         AI->getName() + "." + utostr(i), AI);
         ElementAllocas.push_back(NA);
         WorkList.push_back(NA);  // Add to worklist for recursive processing
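
With LowerInvoke parameterized, a client that knows its target's `jmp_buf` layout can size and align the buffers when scheduling the pass instead of relying on the old hard-coded 200-word guess. A hedged usage sketch, assuming the era's `PassManager` API; the 192-word figure echoes the PPC note in the removed comment, and the 16-byte alignment is purely illustrative:

```cpp
#include "llvm/PassManager.h"
#include "llvm/Transforms/Scalar.h"
using namespace llvm;

void addInvokeLowering(PassManager &PM) {
  // 192 words matches the PPC jmp_buf size mentioned in the old comment;
  // a real client would take both values from its target description.
  PM.add(createLowerInvokePass(/*JumpBufSize*/192, /*JumpBufAlign*/16));
}
```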

