author    Evan Cheng <evan.cheng@apple.com>    2008-02-26 02:42:37 +0000
committer Evan Cheng <evan.cheng@apple.com>    2008-02-26 02:42:37 +0000
commit    1da250097bfadac69067e6e734b1f65fa23983a3 (patch)
tree      b9db1b91d041faf0411c250cb689b9d58cf340e7 /llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
parent    5affb58dd64025b09697c77b1094940d3619065b (diff)
Fix PR2076. CodeGenPrepare now sinks address computation for inline asm memory
operands into the inline asm block.

llvm-svn: 47589
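For illustration, this is the kind of source the change targets (a hypothetical
example, not taken from the patch or from PR2076): the "m" constraint makes
buf[idx] an inline asm memory operand, so the address &buf[idx] must be
materialized before the asm executes. With this patch, CodeGenPrepare can sink
that address computation into the asm's basic block, where it has a chance to
fold into the operand's addressing mode.

  static int buf[256];

  void touch(unsigned idx) {
    // "m" turns buf[idx] into a memory operand of the inline asm; the
    // address computation feeding it is now a sinking candidate.
    asm volatile("" : : "m"(buf[idx]));
  }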
Diffstat (limited to 'llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp')
 llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp | 55 +++++++++++++++++++++++++++
 1 file changed, 55 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp b/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 5c572a640ed..e6f7283ef4e 100644
--- a/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -18,6 +18,7 @@
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
+#include "llvm/InlineAsm.h"
#include "llvm/Instructions.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetAsmInfo.h"
@@ -28,6 +29,7 @@
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/Support/CallSite.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
@@ -58,6 +60,8 @@ namespace {
bool OptimizeLoadStoreInst(Instruction *I, Value *Addr,
const Type *AccessTy,
DenseMap<Value*,Value*> &SunkAddrs);
+ bool OptimizeInlineAsmInst(Instruction *I, CallSite CS,
+ DenseMap<Value*,Value*> &SunkAddrs);
bool OptimizeExtUses(Instruction *I);
};
}
@@ -928,6 +932,54 @@ bool CodeGenPrepare::OptimizeLoadStoreInst(Instruction *LdStInst, Value *Addr,
return true;
}
+/// OptimizeInlineAsmInst - If there are any memory operands, use
+/// OptimizeLoadStoreInst to sink their address computation into the block
+/// when possible / profitable.
+bool CodeGenPrepare::OptimizeInlineAsmInst(Instruction *I, CallSite CS,
+ DenseMap<Value*,Value*> &SunkAddrs) {
+ bool MadeChange = false;
+ InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
+
+ // Do a prepass over the constraints, canonicalizing them, and building up the
+ // ConstraintOperands list.
+ std::vector<InlineAsm::ConstraintInfo>
+ ConstraintInfos = IA->ParseConstraints();
+
+ /// ConstraintOperands - Information about all of the constraints.
+ std::vector<TargetLowering::AsmOperandInfo> ConstraintOperands;
+ unsigned ArgNo = 0; // ArgNo - The argument of the CallInst.
+ for (unsigned i = 0, e = ConstraintInfos.size(); i != e; ++i) {
+ ConstraintOperands.
+ push_back(TargetLowering::AsmOperandInfo(ConstraintInfos[i]));
+ TargetLowering::AsmOperandInfo &OpInfo = ConstraintOperands.back();
+
+ // Compute the value type for each operand.
+ switch (OpInfo.Type) {
+ case InlineAsm::isOutput:
+ if (OpInfo.isIndirect)
+ OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
+ break;
+ case InlineAsm::isInput:
+ OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
+ break;
+ case InlineAsm::isClobber:
+ // Nothing to do.
+ break;
+ }
+
+ // Compute the constraint code and ConstraintType to use.
+ OpInfo.ComputeConstraintToUse(*TLI);
+
+ if (OpInfo.ConstraintType == TargetLowering::C_Memory) {
+ Value *OpVal = OpInfo.CallOperandVal;
+ MadeChange |= OptimizeLoadStoreInst(I, OpVal, OpVal->getType(),
+ SunkAddrs);
+ }
+ }
+
+ return MadeChange;
+}
+
bool CodeGenPrepare::OptimizeExtUses(Instruction *I) {
BasicBlock *DefBB = I->getParent();
@@ -1076,6 +1128,9 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
TLI->getTargetMachine().getTargetAsmInfo()) {
if (TAI->ExpandInlineAsm(CI))
BBI = BB.begin();
+ else
+ // Sink address computation for memory operands into the block.
+ MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
}
}
}
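For reference, the constraint walk the new function performs condenses to the
sketch below. It mirrors the 2008-era API used in the hunk above (InlineAsm,
CallSite, TargetLowering::AsmOperandInfo); the helper name and signature are
illustrative, not part of the patch. The key accounting is that indirect
outputs and all inputs each consume one call argument, clobbers consume none,
and the operands whose constraint resolves to C_Memory are the ones handed to
OptimizeLoadStoreInst for address sinking.

  #include "llvm/InlineAsm.h"
  #include "llvm/Support/CallSite.h"
  #include "llvm/Target/TargetLowering.h"
  #include <vector>
  using namespace llvm;

  // Collect the inline asm operands whose constraint resolves to memory.
  static void collectMemoryOperands(CallSite CS, const TargetLowering &TLI,
                                    std::vector<Value*> &MemOps) {
    InlineAsm *IA = cast<InlineAsm>(CS.getCalledValue());
    std::vector<InlineAsm::ConstraintInfo> CIs = IA->ParseConstraints();
    unsigned ArgNo = 0;  // Next call argument to pair with a constraint.
    for (unsigned i = 0, e = CIs.size(); i != e; ++i) {
      TargetLowering::AsmOperandInfo OpInfo(CIs[i]);
      // Indirect outputs and all inputs consume one argument each.
      if ((OpInfo.Type == InlineAsm::isOutput && OpInfo.isIndirect) ||
          OpInfo.Type == InlineAsm::isInput)
        OpInfo.CallOperandVal = CS.getArgument(ArgNo++);
      OpInfo.ComputeConstraintToUse(TLI);
      if (OpInfo.ConstraintType == TargetLowering::C_Memory)
        MemOps.push_back(OpInfo.CallOperandVal);
    }
  }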