summaryrefslogtreecommitdiffstats
path: root/llvm
diff options
context:
space:
mode:
authorChris Lattner <sabre@nondot.org>2009-02-12 06:56:08 +0000
committerChris Lattner <sabre@nondot.org>2009-02-12 06:56:08 +0000
commit5297c635656f43fec7877e3e5cc12c2b1933cd2b (patch)
treecea201e0f90954217a6602a38045d70aec5c8b1e /llvm
parent1331d53c27a7ad00463bf1e4fd1e43e00d7ca334 (diff)
downloadbcm5719-llvm-5297c635656f43fec7877e3e5cc12c2b1933cd2b.tar.gz
bcm5719-llvm-5297c635656f43fec7877e3e5cc12c2b1933cd2b.zip
fix PR3537: if resetting BBI back to the start of a block, we need to
forget about already-inserted address expressions (clear SunkAddrs), so a
sunk value is never reused before its definition. llvm-svn: 64362
Diffstat (limited to 'llvm')
-rw-r--r--llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp13
-rw-r--r--llvm/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll35
2 files changed, 44 insertions, 4 deletions
diff --git a/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp b/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
index 0a1c641e2bf..12c76e85251 100644
--- a/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
+++ b/llvm/lib/Transforms/Scalar/CodeGenPrepare.cpp
@@ -1241,11 +1241,13 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
// computation.
Value *&SunkAddr = SunkAddrs[Addr];
if (SunkAddr) {
- DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << "\n");
+ DEBUG(cerr << "CGP: Reusing nonlocal addrmode: " << AddrMode << " for "
+ << *MemoryInst);
if (SunkAddr->getType() != Addr->getType())
SunkAddr = new BitCastInst(SunkAddr, Addr->getType(), "tmp", InsertPt);
} else {
- DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << "\n");
+ DEBUG(cerr << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
+ << *MemoryInst);
const Type *IntPtrTy = TLI->getTargetData()->getIntPtrType();
Value *Result = 0;
@@ -1505,9 +1507,12 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
if (TLI && isa<InlineAsm>(CI->getCalledValue()))
if (const TargetAsmInfo *TAI =
TLI->getTargetMachine().getTargetAsmInfo()) {
- if (TAI->ExpandInlineAsm(CI))
+ if (TAI->ExpandInlineAsm(CI)) {
BBI = BB.begin();
- else
+ // Avoid processing instructions out of order, which could cause
+ // reuse before a value is defined.
+ SunkAddrs.clear();
+ } else
// Sink address computing for memory operands into the block.
MadeChange |= OptimizeInlineAsmInst(I, &(*CI), SunkAddrs);
}
diff --git a/llvm/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll b/llvm/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll
new file mode 100644
index 00000000000..7b73a86a72e
--- /dev/null
+++ b/llvm/test/CodeGen/X86/2009-02-11-codegenprepare-reuse.ll
@@ -0,0 +1,35 @@
+; RUN: llvm-as < %s | llc
+; PR3537
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
+target triple = "i386-apple-darwin9.6"
+ %struct.GetBitContext = type <{ i8*, i8*, i32, i32 }>
+
+define i32 @alac_decode_frame() nounwind {
+entry:
+ %tmp2 = load i8** null ; <i8*> [#uses=2]
+ %tmp34 = getelementptr i8* %tmp2, i32 4 ; <i8*> [#uses=2]
+ %tmp5.i424 = bitcast i8* %tmp34 to i8** ; <i8**> [#uses=2]
+ %tmp15.i = getelementptr i8* %tmp2, i32 12 ; <i8*> [#uses=1]
+ %0 = bitcast i8* %tmp15.i to i32* ; <i32*> [#uses=1]
+ br i1 false, label %if.then43, label %if.end47
+
+if.then43: ; preds = %entry
+ ret i32 0
+
+if.end47: ; preds = %entry
+ %tmp5.i590 = load i8** %tmp5.i424 ; <i8*> [#uses=0]
+ store i32 19, i32* %0
+ %tmp6.i569 = load i8** %tmp5.i424 ; <i8*> [#uses=0]
+ %1 = call i32 asm "bswap $0", "=r,0,~{dirflag},~{fpsr},~{flags}"(i32 0) nounwind ; <i32> [#uses=0]
+ br i1 false, label %bb.nph, label %if.then63
+
+if.then63: ; preds = %if.end47
+ unreachable
+
+bb.nph: ; preds = %if.end47
+ %2 = bitcast i8* %tmp34 to %struct.GetBitContext* ; <%struct.GetBitContext*> [#uses=1]
+ %call9.i = call fastcc i32 @decode_scalar(%struct.GetBitContext* %2, i32 0, i32 0, i32 0) nounwind ; <i32> [#uses=0]
+ unreachable
+}
+
+declare fastcc i32 @decode_scalar(%struct.GetBitContext* nocapture, i32, i32, i32) nounwind
OpenPOWER on IntegriCloud