author    Chandler Carruth <chandlerc@gmail.com>    2011-11-24 11:23:15 +0000
committer Chandler Carruth <chandlerc@gmail.com>    2011-11-24 11:23:15 +0000
commit    7adee1a01a39b891a69cf740aa87184f08effe5a (patch)
tree      9f39a963a3db496c2cbe9b49f476df4cdbddef3d
parent    d394bafd2d7014e16e269abd1499235b24d2126d (diff)
Fix a silly use-after-free issue. A much earlier version of this code
needed lots of fanciness around retaining a reference to a Chain's slot
in the BlockToChain map, but that's all gone now. We can just go
directly to allocating the new chain (which will update the mapping for
us) and using it. A somewhat gross, mechanically generated test case
replicates the issue Duncan spotted when actually testing this out.

llvm-svn: 145120
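The bug class here is holding a reference into DenseMap storage across
later insertions into the same map. The sketch below is illustrative
only, not the pass itself: the Chain struct, keys, and function names
are hypothetical and assume nothing beyond llvm/ADT/DenseMap.h.

#include "llvm/ADT/DenseMap.h"

struct Chain {
  // Like BlockChain, the constructor registers the new chain in the map.
  Chain(llvm::DenseMap<int, Chain *> &Map, int Key) { Map[Key] = this; }
};

void buggyPattern(llvm::DenseMap<int, Chain *> &BlockToChain) {
  // BUG: 'Slot' is a reference into the map's bucket storage. Any later
  // insertion may grow the table and free the old buckets, so touching
  // 'Slot' afterwards is a use-after-free. (Leaks are ignored here.)
  Chain *&Slot = BlockToChain[0];
  Slot = new Chain(BlockToChain, 0);
  for (int BB = 1; BB != 100; ++BB)
    new Chain(BlockToChain, BB);       // can rehash and invalidate Slot
  Chain *C = Slot;                     // reads through a dangling reference
  (void)C;
}

void fixedPattern(llvm::DenseMap<int, Chain *> &BlockToChain) {
  // FIX: allocate the chain directly and keep the plain pointer; the
  // constructor updates the map, and no reference into the map is held
  // across further insertions.
  Chain *C = new Chain(BlockToChain, 0);
  (void)C;
}

Keeping only the plain pointer means the merging loop that follows can
insert into BlockToChain freely without leaving anything dangling.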
-rw-r--r--  llvm/lib/CodeGen/MachineBlockPlacement.cpp |   4
-rw-r--r--  llvm/test/CodeGen/X86/block-placement.ll   | 211
2 files changed, 213 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/CodeGen/MachineBlockPlacement.cpp b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
index 870e24884af..55d804b31e6 100644
--- a/llvm/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/llvm/lib/CodeGen/MachineBlockPlacement.cpp
@@ -640,8 +640,8 @@ void MachineBlockPlacement::buildCFGChains(MachineFunction &F) {
   SmallVector<MachineOperand, 4> Cond; // For AnalyzeBranch.
   for (MachineFunction::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
     MachineBasicBlock *BB = FI;
-    BlockChain *&Chain = BlockToChain[BB];
-    Chain = new (ChainAllocator.Allocate()) BlockChain(BlockToChain, BB);
+    BlockChain *Chain
+      = new (ChainAllocator.Allocate()) BlockChain(BlockToChain, BB);
     // Also, merge any blocks which we cannot reason about and must preserve
     // the exact fallthrough behavior for.
     for (;;) {
diff --git a/llvm/test/CodeGen/X86/block-placement.ll b/llvm/test/CodeGen/X86/block-placement.ll
index a6ddc4d68ee..f87d1a6daf7 100644
--- a/llvm/test/CodeGen/X86/block-placement.ll
+++ b/llvm/test/CodeGen/X86/block-placement.ll
@@ -649,3 +649,214 @@ exit:
ret void
}
+define void @many_unanalyzable_branches() {
+; Ensure that we don't crash as we're building up many unanalyzable branches,
+; blocks, and loops.
+;
+; CHECK: many_unanalyzable_branches
+; CHECK: %entry
+; CHECK: %exit
+
+entry:
+ br label %0
+
+ %val0 = volatile load float* undef
+ %cmp0 = fcmp une float %val0, undef
+ br i1 %cmp0, label %1, label %0
+ %val1 = volatile load float* undef
+ %cmp1 = fcmp une float %val1, undef
+ br i1 %cmp1, label %2, label %1
+ %val2 = volatile load float* undef
+ %cmp2 = fcmp une float %val2, undef
+ br i1 %cmp2, label %3, label %2
+ %val3 = volatile load float* undef
+ %cmp3 = fcmp une float %val3, undef
+ br i1 %cmp3, label %4, label %3
+ %val4 = volatile load float* undef
+ %cmp4 = fcmp une float %val4, undef
+ br i1 %cmp4, label %5, label %4
+ %val5 = volatile load float* undef
+ %cmp5 = fcmp une float %val5, undef
+ br i1 %cmp5, label %6, label %5
+ %val6 = volatile load float* undef
+ %cmp6 = fcmp une float %val6, undef
+ br i1 %cmp6, label %7, label %6
+ %val7 = volatile load float* undef
+ %cmp7 = fcmp une float %val7, undef
+ br i1 %cmp7, label %8, label %7
+ %val8 = volatile load float* undef
+ %cmp8 = fcmp une float %val8, undef
+ br i1 %cmp8, label %9, label %8
+ %val9 = volatile load float* undef
+ %cmp9 = fcmp une float %val9, undef
+ br i1 %cmp9, label %10, label %9
+ %val10 = volatile load float* undef
+ %cmp10 = fcmp une float %val10, undef
+ br i1 %cmp10, label %11, label %10
+ %val11 = volatile load float* undef
+ %cmp11 = fcmp une float %val11, undef
+ br i1 %cmp11, label %12, label %11
+ %val12 = volatile load float* undef
+ %cmp12 = fcmp une float %val12, undef
+ br i1 %cmp12, label %13, label %12
+ %val13 = volatile load float* undef
+ %cmp13 = fcmp une float %val13, undef
+ br i1 %cmp13, label %14, label %13
+ %val14 = volatile load float* undef
+ %cmp14 = fcmp une float %val14, undef
+ br i1 %cmp14, label %15, label %14
+ %val15 = volatile load float* undef
+ %cmp15 = fcmp une float %val15, undef
+ br i1 %cmp15, label %16, label %15
+ %val16 = volatile load float* undef
+ %cmp16 = fcmp une float %val16, undef
+ br i1 %cmp16, label %17, label %16
+ %val17 = volatile load float* undef
+ %cmp17 = fcmp une float %val17, undef
+ br i1 %cmp17, label %18, label %17
+ %val18 = volatile load float* undef
+ %cmp18 = fcmp une float %val18, undef
+ br i1 %cmp18, label %19, label %18
+ %val19 = volatile load float* undef
+ %cmp19 = fcmp une float %val19, undef
+ br i1 %cmp19, label %20, label %19
+ %val20 = volatile load float* undef
+ %cmp20 = fcmp une float %val20, undef
+ br i1 %cmp20, label %21, label %20
+ %val21 = volatile load float* undef
+ %cmp21 = fcmp une float %val21, undef
+ br i1 %cmp21, label %22, label %21
+ %val22 = volatile load float* undef
+ %cmp22 = fcmp une float %val22, undef
+ br i1 %cmp22, label %23, label %22
+ %val23 = volatile load float* undef
+ %cmp23 = fcmp une float %val23, undef
+ br i1 %cmp23, label %24, label %23
+ %val24 = volatile load float* undef
+ %cmp24 = fcmp une float %val24, undef
+ br i1 %cmp24, label %25, label %24
+ %val25 = volatile load float* undef
+ %cmp25 = fcmp une float %val25, undef
+ br i1 %cmp25, label %26, label %25
+ %val26 = volatile load float* undef
+ %cmp26 = fcmp une float %val26, undef
+ br i1 %cmp26, label %27, label %26
+ %val27 = volatile load float* undef
+ %cmp27 = fcmp une float %val27, undef
+ br i1 %cmp27, label %28, label %27
+ %val28 = volatile load float* undef
+ %cmp28 = fcmp une float %val28, undef
+ br i1 %cmp28, label %29, label %28
+ %val29 = volatile load float* undef
+ %cmp29 = fcmp une float %val29, undef
+ br i1 %cmp29, label %30, label %29
+ %val30 = volatile load float* undef
+ %cmp30 = fcmp une float %val30, undef
+ br i1 %cmp30, label %31, label %30
+ %val31 = volatile load float* undef
+ %cmp31 = fcmp une float %val31, undef
+ br i1 %cmp31, label %32, label %31
+ %val32 = volatile load float* undef
+ %cmp32 = fcmp une float %val32, undef
+ br i1 %cmp32, label %33, label %32
+ %val33 = volatile load float* undef
+ %cmp33 = fcmp une float %val33, undef
+ br i1 %cmp33, label %34, label %33
+ %val34 = volatile load float* undef
+ %cmp34 = fcmp une float %val34, undef
+ br i1 %cmp34, label %35, label %34
+ %val35 = volatile load float* undef
+ %cmp35 = fcmp une float %val35, undef
+ br i1 %cmp35, label %36, label %35
+ %val36 = volatile load float* undef
+ %cmp36 = fcmp une float %val36, undef
+ br i1 %cmp36, label %37, label %36
+ %val37 = volatile load float* undef
+ %cmp37 = fcmp une float %val37, undef
+ br i1 %cmp37, label %38, label %37
+ %val38 = volatile load float* undef
+ %cmp38 = fcmp une float %val38, undef
+ br i1 %cmp38, label %39, label %38
+ %val39 = volatile load float* undef
+ %cmp39 = fcmp une float %val39, undef
+ br i1 %cmp39, label %40, label %39
+ %val40 = volatile load float* undef
+ %cmp40 = fcmp une float %val40, undef
+ br i1 %cmp40, label %41, label %40
+ %val41 = volatile load float* undef
+ %cmp41 = fcmp une float %val41, undef
+ br i1 %cmp41, label %42, label %41
+ %val42 = volatile load float* undef
+ %cmp42 = fcmp une float %val42, undef
+ br i1 %cmp42, label %43, label %42
+ %val43 = volatile load float* undef
+ %cmp43 = fcmp une float %val43, undef
+ br i1 %cmp43, label %44, label %43
+ %val44 = volatile load float* undef
+ %cmp44 = fcmp une float %val44, undef
+ br i1 %cmp44, label %45, label %44
+ %val45 = volatile load float* undef
+ %cmp45 = fcmp une float %val45, undef
+ br i1 %cmp45, label %46, label %45
+ %val46 = volatile load float* undef
+ %cmp46 = fcmp une float %val46, undef
+ br i1 %cmp46, label %47, label %46
+ %val47 = volatile load float* undef
+ %cmp47 = fcmp une float %val47, undef
+ br i1 %cmp47, label %48, label %47
+ %val48 = volatile load float* undef
+ %cmp48 = fcmp une float %val48, undef
+ br i1 %cmp48, label %49, label %48
+ %val49 = volatile load float* undef
+ %cmp49 = fcmp une float %val49, undef
+ br i1 %cmp49, label %50, label %49
+ %val50 = volatile load float* undef
+ %cmp50 = fcmp une float %val50, undef
+ br i1 %cmp50, label %51, label %50
+ %val51 = volatile load float* undef
+ %cmp51 = fcmp une float %val51, undef
+ br i1 %cmp51, label %52, label %51
+ %val52 = volatile load float* undef
+ %cmp52 = fcmp une float %val52, undef
+ br i1 %cmp52, label %53, label %52
+ %val53 = volatile load float* undef
+ %cmp53 = fcmp une float %val53, undef
+ br i1 %cmp53, label %54, label %53
+ %val54 = volatile load float* undef
+ %cmp54 = fcmp une float %val54, undef
+ br i1 %cmp54, label %55, label %54
+ %val55 = volatile load float* undef
+ %cmp55 = fcmp une float %val55, undef
+ br i1 %cmp55, label %56, label %55
+ %val56 = volatile load float* undef
+ %cmp56 = fcmp une float %val56, undef
+ br i1 %cmp56, label %57, label %56
+ %val57 = volatile load float* undef
+ %cmp57 = fcmp une float %val57, undef
+ br i1 %cmp57, label %58, label %57
+ %val58 = volatile load float* undef
+ %cmp58 = fcmp une float %val58, undef
+ br i1 %cmp58, label %59, label %58
+ %val59 = volatile load float* undef
+ %cmp59 = fcmp une float %val59, undef
+ br i1 %cmp59, label %60, label %59
+ %val60 = volatile load float* undef
+ %cmp60 = fcmp une float %val60, undef
+ br i1 %cmp60, label %61, label %60
+ %val61 = volatile load float* undef
+ %cmp61 = fcmp une float %val61, undef
+ br i1 %cmp61, label %62, label %61
+ %val62 = volatile load float* undef
+ %cmp62 = fcmp une float %val62, undef
+ br i1 %cmp62, label %63, label %62
+ %val63 = volatile load float* undef
+ %cmp63 = fcmp une float %val63, undef
+ br i1 %cmp63, label %64, label %63
+ %val64 = volatile load float* undef
+ %cmp64 = fcmp une float %val64, undef
+ br i1 %cmp64, label %65, label %64
+
+ br label %exit
+exit:
+ ret void
+}