author     Sanjoy Das <sanjoy@playingwithpointers.com>   2015-06-15 18:44:27 +0000
committer  Sanjoy Das <sanjoy@playingwithpointers.com>   2015-06-15 18:44:27 +0000
commit     69fad0799e243f0df832f9b4a98cd74263d8d107 (patch)
tree       25553a6a6a043b9ee7627ad77846613be2afe65b
parent     6b34a462983498c71fd6fb839ecd0e49e56de4ce (diff)
download   bcm5719-llvm-69fad0799e243f0df832f9b4a98cd74263d8d107.tar.gz
           bcm5719-llvm-69fad0799e243f0df832f9b4a98cd74263d8d107.zip
[CodeGen] Add a pass to fold null checks into nearby memory operations.
Summary:
This change adds an "ImplicitNullChecks" target-dependent pass. This pass
folds null checks into memory operations using the FAULTING_LOAD pseudo-op
introduced in previous patches.

Depends on D10197
Depends on D10199
Depends on D10200

Reviewers: reames, rnk, pgavlin, JosephTremoulet, atrick

Reviewed By: atrick

Subscribers: ab, JosephTremoulet, llvm-commits

Differential Revision: http://reviews.llvm.org/D10201

llvm-svn: 239743
-rw-r--r--llvm/include/llvm/CodeGen/Passes.h4
-rw-r--r--llvm/include/llvm/InitializePasses.h1
-rw-r--r--llvm/lib/CodeGen/CMakeLists.txt1
-rw-r--r--llvm/lib/CodeGen/CodeGen.cpp1
-rw-r--r--llvm/lib/CodeGen/ImplicitNullChecks.cpp261
-rw-r--r--llvm/lib/CodeGen/Passes.cpp7
-rw-r--r--llvm/test/CodeGen/X86/implicit-null-check-negative.ll52
-rw-r--r--llvm/test/CodeGen/X86/implicit-null-check.ll118
8 files changed, 445 insertions, 0 deletions
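
To see the new pass end to end, the sketch below (adapted from the tests added
in this patch; the file name null-check.ll and the exact codegen are
illustrative, not guaranteed) runs llc with the new -enable-implicit-null-checks
flag on IR that performs an explicit null check before a load:

    ; Invoke as: llc -O3 -mtriple=x86_64-apple-macosx -enable-implicit-null-checks null-check.ll
    define i32 @imp_null_check_load(i32* %x) {
     entry:
      %c = icmp eq i32* %x, null
      br i1 %c, label %is_null, label %not_null

     is_null:
      ret i32 42

     not_null:
      ; With the pass enabled, this load and the branch above are folded into a
      ; single FAULTING_LOAD_OP whose handler label points at the is_null block.
      %t = load i32, i32* %x
      ret i32 %t
    }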
diff --git a/llvm/include/llvm/CodeGen/Passes.h b/llvm/include/llvm/CodeGen/Passes.h
index 9c7e7b4001a..5d1224a720e 100644
--- a/llvm/include/llvm/CodeGen/Passes.h
+++ b/llvm/include/llvm/CodeGen/Passes.h
@@ -552,6 +552,10 @@ namespace llvm {
/// MachineCSE - This pass performs global CSE on machine instructions.
extern char &MachineCSEID;
+ /// ImplicitNullChecks - This pass folds null pointer checks into nearby
+ /// memory operations.
+ extern char &ImplicitNullChecksID;
+
/// MachineLICM - This pass performs LICM on machine instructions.
extern char &MachineLICMID;
diff --git a/llvm/include/llvm/InitializePasses.h b/llvm/include/llvm/InitializePasses.h
index 4f95c886800..5c57de8ed5d 100644
--- a/llvm/include/llvm/InitializePasses.h
+++ b/llvm/include/llvm/InitializePasses.h
@@ -187,6 +187,7 @@ void initializeMachineBlockPlacementPass(PassRegistry&);
void initializeMachineBlockPlacementStatsPass(PassRegistry&);
void initializeMachineBranchProbabilityInfoPass(PassRegistry&);
void initializeMachineCSEPass(PassRegistry&);
+void initializeImplicitNullChecksPass(PassRegistry&);
void initializeMachineDominatorTreePass(PassRegistry&);
void initializeMachineDominanceFrontierPass(PassRegistry&);
void initializeMachinePostDominatorTreePass(PassRegistry&);
diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt
index 2f65253c7c5..8ead486a789 100644
--- a/llvm/lib/CodeGen/CMakeLists.txt
+++ b/llvm/lib/CodeGen/CMakeLists.txt
@@ -27,6 +27,7 @@ add_llvm_library(LLVMCodeGen
GCStrategy.cpp
GlobalMerge.cpp
IfConversion.cpp
+ ImplicitNullChecks.cpp
InlineSpiller.cpp
InterferenceCache.cpp
IntrinsicLowering.cpp
diff --git a/llvm/lib/CodeGen/CodeGen.cpp b/llvm/lib/CodeGen/CodeGen.cpp
index 2c6eaf35a25..155c5ecec77 100644
--- a/llvm/lib/CodeGen/CodeGen.cpp
+++ b/llvm/lib/CodeGen/CodeGen.cpp
@@ -42,6 +42,7 @@ void llvm::initializeCodeGen(PassRegistry &Registry) {
initializeMachineBlockPlacementPass(Registry);
initializeMachineBlockPlacementStatsPass(Registry);
initializeMachineCSEPass(Registry);
+ initializeImplicitNullChecksPass(Registry);
initializeMachineCombinerPass(Registry);
initializeMachineCopyPropagationPass(Registry);
initializeMachineDominatorTreePass(Registry);
diff --git a/llvm/lib/CodeGen/ImplicitNullChecks.cpp b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
new file mode 100644
index 00000000000..122e23d4a5c
--- /dev/null
+++ b/llvm/lib/CodeGen/ImplicitNullChecks.cpp
@@ -0,0 +1,261 @@
+//===-- ImplicitNullChecks.cpp - Fold null checks into memory accesses ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass turns explicit null checks of the form
+//
+// test %r10, %r10
+// je throw_npe
+// movl (%r10), %esi
+// ...
+//
+// into
+//
+// faulting_load_op("movl (%r10), %esi", throw_npe)
+// ...
+//
+// With the help of a runtime that understands the .fault_maps section,
+// faulting_load_op branches to throw_npe if executing movl (%r10), %esi incurs
+// a page fault.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/IR/BasicBlock.h"
+#include "llvm/IR/Instruction.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Target/TargetInstrInfo.h"
+
+using namespace llvm;
+
+static cl::opt<unsigned> PageSize("imp-null-check-page-size",
+ cl::desc("The page size of the target in "
+ "bytes"),
+ cl::init(4096));
+
+namespace {
+
+class ImplicitNullChecks : public MachineFunctionPass {
+ /// Represents one null check that can be made implicit.
+ struct NullCheck {
+ // The memory operation the null check can be folded into.
+ MachineInstr *MemOperation;
+
+ // The instruction actually doing the null check (Ptr != 0).
+ MachineInstr *CheckOperation;
+
+ // The block the check resides in.
+ MachineBasicBlock *CheckBlock;
+
+ // The block branched to if the pointer is non-null.
+ MachineBasicBlock *NotNullSucc;
+
+ // The block branched to if the pointer is null.
+ MachineBasicBlock *NullSucc;
+
+ NullCheck()
+ : MemOperation(), CheckOperation(), CheckBlock(), NotNullSucc(),
+ NullSucc() {}
+
+ explicit NullCheck(MachineInstr *memOperation, MachineInstr *checkOperation,
+ MachineBasicBlock *checkBlock,
+ MachineBasicBlock *notNullSucc,
+ MachineBasicBlock *nullSucc)
+ : MemOperation(memOperation), CheckOperation(checkOperation),
+ CheckBlock(checkBlock), NotNullSucc(notNullSucc), NullSucc(nullSucc) {
+ }
+ };
+
+ const TargetInstrInfo *TII = nullptr;
+ const TargetRegisterInfo *TRI = nullptr;
+ MachineModuleInfo *MMI = nullptr;
+
+ bool analyzeBlockForNullChecks(MachineBasicBlock &MBB,
+ SmallVectorImpl<NullCheck> &NullCheckList);
+ MachineInstr *insertFaultingLoad(MachineInstr *LoadMI, MachineBasicBlock *MBB,
+ MCSymbol *HandlerLabel);
+ void rewriteNullChecks(ArrayRef<NullCheck> NullCheckList);
+
+public:
+ static char ID;
+
+ ImplicitNullChecks() : MachineFunctionPass(ID) {
+ initializeImplicitNullChecksPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnMachineFunction(MachineFunction &MF) override;
+};
+}
+
+bool ImplicitNullChecks::runOnMachineFunction(MachineFunction &MF) {
+ TII = MF.getSubtarget().getInstrInfo();
+ TRI = MF.getRegInfo().getTargetRegisterInfo();
+ MMI = &MF.getMMI();
+
+ SmallVector<NullCheck, 16> NullCheckList;
+
+ for (auto &MBB : MF)
+ analyzeBlockForNullChecks(MBB, NullCheckList);
+
+ if (!NullCheckList.empty())
+ rewriteNullChecks(NullCheckList);
+
+ return !NullCheckList.empty();
+}
+
+/// Analyze MBB to check if its terminating branch can be turned into an
+/// implicit null check. If so, append a description of the null check to
+/// NullCheckList and return true; otherwise return false.
+bool ImplicitNullChecks::analyzeBlockForNullChecks(
+ MachineBasicBlock &MBB, SmallVectorImpl<NullCheck> &NullCheckList) {
+ typedef TargetInstrInfo::MachineBranchPredicate MachineBranchPredicate;
+
+ MachineBranchPredicate MBP;
+
+ if (TII->AnalyzeBranchPredicate(MBB, MBP, true))
+ return false;
+
+ // Is the predicate comparing an integer to zero?
+ if (!(MBP.LHS.isReg() && MBP.RHS.isImm() && MBP.RHS.getImm() == 0 &&
+ (MBP.Predicate == MachineBranchPredicate::PRED_NE ||
+ MBP.Predicate == MachineBranchPredicate::PRED_EQ)))
+ return false;
+
+ // If we cannot erase the test instruction itself, then making the null check
+ // implicit does not buy us much.
+ if (!MBP.SingleUseCondition)
+ return false;
+
+ MachineBasicBlock *NotNullSucc, *NullSucc;
+
+ if (MBP.Predicate == MachineBranchPredicate::PRED_NE) {
+ NotNullSucc = MBP.TrueDest;
+ NullSucc = MBP.FalseDest;
+ } else {
+ NotNullSucc = MBP.FalseDest;
+ NullSucc = MBP.TrueDest;
+ }
+
+ // We handle the simplest case for now. We can potentially do better by using
+ // the machine dominator tree.
+ if (NotNullSucc->pred_size() != 1)
+ return false;
+
+ // Starting with a code fragment like:
+ //
+ // test %RAX, %RAX
+ // jne LblNotNull
+ //
+ // LblNull:
+ // callq throw_NullPointerException
+ //
+ // LblNotNull:
+ // Def = Load (%RAX + <offset>)
+ // ...
+ //
+ //
+ // we want to end up with
+ //
+ // Def = TrappingLoad (%RAX + <offset>), LblNull
+ // jmp LblNotNull ;; explicit or fallthrough
+ //
+ // LblNotNull:
+ // ...
+ //
+ // LblNull:
+ // callq throw_NullPointerException
+ //
+
+ unsigned PointerReg = MBP.LHS.getReg();
+ MachineInstr *MemOp = &*NotNullSucc->begin();
+ unsigned BaseReg, Offset;
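+ // The candidate must be a single-def, non-predicable load whose base
+ // register is the null-checked pointer and whose offset lies within the
+ // first page, so a null base is guaranteed to fault (assuming the zero
+ // page is unmapped).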
+ if (TII->getMemOpBaseRegImmOfs(MemOp, BaseReg, Offset, TRI))
+ if (MemOp->mayLoad() && !MemOp->isPredicable() && BaseReg == PointerReg &&
+ Offset < PageSize && MemOp->getDesc().getNumDefs() == 1) {
+ NullCheckList.emplace_back(MemOp, MBP.ConditionDef, &MBB, NotNullSucc,
+ NullSucc);
+ return true;
+ }
+
+ return false;
+}
+
+/// Wrap a machine load instruction, LoadMI, into a FAULTING_LOAD_OP machine
+/// instruction. The FAULTING_LOAD_OP instruction does the same load as LoadMI
+/// (defining the same register), and branches to HandlerLabel if the load
+/// faults. The FAULTING_LOAD_OP instruction is inserted at the end of MBB.
+MachineInstr *ImplicitNullChecks::insertFaultingLoad(MachineInstr *LoadMI,
+ MachineBasicBlock *MBB,
+ MCSymbol *HandlerLabel) {
+ DebugLoc DL;
+ unsigned NumDefs = LoadMI->getDesc().getNumDefs();
+ assert(NumDefs == 1 && "other cases unhandled!");
+ (void)NumDefs;
+
+ unsigned DefReg = LoadMI->defs().begin()->getReg();
+ assert(std::distance(LoadMI->defs().begin(), LoadMI->defs().end()) == 1 &&
+ "expected exactly one def!");
+
+ auto MIB = BuildMI(MBB, DL, TII->get(TargetOpcode::FAULTING_LOAD_OP), DefReg)
+ .addSym(HandlerLabel)
+ .addImm(LoadMI->getOpcode());
+
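+ // Copy the original load's non-def operands (the address computation) onto
+ // the new faulting-load pseudo.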
+ for (auto &MO : LoadMI->uses())
+ MIB.addOperand(MO);
+
+ MIB.setMemRefs(LoadMI->memoperands_begin(), LoadMI->memoperands_end());
+
+ return MIB;
+}
+
+/// Rewrite the null checks in NullCheckList into implicit null checks.
+void ImplicitNullChecks::rewriteNullChecks(
+ ArrayRef<ImplicitNullChecks::NullCheck> NullCheckList) {
+ DebugLoc DL;
+
+ for (auto &NC : NullCheckList) {
+ MCSymbol *HandlerLabel = MMI->getContext().createTempSymbol();
+
+ // Remove the conditional branch dependent on the null check.
+ unsigned BranchesRemoved = TII->RemoveBranch(*NC.CheckBlock);
+ (void)BranchesRemoved;
+ assert(BranchesRemoved > 0 && "expected at least one branch!");
+
+ // Insert a faulting load where the conditional branch was originally. The
+ // check we did earlier ensures that this bit of code motion is legal. We do not
+ // touch the successors list for any basic block since we haven't changed
+ // control flow, we've just made it implicit.
+ insertFaultingLoad(NC.MemOperation, NC.CheckBlock, HandlerLabel);
+ NC.MemOperation->removeFromParent();
+ NC.CheckOperation->eraseFromParent();
+
+ // Insert an *unconditional* branch to the not-null successor.
+ TII->InsertBranch(*NC.CheckBlock, NC.NotNullSucc, nullptr, /*Cond=*/None,
+ DL);
+
+ // Emit the HandlerLabel as an EH_LABEL.
+ BuildMI(*NC.NullSucc, NC.NullSucc->begin(), DL,
+ TII->get(TargetOpcode::EH_LABEL)).addSym(HandlerLabel);
+ }
+}
+
+char ImplicitNullChecks::ID = 0;
+char &llvm::ImplicitNullChecksID = ImplicitNullChecks::ID;
+INITIALIZE_PASS_BEGIN(ImplicitNullChecks, "implicit-null-checks",
+ "Implicit null checks", false, false)
+INITIALIZE_PASS_END(ImplicitNullChecks, "implicit-null-checks",
+ "Implicit null checks", false, false)
diff --git a/llvm/lib/CodeGen/Passes.cpp b/llvm/lib/CodeGen/Passes.cpp
index 4cd86e66c0e..59b96eb37e7 100644
--- a/llvm/lib/CodeGen/Passes.cpp
+++ b/llvm/lib/CodeGen/Passes.cpp
@@ -72,6 +72,10 @@ static cl::opt<bool> DisableCopyProp("disable-copyprop", cl::Hidden,
cl::desc("Disable Copy Propagation pass"));
static cl::opt<bool> DisablePartialLibcallInlining("disable-partial-libcall-inlining",
cl::Hidden, cl::desc("Disable Partial Libcall Inlining"));
+static cl::opt<bool> EnableImplicitNullChecks(
+ "enable-implicit-null-checks",
+ cl::desc("Fold null checks into faulting memory operations"),
+ cl::init(false));
static cl::opt<bool> PrintLSR("print-lsr-output", cl::Hidden,
cl::desc("Print LLVM IR produced by the loop-reduce pass"));
static cl::opt<bool> PrintISelInput("print-isel-input", cl::Hidden,
@@ -543,6 +547,9 @@ void TargetPassConfig::addMachinePasses() {
// Run pre-sched2 passes.
addPreSched2();
+ if (EnableImplicitNullChecks)
+ addPass(&ImplicitNullChecksID);
+
// Second pass scheduler.
if (getOptLevel() != CodeGenOpt::None) {
if (MISchedPostRA)
diff --git a/llvm/test/CodeGen/X86/implicit-null-check-negative.ll b/llvm/test/CodeGen/X86/implicit-null-check-negative.ll
new file mode 100644
index 00000000000..0fcb0e95985
--- /dev/null
+++ b/llvm/test/CodeGen/X86/implicit-null-check-negative.ll
@@ -0,0 +1,52 @@
+; RUN: llc -mtriple=x86_64-apple-macosx -O3 -debug-only=faultmaps -enable-implicit-null-checks < %s | FileCheck %s
+
+; List cases where we should *not* be emitting implicit null checks.
+
+; CHECK-NOT: Fault Map Output
+
+define i32 @imp_null_check_load(i32* %x, i32* %y) {
+ entry:
+ %c = icmp eq i32* %x, null
+; It isn't legal to move the load from %x from "not_null" to here --
+; the store to %y could be aliasing it.
+ br i1 %c, label %is_null, label %not_null
+
+ is_null:
+ ret i32 42
+
+ not_null:
+ store i32 0, i32* %y
+ %t = load i32, i32* %x
+ ret i32 %t
+}
+
+define i32 @imp_null_check_gep_load(i32* %x) {
+ entry:
+ %c = icmp eq i32* %x, null
+ br i1 %c, label %is_null, label %not_null
+
+ is_null:
+ ret i32 42
+
+ not_null:
+; null + 5000 * sizeof(i32) lies outside the null page and hence the
+; load to %t cannot be assumed to be reliably faulting.
+ %x.gep = getelementptr i32, i32* %x, i32 5000
+ %t = load i32, i32* %x.gep
+ ret i32 %t
+}
+
+define i32 @imp_null_check_load_no_md(i32* %x) {
+; Everything is okay except that the !never.executed metadata is
+; missing.
+ entry:
+ %c = icmp eq i32* %x, null
+ br i1 %c, label %is_null, label %not_null
+
+ is_null:
+ ret i32 42
+
+ not_null:
+ %t = load i32, i32* %x
+ ret i32 %t
+}
diff --git a/llvm/test/CodeGen/X86/implicit-null-check.ll b/llvm/test/CodeGen/X86/implicit-null-check.ll
new file mode 100644
index 00000000000..f4c539800fb
--- /dev/null
+++ b/llvm/test/CodeGen/X86/implicit-null-check.ll
@@ -0,0 +1,118 @@
+; RUN: llc -O3 -mtriple=x86_64-apple-macosx -enable-implicit-null-checks < %s | FileCheck %s
+
+define i32 @imp_null_check_load(i32* %x) {
+; CHECK-LABEL: _imp_null_check_load:
+; CHECK: Ltmp1:
+; CHECK: movl (%rdi), %eax
+; CHECK: retq
+; CHECK: Ltmp0:
+; CHECK: movl $42, %eax
+; CHECK: retq
+
+ entry:
+ %c = icmp eq i32* %x, null
+ br i1 %c, label %is_null, label %not_null
+
+ is_null:
+ ret i32 42
+
+ not_null:
+ %t = load i32, i32* %x
+ ret i32 %t
+}
+
+define i32 @imp_null_check_gep_load(i32* %x) {
+; CHECK-LABEL: _imp_null_check_gep_load:
+; CHECK: Ltmp3:
+; CHECK: movl 128(%rdi), %eax
+; CHECK: retq
+; CHECK: Ltmp2:
+; CHECK: movl $42, %eax
+; CHECK: retq
+
+ entry:
+ %c = icmp eq i32* %x, null
+ br i1 %c, label %is_null, label %not_null
+
+ is_null:
+ ret i32 42
+
+ not_null:
+ %x.gep = getelementptr i32, i32* %x, i32 32
+ %t = load i32, i32* %x.gep
+ ret i32 %t
+}
+
+define i32 @imp_null_check_add_result(i32* %x, i32 %p) {
+; CHECK-LABEL: _imp_null_check_add_result:
+; CHECK: Ltmp5:
+; CHECK: addl (%rdi), %esi
+; CHECK: movl %esi, %eax
+; CHECK: retq
+; CHECK: Ltmp4:
+; CHECK: movl $42, %eax
+; CHECK: retq
+
+ entry:
+ %c = icmp eq i32* %x, null
+ br i1 %c, label %is_null, label %not_null
+
+ is_null:
+ ret i32 42
+
+ not_null:
+ %t = load i32, i32* %x
+ %p1 = add i32 %t, %p
+ ret i32 %p1
+}
+
+; CHECK-LABEL: __LLVM_FaultMaps:
+
+; Version:
+; CHECK-NEXT: .byte 1
+
+; Reserved x2
+; CHECK-NEXT: .byte 0
+; CHECK-NEXT: .short 0
+
+; # functions:
+; CHECK-NEXT: .long 3
+
+; FunctionAddr:
+; CHECK-NEXT: .quad _imp_null_check_add_result
+; NumFaultingPCs
+; CHECK-NEXT: .long 1
+; Reserved:
+; CHECK-NEXT: .long 0
+; Fault[0].Type:
+; CHECK-NEXT: .long 1
+; Fault[0].FaultOffset:
+; CHECK-NEXT: .long Ltmp5-_imp_null_check_add_result
+; Fault[0].HandlerOffset:
+; CHECK-NEXT: .long Ltmp4-_imp_null_check_add_result
+
+; FunctionAddr:
+; CHECK-NEXT: .quad _imp_null_check_gep_load
+; NumFaultingPCs
+; CHECK-NEXT: .long 1
+; Reserved:
+; CHECK-NEXT: .long 0
+; Fault[0].Type:
+; CHECK-NEXT: .long 1
+; Fault[0].FaultOffset:
+; CHECK-NEXT: .long Ltmp3-_imp_null_check_gep_load
+; Fault[0].HandlerOffset:
+; CHECK-NEXT: .long Ltmp2-_imp_null_check_gep_load
+
+; FunctionAddr:
+; CHECK-NEXT: .quad _imp_null_check_load
+; NumFaultingPCs
+; CHECK-NEXT: .long 1
+; Reserved:
+; CHECK-NEXT: .long 0
+; Fault[0].Type:
+; CHECK-NEXT: .long 1
+; Fault[0].FaultOffset:
+; CHECK-NEXT: .long Ltmp1-_imp_null_check_load
+; Fault[0].HandlerOffset:
+; CHECK-NEXT: .long Ltmp0-_imp_null_check_load