author    Scott Constable <scott.d.constable@intel.com>  2020-06-10 15:31:47 -0700
committer Tom Stellard <tstellar@redhat.com>             2020-06-24 09:31:04 -0700
commit    72bff7855d8ce42b831922a51763f9a0732bd473 (patch)
tree      8a297de1434849d6838d2e7b719586ec8c4eb923 /llvm/lib
parent    8aa8abae349dc1607884c24ca3b685d4c7d597d1 (diff)
[X86] Add an Unoptimized Load Value Injection (LVI) Load Hardening Pass
@nikic raised an issue on D75936 that the added complexity to the O0 pipeline
was causing noticeable slowdowns for `-O0` builds. This patch addresses the
issue by adding a pass with equal security properties, but without any
optimizations (and more importantly, without the need for expensive analysis
dependencies).

Reviewers: nikic, craig.topper, mattdr

Reviewed By: craig.topper, mattdr

Differential Revision: https://reviews.llvm.org/D80964
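To make the difference concrete, here is a rough, hypothetical illustration
(not part of this commit): the unoptimized pass added below follows every
machine instruction that may load with an LFENCE, whereas the existing
optimized pass relies on analysis to place fewer fences. The function and
variable names below are invented for illustration only.

// Hypothetical C++ example, not from the patch: compiled at -O0 with LVI load
// hardening enabled, every instruction that may load (including the stack
// reloads the -O0 code generator emits for Sum, I, Data, and Count) ends up
// followed by an LFENCE after this pass runs.
int SumArray(const int *Data, int Count) {
  int Sum = 0;
  for (int I = 0; I < Count; ++I)
    Sum += Data[I]; // the load feeding this add gets a trailing LFENCE
  return Sum;
}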
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/X86/X86.h                                   |  2
-rw-r--r--  llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp  | 76
-rw-r--r--  llvm/lib/Target/X86/X86TargetMachine.cpp                    |  5
3 files changed, 82 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/Target/X86/X86.h b/llvm/lib/Target/X86/X86.h
index 39b2f814def..a0ab5c3a5b3 100644
--- a/llvm/lib/Target/X86/X86.h
+++ b/llvm/lib/Target/X86/X86.h
@@ -134,6 +134,7 @@ InstructionSelector *createX86InstructionSelector(const X86TargetMachine &TM,
X86RegisterBankInfo &);
FunctionPass *createX86LoadValueInjectionLoadHardeningPass();
+FunctionPass *createX86LoadValueInjectionLoadHardeningUnoptimizedPass();
FunctionPass *createX86LoadValueInjectionRetHardeningPass();
FunctionPass *createX86SpeculativeLoadHardeningPass();
@@ -150,6 +151,7 @@ void initializeX86DomainReassignmentPass(PassRegistry &);
void initializeX86ExecutionDomainFixPass(PassRegistry &);
void initializeX86ExpandPseudoPass(PassRegistry &);
void initializeX86FlagsCopyLoweringPassPass(PassRegistry &);
+void initializeX86LoadValueInjectionLoadHardeningUnoptimizedPassPass(PassRegistry &);
void initializeX86LoadValueInjectionLoadHardeningPassPass(PassRegistry &);
void initializeX86LoadValueInjectionRetHardeningPassPass(PassRegistry &);
void initializeX86OptimizeLEAPassPass(PassRegistry &);
diff --git a/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp b/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
index 50f8b3477ac..35fc439998f 100644
--- a/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86LoadValueInjectionLoadHardening.cpp
@@ -822,3 +822,79 @@ INITIALIZE_PASS_END(X86LoadValueInjectionLoadHardeningPass, PASS_KEY,
FunctionPass *llvm::createX86LoadValueInjectionLoadHardeningPass() {
return new X86LoadValueInjectionLoadHardeningPass();
}
+
+namespace {
+
+/// The `X86LoadValueInjectionLoadHardeningPass` above depends on expensive
+/// analysis passes that add complexity to the pipeline. This complexity
+/// can cause noticeable overhead when no optimizations are enabled, i.e., -O0.
+/// The purpose of `X86LoadValueInjectionLoadHardeningUnoptimizedPass` is to
+/// provide the same security as the optimized pass, but without adding
+/// unnecessary complexity to the LLVM pipeline.
+///
+/// The behavior of this pass is simply to insert an LFENCE after every load
+/// instruction.
+class X86LoadValueInjectionLoadHardeningUnoptimizedPass
+    : public MachineFunctionPass {
+public:
+  X86LoadValueInjectionLoadHardeningUnoptimizedPass()
+      : MachineFunctionPass(ID) {}
+
+  StringRef getPassName() const override {
+    return "X86 Load Value Injection (LVI) Load Hardening (Unoptimized)";
+  }
+  bool runOnMachineFunction(MachineFunction &MF) override;
+  static char ID;
+};
+
+} // end anonymous namespace
+
+char X86LoadValueInjectionLoadHardeningUnoptimizedPass::ID = 0;
+
+bool X86LoadValueInjectionLoadHardeningUnoptimizedPass::runOnMachineFunction(
+    MachineFunction &MF) {
+  LLVM_DEBUG(dbgs() << "***** " << getPassName() << " : " << MF.getName()
+                    << " *****\n");
+  const X86Subtarget *STI = &MF.getSubtarget<X86Subtarget>();
+  if (!STI->useLVILoadHardening())
+    return false;
+
+  // FIXME: support 32-bit
+  if (!STI->is64Bit())
+    report_fatal_error("LVI load hardening is only supported on 64-bit", false);
+
+  // Don't skip functions with the "optnone" attr but participate in opt-bisect.
+  const Function &F = MF.getFunction();
+  if (!F.hasOptNone() && skipFunction(F))
+    return false;
+
+  bool Modified = false;
+  ++NumFunctionsConsidered;
+
+  const TargetInstrInfo *TII = STI->getInstrInfo();
+  for (auto &MBB : MF) {
+    for (auto &MI : MBB) {
+      if (!MI.mayLoad() || MI.getOpcode() == X86::LFENCE ||
+          MI.getOpcode() == X86::MFENCE)
+        continue;
+
+      MachineBasicBlock::iterator InsertionPt =
+          MI.getNextNode() ? MI.getNextNode() : MBB.end();
+      BuildMI(MBB, InsertionPt, DebugLoc(), TII->get(X86::LFENCE));
+      ++NumFences;
+      Modified = true;
+    }
+  }
+
+  if (Modified)
+    ++NumFunctionsMitigated;
+
+  return Modified;
+}
+
+INITIALIZE_PASS(X86LoadValueInjectionLoadHardeningUnoptimizedPass, PASS_KEY,
+                "X86 LVI load hardening", false, false)
+
+FunctionPass *llvm::createX86LoadValueInjectionLoadHardeningUnoptimizedPass() {
+  return new X86LoadValueInjectionLoadHardeningUnoptimizedPass();
+}
diff --git a/llvm/lib/Target/X86/X86TargetMachine.cpp b/llvm/lib/Target/X86/X86TargetMachine.cpp
index 680a52b5438..9f639ffa22e 100644
--- a/llvm/lib/Target/X86/X86TargetMachine.cpp
+++ b/llvm/lib/Target/X86/X86TargetMachine.cpp
@@ -498,7 +498,10 @@ void X86PassConfig::addMachineSSAOptimization() {
void X86PassConfig::addPostRegAlloc() {
addPass(createX86FloatingPointStackifierPass());
-  addPass(createX86LoadValueInjectionLoadHardeningPass());
+  if (getOptLevel() != CodeGenOpt::None)
+    addPass(createX86LoadValueInjectionLoadHardeningPass());
+  else
+    addPass(createX86LoadValueInjectionLoadHardeningUnoptimizedPass());
}
void X86PassConfig::addPreSched2() { addPass(createX86ExpandPseudoPass()); }