summaryrefslogtreecommitdiffstats
path: root/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
diff options
context:
space:
mode:
authorChandler Carruth <chandlerc@gmail.com>2018-07-16 11:38:48 +0000
committerChandler Carruth <chandlerc@gmail.com>2018-07-16 11:38:48 +0000
commite66a6f48e314c84d00e9580fa3d30fadb683dd11 (patch)
treed7dbb651a86fb1c127fc5ee206fc15212a2d7118 /llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
parentd1a381b17a4f0ecbd3bbd6c33db9c541037429eb (diff)
downloadbcm5719-llvm-e66a6f48e314c84d00e9580fa3d30fadb683dd11.tar.gz
bcm5719-llvm-e66a6f48e314c84d00e9580fa3d30fadb683dd11.zip
[x86/SLH] Fix a bug where we would try to post-load harden non-GPRs.
Found cases that hit the assert I added. This patch factors the validity checking into a nice helper routine and calls it when deciding to harden post-load, and asserts it when doing so later. I've added tests for the various ways of loading a floating point type, as well as loading all vector permutations. Even though many of these go to identical instructions, it seems good to somewhat comprehensively test them. I'm confident there will be more fixes needed here; I'll try to add tests each time as I get this predicate adjusted. llvm-svn: 337160
Diffstat (limited to 'llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp')
-rw-r--r--llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp38
1 file changed, 25 insertions, 13 deletions
diff --git a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index cb8de34cf05..a4e9ead0070 100644
--- a/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/llvm/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -173,6 +173,7 @@ private:
MachineInstr *
sinkPostLoadHardenedInst(MachineInstr &MI,
SmallPtrSetImpl<MachineInstr *> &HardenedLoads);
+ bool canHardenPostLoad(MachineInstr &MI);
void hardenPostLoad(MachineInstr &MI, MachineSSAUpdater &PredStateSSA);
void checkReturnInstr(MachineInstr &MI, MachineSSAUpdater &PredStateSSA);
void checkCallInstr(MachineInstr &MI, MachineSSAUpdater &PredStateSSA);
@@ -1115,12 +1116,12 @@ void X86SpeculativeLoadHardeningPass::checkAllLoads(
(IndexReg && LoadDepRegs.test(IndexReg)))
continue;
- // If post-load hardening is enabled, this load is known to be
- // data-invariant, and we aren't already going to harden one of the
+ // If post-load hardening is enabled, this load is compatible with
+ // post-load hardening, and we aren't already going to harden one of the
// address registers, queue it up to be hardened post-load. Notably, even
// once hardened this won't introduce a useful dependency that could prune
// out subsequent loads.
- if (EnablePostLoadHardening && isDataInvariantLoad(MI) &&
+ if (EnablePostLoadHardening && canHardenPostLoad(MI) &&
!HardenedAddrRegs.count(BaseReg) &&
!HardenedAddrRegs.count(IndexReg)) {
HardenPostLoad.insert(&MI);
@@ -1602,6 +1603,25 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
return MI;
}
+bool X86SpeculativeLoadHardeningPass::canHardenPostLoad(MachineInstr &MI) {
+ if (!isDataInvariantLoad(MI))
+ return false;
+
+ auto &DefOp = MI.getOperand(0);
+ unsigned OldDefReg = DefOp.getReg();
+
+ auto *DefRC = MRI->getRegClass(OldDefReg);
+ int DefRegBytes = TRI->getRegSizeInBits(*DefRC) / 8;
+ if (DefRegBytes > 8)
+ // We don't support post-load hardening of vectors.
+ return false;
+
+ const TargetRegisterClass *GPRRegClasses[] = {
+ &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32RegClass,
+ &X86::GR64RegClass};
+ return DefRC->hasSuperClassEq(GPRRegClasses[Log2_32(DefRegBytes)]);
+}
+
// We can harden non-leaking loads into register without touching the address
// by just hiding all of the loaded bits. We use an `or` instruction to do
// this because having the poison value be all ones allows us to use the same
@@ -1609,8 +1629,8 @@ MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(
// execution and coercing them to one is sufficient.
void X86SpeculativeLoadHardeningPass::hardenPostLoad(
MachineInstr &MI, MachineSSAUpdater &PredStateSSA) {
- assert(isDataInvariantLoad(MI) &&
- "Cannot get here with a non-invariant load!");
+ assert(canHardenPostLoad(MI) &&
+ "Invalid instruction for post-load hardening!");
MachineBasicBlock &MBB = *MI.getParent();
DebugLoc Loc = MI.getDebugLoc();
@@ -1625,14 +1645,6 @@ void X86SpeculativeLoadHardeningPass::hardenPostLoad(
unsigned OrOpCodes[] = {X86::OR8rr, X86::OR16rr, X86::OR32rr, X86::OR64rr};
unsigned OrOpCode = OrOpCodes[Log2_32(DefRegBytes)];
-#ifndef NDEBUG
- const TargetRegisterClass *OrRegClasses[] = {
- &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32RegClass,
- &X86::GR64RegClass};
- assert(DefRC->hasSuperClassEq(OrRegClasses[Log2_32(DefRegBytes)]) &&
- "Cannot define this register with OR instruction!");
-#endif
-
unsigned SubRegImms[] = {X86::sub_8bit, X86::sub_16bit, X86::sub_32bit};
auto GetStateRegInRC = [&](const TargetRegisterClass &RC) {
OpenPOWER on IntegriCloud