Diffstat (limited to 'llvm/lib')
 -rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp | 13 +++++++++----
 -rw-r--r--  llvm/lib/CodeGen/StackProtector.cpp                   |  6 +++++-
 -rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp               | 13 +++++++++++++
 -rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.h                 |  4 ++++
 -rw-r--r--  llvm/lib/Target/X86/X86InstrCompiler.td               |  9 +++++++++
 -rw-r--r--  llvm/lib/Target/X86/X86InstrInfo.cpp                  | 15 +++++++++++++++
 6 files changed, 55 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 1c31eca3ec9..f3addf05566 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -2148,11 +2148,14 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
unsigned Align = DL->getPrefTypeAlignment(Type::getInt8PtrTy(M.getContext()));
// Generate code to load the content of the guard slot.
- SDValue StackSlot = DAG.getLoad(
+ SDValue GuardVal = DAG.getLoad(
PtrTy, dl, DAG.getEntryNode(), StackSlotPtr,
MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), Align,
MachineMemOperand::MOVolatile);
+ if (TLI.useStackGuardXorFP())
+ GuardVal = TLI.emitStackGuardXorFP(DAG, GuardVal, dl);
+
// Retrieve guard check function, nullptr if instrumentation is inlined.
if (const Value *GuardCheck = TLI.getSSPStackGuardCheck(M)) {
// The target provides a guard check function to validate the guard value.
@@ -2164,7 +2167,7 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
- Entry.Node = StackSlot;
+ Entry.Node = GuardVal;
Entry.Ty = FnTy->getParamType(0);
if (Fn->hasAttribute(1, Attribute::AttrKind::InReg))
Entry.IsInReg = true;
@@ -2197,7 +2200,7 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
// Perform the comparison via a subtract/getsetcc.
EVT VT = Guard.getValueType();
- SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, StackSlot);
+ SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Guard, GuardVal);
SDValue Cmp = DAG.getSetCC(dl, TLI.getSetCCResultType(DAG.getDataLayout(),
*DAG.getContext(),
@@ -2207,7 +2210,7 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD,
// If the sub is not 0, then we know the guard/stackslot do not equal, so
// branch to failure MBB.
SDValue BrCond = DAG.getNode(ISD::BRCOND, dl,
- MVT::Other, StackSlot.getOperand(0),
+ MVT::Other, GuardVal.getOperand(0),
Cmp, DAG.getBasicBlock(SPD.getFailureMBB()));
// Otherwise branch to success MBB.
SDValue Br = DAG.getNode(ISD::BR, dl,
@@ -5646,6 +5649,8 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
MachinePointerInfo(Global, 0), Align,
MachineMemOperand::MOVolatile);
}
+ if (TLI.useStackGuardXorFP())
+ Res = TLI.emitStackGuardXorFP(DAG, Res, sdl);
DAG.setRoot(Chain);
setValue(&I, Res);
return nullptr;
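
The hunks above thread the loaded guard value through two new TargetLowering hooks before it reaches either the guard-check call or the compare-and-branch. The overrides declared later in X86ISelLowering.h imply the hook signatures; the base-class defaults sketched below are an assumption about how TargetLowering declares them, not text from this patch:

    // Assumed shape of the target-independent hooks (signatures taken from the
    // X86 overrides in this patch; the default bodies are guesses).
    class TargetLowering /* : public TargetLoweringBase */ {
    public:
      /// Return true if the loaded stack guard value should be XORed with the
      /// frame pointer before it is compared or passed to the check function.
      virtual bool useStackGuardXorFP() const { return false; }

      /// Emit the target-specific node that XORs the frame pointer into Val.
      /// Only called when useStackGuardXorFP() returns true.
      virtual SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                          const SDLoc &DL) const {
        llvm_unreachable("stack guard XOR-FP not supported by this target");
      }
      // ...
    };
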
diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp
index e3340028863..62cef95a4af 100644
--- a/llvm/lib/CodeGen/StackProtector.cpp
+++ b/llvm/lib/CodeGen/StackProtector.cpp
@@ -385,8 +385,12 @@ static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI,
/// - The epilogue checks the value stored in the prologue against the original
/// value. It calls __stack_chk_fail if they differ.
bool StackProtector::InsertStackProtectors() {
+ // If the target wants to XOR the frame pointer into the guard value, it's
+ // impossible to emit the check in IR, so the target *must* support stack
+ // protection in SDAG.
bool SupportsSelectionDAGSP =
- EnableSelectionDAGSP && !TM->Options.EnableFastISel;
+ TLI->useStackGuardXorFP() ||
+ (EnableSelectionDAGSP && !TM->Options.EnableFastISel);
AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
for (Function::iterator I = F->begin(), E = F->end(); I != E;) {
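
The comment added above states the key constraint: the frame pointer is not visible at the IR level, so a guard that mixes it in can only be materialized during instruction selection. The standalone program below only illustrates the masking scheme itself; no LLVM API is involved, and the cookie value and names are made up:

    #include <cstdint>
    #include <cstdio>

    // Stand-ins for the real machinery: 'global_guard' plays the role of the
    // process-wide cookie (e.g. __security_cookie) and 'frame_address' plays
    // the role of the frame pointer the backend XORs in.
    static uintptr_t global_guard = 0xA232C0DEu;

    int main() {
      char locals[32];
      uintptr_t frame_address = reinterpret_cast<uintptr_t>(&locals);

      // Prologue: store guard ^ FP into the on-stack guard slot.
      uintptr_t guard_slot = global_guard ^ frame_address;

      // ... the function body might overflow 'locals' and clobber the slot ...

      // Epilogue: unmask and compare; a mismatch means the slot was smashed.
      bool ok = (guard_slot ^ frame_address) == global_guard;
      std::printf("guard check %s\n", ok ? "passed" : "failed");
      return ok ? 0 : 1;
    }
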
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 6d5712dbf67..5f013753ea8 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1687,6 +1687,19 @@ bool X86TargetLowering::useLoadStackGuardNode() const {
return Subtarget.isTargetMachO() && Subtarget.is64Bit();
}
+bool X86TargetLowering::useStackGuardXorFP() const {
+ // Currently only MSVC CRTs XOR the frame pointer into the stack guard value.
+ return Subtarget.getTargetTriple().isOSMSVCRT();
+}
+
+SDValue X86TargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
+ const SDLoc &DL) const {
+ EVT PtrTy = getPointerTy(DAG.getDataLayout());
+ unsigned XorOp = Subtarget.is64Bit() ? X86::XOR64_FP : X86::XOR32_FP;
+ MachineSDNode *Node = DAG.getMachineNode(XorOp, DL, PtrTy, Val);
+ return SDValue(Node, 0);
+}
+
TargetLoweringBase::LegalizeTypeAction
X86TargetLowering::getPreferredVectorAction(EVT VT) const {
if (ExperimentalVectorWideningLegalization &&
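
The X86 override above enables the behaviour only for MSVC-style CRTs and returns a pre-isel machine node whose single result has pointer type, so the rest of the SSP lowering keeps treating it as the guard value. Another target could opt in the same way; the sketch below is hypothetical (MyTargetLowering and MYTGT::XOR_FP_PSEUDO do not exist in the tree), and only the SelectionDAG calls are real API:

    bool MyTargetLowering::useStackGuardXorFP() const {
      return true; // or gate on a subtarget/triple check, as X86 does
    }

    SDValue MyTargetLowering::emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
                                                  const SDLoc &DL) const {
      EVT PtrTy = getPointerTy(DAG.getDataLayout());
      // MYTGT::XOR_FP_PSEUDO would be a post-RA pseudo analogous to XOR64_FP.
      MachineSDNode *N = DAG.getMachineNode(MYTGT::XOR_FP_PSEUDO, DL, PtrTy, Val);
      return SDValue(N, 0);
    }
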
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 90830f4d5d1..d31104f9433 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1055,9 +1055,13 @@ namespace llvm {
Value *getIRStackGuard(IRBuilder<> &IRB) const override;
bool useLoadStackGuardNode() const override;
+ bool useStackGuardXorFP() const override;
void insertSSPDeclarations(Module &M) const override;
Value *getSDagStackGuard(const Module &M) const override;
Value *getSSPStackGuardCheck(const Module &M) const override;
+ SDValue emitStackGuardXorFP(SelectionDAG &DAG, SDValue Val,
+ const SDLoc &DL) const override;
+
/// Return true if the target stores SafeStack pointer at a fixed offset in
/// some non-standard address space, and populates the address space and
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 82885687bb4..0b63f376302 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -142,6 +142,15 @@ def WIN_ALLOCA_64 : I<0, Pseudo, (outs), (ins GR64:$size),
[(X86WinAlloca GR64:$size)]>,
Requires<[In64BitMode]>;
+// These instructions XOR the frame pointer into a GPR. They are used in some
+// stack protection schemes. These are post-RA pseudos because we only know the
+// frame register after register allocation.
+let Constraints = "$src = $dst", isPseudo = 1, Defs = [EFLAGS] in {
+ def XOR32_FP : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src),
+ "xorl\t$$FP, $src", []>, Requires<[NotLP64]>;
+ def XOR64_FP : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src),
+ "xorq\t$$FP $src", []>, Requires<[In64BitMode]>;
+}
//===----------------------------------------------------------------------===//
// EH Pseudo Instructions
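
These are post-RA pseudos, as the comment says, because the register the XOR must name is a per-function decision that is only settled once frame lowering knows whether the function keeps a frame pointer. Roughly, and simplifying the real X86RegisterInfo::getFrameRegister, the choice looks like the sketch below (an assumption-labeled paraphrase, not code from this patch, and it omits the usual LLVM includes):

    // Simplified sketch of how the frame register is picked; the real logic
    // lives in X86RegisterInfo::getFrameRegister.
    unsigned pickFrameRegister(const MachineFunction &MF,
                               const X86Subtarget &STI) {
      const X86FrameLowering *TFI = STI.getFrameLowering();
      bool Is64Bit = STI.is64Bit();
      // Functions with a frame pointer address locals off RBP/EBP; the rest
      // use RSP/ESP. Either way, it is unknowable at SelectionDAG time.
      if (TFI->hasFP(MF))
        return Is64Bit ? X86::RBP : X86::EBP;
      return Is64Bit ? X86::RSP : X86::ESP;
    }
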
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index a5bff06e70b..96f19d35815 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -7762,6 +7762,18 @@ static void expandLoadStackGuard(MachineInstrBuilder &MIB,
MIB.addReg(Reg, RegState::Kill).addImm(1).addReg(0).addImm(0).addReg(0);
}
+static bool expandXorFP(MachineInstrBuilder &MIB, const TargetInstrInfo &TII) {
+ MachineBasicBlock &MBB = *MIB->getParent();
+ MachineFunction &MF = *MBB.getParent();
+ const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
+ const X86RegisterInfo *TRI = Subtarget.getRegisterInfo();
+ unsigned XorOp =
+ MIB->getOpcode() == X86::XOR64_FP ? X86::XOR64rr : X86::XOR32rr;
+ MIB->setDesc(TII.get(XorOp));
+ MIB.addReg(TRI->getFrameRegister(MF), RegState::Undef);
+ return true;
+}
+
// This is used to handle spills for 128/256-bit registers when we have AVX512,
// but not VLX. If it uses an extended register we need to use an instruction
// that loads the lower 128/256-bit, but is available with only AVX512F.
@@ -7956,6 +7968,9 @@ bool X86InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
case TargetOpcode::LOAD_STACK_GUARD:
expandLoadStackGuard(MIB, *this);
return true;
+ case X86::XOR64_FP:
+ case X86::XOR32_FP:
+ return expandXorFP(MIB, *this);
}
return false;
}
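
Putting the pieces together: once expandPostRAPseudo rewrites XOR64_FP/XOR32_FP into a real XOR against the frame register, an MSVC-targeted function with stack protection carries the masked cookie in its prologue and unmasks it before the check. The snippet below is only a way to observe that; the compile command and the expected sequence are assumptions about a sufficiently new clang, not part of the patch:

    // Compile with something like
    //   clang --target=x86_64-pc-windows-msvc -fstack-protector -O2 -S guard.cpp
    // and look for the cookie load, the xor with the frame register, and the
    // matching unmask-and-check before the return. Exact output depends on the
    // clang version and options.
    extern void fill(char *buf);

    int guarded() {
      char buf[64];   // a byte array this size makes the function SSP-eligible
      fill(buf);
      return buf[0];
    }
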