author     Tim Shen <timshen91@gmail.com>    2016-04-08 21:26:31 +0000
committer  Tim Shen <timshen91@gmail.com>    2016-04-08 21:26:31 +0000
commit     0012756489bd46c1a5c3ff89ce281c14e88bee1f
tree       fea568a38ea17a075072117c87df36a094712b55  /llvm/lib
parent     c0a627524d0a96fb0108b1ab96f2f76e96a71a53
[SSP] Remove llvm.stackprotectorcheck.
This is a cleanup patch for SSP support in LLVM. There is no functional change.
llvm.stackprotectorcheck is not needed: SelectionDAG does not actually
lower it in SelectBasicBlock; instead, it emits the check code in
FinishBasicBlock, ignoring the position at which the intrinsic was
inserted (see FindSplitPointForStackProtector()).
llvm-svn: 265851
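
The patch replaces the intrinsic-based handshake with a set of target hooks. For readability (the raw diff below is dense), here is a condensed sketch of the new default TargetLoweringBase hooks; the bodies are taken from the diff, with explanatory comments added here, and surrounding declarations and includes elided.

```cpp
// Sketch condensed from the TargetLoweringBase.cpp hunk of this patch.
// getIRStackGuard(): the guard the IR-level StackProtector pass loads
// directly; returning nullptr defers SSP handling to SelectionDAG.
Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const {
  if (getTargetMachine().getTargetTriple().isOSOpenBSD()) {
    // OpenBSD keeps its guard in the hidden global __guard_local.
    Module &M = *IRB.GetInsertBlock()->getParent()->getParent();
    PointerType *PtrTy = Type::getInt8PtrTy(M.getContext());
    auto *Guard = cast<GlobalValue>(M.getOrInsertGlobal("__guard_local", PtrTy));
    Guard->setVisibility(GlobalValue::HiddenVisibility);
    return Guard;
  }
  return nullptr;
}

// insertSSPDeclarations() / getSDStackGuard(): declare and fetch the guard
// that SelectionDAG compares against in FinishBasicBlock. The default is the
// "standard" __stack_chk_guard global; LOAD_STACK_GUARD support is left as a
// TODO in the patch.
void TargetLoweringBase::insertSSPDeclarations(Module &M) const {
  M.getOrInsertGlobal("__stack_chk_guard", Type::getInt8PtrTy(M.getContext()));
}

Value *TargetLoweringBase::getSDStackGuard(const Module &M) const {
  return M.getGlobalVariable("__stack_chk_guard");
}
```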
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp |  17
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h   |  20
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp    |   9
-rw-r--r--  llvm/lib/CodeGen/StackProtector.cpp                   | 126
-rw-r--r--  llvm/lib/CodeGen/TargetLoweringBase.cpp               |  29
-rw-r--r--  llvm/lib/IR/AutoUpgrade.cpp                           |  11
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.cpp       |   4
-rw-r--r--  llvm/lib/Target/AArch64/AArch64ISelLowering.h         |   2
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp               |  17
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.h                 |   6
10 files changed, 109 insertions, 132 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 5ad42936169..27d24222aff 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -2014,7 +2014,10 @@ void SelectionDAGBuilder::visitSPDescriptorParent(StackProtectorDescriptor &SPD, MachineFrameInfo *MFI = ParentBB->getParent()->getFrameInfo(); int FI = MFI->getStackProtectorIndex(); - const Value *IRGuard = SPD.getGuard(); + const Module &M = *ParentBB->getParent()->getFunction()->getParent(); + const Value *IRGuard = TLI.getSDStackGuard(M); + assert(IRGuard && "Currently there must be an IR guard in order to use " + "SelectionDAG SSP"); SDValue GuardPtr = getValue(IRGuard); SDValue StackSlotPtr = DAG.getFrameIndex(FI, PtrTy); @@ -5517,18 +5520,6 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) { case Intrinsic::invariant_end: // Discard region information. return nullptr; - case Intrinsic::stackprotectorcheck: { - // Do not actually emit anything for this basic block. Instead we initialize - // the stack protector descriptor and export the guard variable so we can - // access it in FinishBasicBlock. - const BasicBlock *BB = I.getParent(); - SPDescriptor.initialize(BB, FuncInfo.MBBMap[BB], I); - ExportFromCurrentBlock(SPDescriptor.getGuard()); - - // Flush our exports since we are going to process a terminator. - (void)getControlRoot(); - return nullptr; - } case Intrinsic::clear_cache: return TLI.getClearCacheBuiltinName(); case Intrinsic::donothing: diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h index 2d2002680ba..47d8870cd95 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h @@ -464,29 +464,25 @@ private: /// the same function, use the same failure basic block). class StackProtectorDescriptor { public: - StackProtectorDescriptor() : ParentMBB(nullptr), SuccessMBB(nullptr), - FailureMBB(nullptr), Guard(nullptr), - GuardReg(0) { } + StackProtectorDescriptor() + : ParentMBB(nullptr), SuccessMBB(nullptr), FailureMBB(nullptr), + GuardReg(0) {} /// Returns true if all fields of the stack protector descriptor are /// initialized implying that we should/are ready to emit a stack protector. bool shouldEmitStackProtector() const { - return ParentMBB && SuccessMBB && FailureMBB && Guard; + return ParentMBB && SuccessMBB && FailureMBB; } /// Initialize the stack protector descriptor structure for a new basic /// block. - void initialize(const BasicBlock *BB, - MachineBasicBlock *MBB, - const CallInst &StackProtCheckCall) { + void initialize(const BasicBlock *BB, MachineBasicBlock *MBB) { // Make sure we are not initialized yet. assert(!shouldEmitStackProtector() && "Stack Protector Descriptor is " "already initialized!"); ParentMBB = MBB; SuccessMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ true); FailureMBB = AddSuccessorMBB(BB, MBB, /* IsLikely */ false, FailureMBB); - if (!Guard) - Guard = StackProtCheckCall.getArgOperand(0); } /// Reset state that changes when we handle different basic blocks. @@ -515,14 +511,12 @@ private: /// always the same. 
void resetPerFunctionState() { FailureMBB = nullptr; - Guard = nullptr; GuardReg = 0; } MachineBasicBlock *getParentMBB() { return ParentMBB; } MachineBasicBlock *getSuccessMBB() { return SuccessMBB; } MachineBasicBlock *getFailureMBB() { return FailureMBB; } - const Value *getGuard() { return Guard; } unsigned getGuardReg() const { return GuardReg; } void setGuardReg(unsigned R) { GuardReg = R; } @@ -545,10 +539,6 @@ private: /// contain a call to __stack_chk_fail(). MachineBasicBlock *FailureMBB; - /// The guard variable which we will compare against the stored value in the - /// stack protector stack slot. - const Value *Guard; - /// The virtual register holding the stack guard value. unsigned GuardReg; diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index e9cb6b43214..b0212c157c5 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -11,7 +11,7 @@ // //===----------------------------------------------------------------------===// -#include "llvm/CodeGen/GCStrategy.h" +#include "llvm/CodeGen/SelectionDAG.h" #include "ScheduleDAGSDNodes.h" #include "SelectionDAGBuilder.h" #include "llvm/ADT/PostOrderIterator.h" @@ -25,6 +25,7 @@ #include "llvm/CodeGen/FastISel.h" #include "llvm/CodeGen/FunctionLoweringInfo.h" #include "llvm/CodeGen/GCMetadata.h" +#include "llvm/CodeGen/GCStrategy.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineInstrBuilder.h" @@ -32,8 +33,8 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/ScheduleHazardRecognizer.h" #include "llvm/CodeGen/SchedulerRegistry.h" -#include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/SelectionDAGISel.h" +#include "llvm/CodeGen/StackProtector.h" #include "llvm/CodeGen/WinEHFuncInfo.h" #include "llvm/IR/Constants.h" #include "llvm/IR/DebugInfo.h" @@ -377,6 +378,8 @@ SelectionDAGISel::~SelectionDAGISel() { void SelectionDAGISel::getAnalysisUsage(AnalysisUsage &AU) const { AU.addRequired<AAResultsWrapperPass>(); AU.addRequired<GCModuleInfo>(); + AU.addRequired<StackProtector>(); + AU.addPreserved<StackProtector>(); AU.addPreserved<GCModuleInfo>(); AU.addRequired<TargetLibraryInfoWrapperPass>(); if (UseMBPI && OptLevel != CodeGenOpt::None) @@ -1476,6 +1479,8 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) { LowerArguments(Fn); } } + if (getAnalysis<StackProtector>().shouldEmitSDCheck(*LLVMBB)) + SDB->SPDescriptor.initialize(LLVMBB, FuncInfo->MBBMap[LLVMBB]); if (Begin != BI) ++NumDAGBlocks; diff --git a/llvm/lib/CodeGen/StackProtector.cpp b/llvm/lib/CodeGen/StackProtector.cpp index 23ae672944d..3ea56d85fe5 100644 --- a/llvm/lib/CodeGen/StackProtector.cpp +++ b/llvm/lib/CodeGen/StackProtector.cpp @@ -89,6 +89,8 @@ bool StackProtector::runOnFunction(Function &Fn) { getAnalysisIfAvailable<DominatorTreeWrapperPass>(); DT = DTWP ? 
&DTWP->getDomTree() : nullptr; TLI = TM->getSubtargetImpl(Fn)->getTargetLowering(); + HasPrologue = false; + HasIRCheck = false; Attribute Attr = Fn.getFnAttribute("stack-protector-buffer-size"); if (Attr.isStringAttribute() && @@ -200,11 +202,21 @@ bool StackProtector::HasAddressTaken(const Instruction *AI) { bool StackProtector::RequiresStackProtector() { bool Strong = false; bool NeedsProtector = false; + for (const BasicBlock &BB : *F) + for (const Instruction &I : BB) + if (const CallInst *CI = dyn_cast<CallInst>(&I)) + if (CI->getCalledFunction() == + Intrinsic::getDeclaration(F->getParent(), + Intrinsic::stackprotector)) + HasPrologue = true; + if (F->hasFnAttribute(Attribute::StackProtectReq)) { NeedsProtector = true; Strong = true; // Use the same heuristic as strong to determine SSPLayout } else if (F->hasFnAttribute(Attribute::StackProtectStrong)) Strong = true; + else if (HasPrologue) + NeedsProtector = true; else if (!F->hasFnAttribute(Attribute::StackProtect)) return false; @@ -256,68 +268,6 @@ bool StackProtector::RequiresStackProtector() { return NeedsProtector; } -static bool InstructionWillNotHaveChain(const Instruction *I) { - return !I->mayHaveSideEffects() && !I->mayReadFromMemory() && - isSafeToSpeculativelyExecute(I); -} - -/// Identify if RI has a previous instruction in the "Tail Position" and return -/// it. Otherwise return 0. -/// -/// This is based off of the code in llvm::isInTailCallPosition. The difference -/// is that it inverts the first part of llvm::isInTailCallPosition since -/// isInTailCallPosition is checking if a call is in a tail call position, and -/// we are searching for an unknown tail call that might be in the tail call -/// position. Once we find the call though, the code uses the same refactored -/// code, returnTypeIsEligibleForTailCall. -static CallInst *FindPotentialTailCall(BasicBlock *BB, ReturnInst *RI, - const TargetLoweringBase *TLI) { - // Establish a reasonable upper bound on the maximum amount of instructions we - // will look through to find a tail call. - unsigned SearchCounter = 0; - const unsigned MaxSearch = 4; - bool NoInterposingChain = true; - - for (BasicBlock::reverse_iterator I = std::next(BB->rbegin()), E = BB->rend(); - I != E && SearchCounter < MaxSearch; ++I) { - Instruction *Inst = &*I; - - // Skip over debug intrinsics and do not allow them to affect our MaxSearch - // counter. - if (isa<DbgInfoIntrinsic>(Inst)) - continue; - - // If we find a call and the following conditions are satisifed, then we - // have found a tail call that satisfies at least the target independent - // requirements of a tail call: - // - // 1. The call site has the tail marker. - // - // 2. The call site either will not cause the creation of a chain or if a - // chain is necessary there are no instructions in between the callsite and - // the call which would create an interposing chain. - // - // 3. The return type of the function does not impede tail call - // optimization. - if (CallInst *CI = dyn_cast<CallInst>(Inst)) { - if (CI->isTailCall() && - (InstructionWillNotHaveChain(CI) || NoInterposingChain) && - returnTypeIsEligibleForTailCall(BB->getParent(), CI, RI, *TLI)) - return CI; - } - - // If we did not find a call see if we have an instruction that may create - // an interposing chain. - NoInterposingChain = - NoInterposingChain && InstructionWillNotHaveChain(Inst); - - // Increment max search. 
- SearchCounter++; - } - - return nullptr; -} - /// Insert code into the entry block that stores the __stack_chk_guard /// variable onto the stack: /// @@ -329,29 +279,25 @@ static CallInst *FindPotentialTailCall(BasicBlock *BB, ReturnInst *RI, /// Returns true if the platform/triple supports the stackprotectorcreate pseudo /// node. static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI, - const TargetLoweringBase *TLI, const Triple &TT, - AllocaInst *&AI, Value *&StackGuardVar) { + const TargetLoweringBase *TLI, AllocaInst *&AI, + Value *&StackGuardVar) { bool SupportsSelectionDAGSP = false; - PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext()); IRBuilder<> B(&F->getEntryBlock().front()); - StackGuardVar = TLI->getStackCookieLocation(B); + StackGuardVar = TLI->getIRStackGuard(B); if (!StackGuardVar) { - if (TT.isOSOpenBSD()) { - StackGuardVar = M->getOrInsertGlobal("__guard_local", PtrTy); - cast<GlobalValue>(StackGuardVar) - ->setVisibility(GlobalValue::HiddenVisibility); - } else { - SupportsSelectionDAGSP = true; - StackGuardVar = M->getOrInsertGlobal("__stack_chk_guard", PtrTy); - } + /// Use SelectionDAG SSP handling, since there isn't an IR guard. + SupportsSelectionDAGSP = true; + TLI->insertSSPDeclarations(*M); + StackGuardVar = TLI->getSDStackGuard(*M); } + assert(StackGuardVar && "Must have stack guard available"); + PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext()); AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot"); LoadInst *LI = B.CreateLoad(StackGuardVar, "StackGuard"); B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector), {LI, AI}); - return SupportsSelectionDAGSP; } @@ -362,7 +308,6 @@ static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI, /// - The epilogue checks the value stored in the prologue against the original /// value. It calls __stack_chk_fail if they differ. bool StackProtector::InsertStackProtectors() { - bool HasPrologue = false; bool SupportsSelectionDAGSP = EnableSelectionDAGSP && !TM->Options.EnableFastISel; AllocaInst *AI = nullptr; // Place on stack that stores the stack guard. @@ -377,27 +322,10 @@ bool StackProtector::InsertStackProtectors() { if (!HasPrologue) { HasPrologue = true; SupportsSelectionDAGSP &= - CreatePrologue(F, M, RI, TLI, Trip, AI, StackGuardVar); + CreatePrologue(F, M, RI, TLI, AI, StackGuardVar); } - if (SupportsSelectionDAGSP) { - // Since we have a potential tail call, insert the special stack check - // intrinsic. - Instruction *InsertionPt = nullptr; - if (CallInst *CI = FindPotentialTailCall(BB, RI, TLI)) { - InsertionPt = CI; - } else { - InsertionPt = RI; - // At this point we know that BB has a return statement so it *DOES* - // have a terminator. - assert(InsertionPt != nullptr && - "BB must have a terminator instruction at this point."); - } - - Function *Intrinsic = - Intrinsic::getDeclaration(M, Intrinsic::stackprotectorcheck); - CallInst::Create(Intrinsic, StackGuardVar, "", InsertionPt); - } else { + if (!SupportsSelectionDAGSP) { // If we do not support SelectionDAG based tail calls, generate IR level // tail calls. // @@ -428,6 +356,10 @@ bool StackProtector::InsertStackProtectors() { // fail BB generated by the stack protector pseudo instruction. BasicBlock *FailBB = CreateFailBB(); + // Set HasIRCheck to true, so that SelectionDAG will not generate its own + // version. + HasIRCheck = true; + // Split the basic block before the return instruction. 
BasicBlock *NewBB = BB->splitBasicBlock(RI->getIterator(), "SP_return"); @@ -487,3 +419,7 @@ BasicBlock *StackProtector::CreateFailBB() { B.CreateUnreachable(); return FailBB; } + +bool StackProtector::shouldEmitSDCheck(const BasicBlock &BB) const { + return HasPrologue && !HasIRCheck && dyn_cast<ReturnInst>(BB.getTerminator()); +} diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp index 7ec7006478a..8cadbb2dcd0 100644 --- a/llvm/lib/CodeGen/TargetLoweringBase.cpp +++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp @@ -1746,3 +1746,32 @@ bool TargetLoweringBase::isLegalAddressingMode(const DataLayout &DL, return true; } + +//===----------------------------------------------------------------------===// +// Stack Protector +//===----------------------------------------------------------------------===// + +// For OpenBSD return its special guard variable. Otherwise return nullptr, +// so that SelectionDAG handle SSP. +Value *TargetLoweringBase::getIRStackGuard(IRBuilder<> &IRB) const { + if (getTargetMachine().getTargetTriple().isOSOpenBSD()) { + Module &M = *IRB.GetInsertBlock()->getParent()->getParent(); + PointerType *PtrTy = Type::getInt8PtrTy(M.getContext()); + auto Guard = cast<GlobalValue>(M.getOrInsertGlobal("__guard_local", PtrTy)); + Guard->setVisibility(GlobalValue::HiddenVisibility); + return Guard; + } + return nullptr; +} + +// Currently only support "standard" __stack_chk_guard. +// TODO: add LOAD_STACK_GUARD support. +void TargetLoweringBase::insertSSPDeclarations(Module &M) const { + M.getOrInsertGlobal("__stack_chk_guard", Type::getInt8PtrTy(M.getContext())); +} + +// Currently only support "standard" __stack_chk_guard. +// TODO: add LOAD_STACK_GUARD support. +Value *TargetLoweringBase::getSDStackGuard(const Module &M) const { + return M.getGlobalVariable("__stack_chk_guard"); +} diff --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp index b9cee43b4db..121b43b7964 100644 --- a/llvm/lib/IR/AutoUpgrade.cpp +++ b/llvm/lib/IR/AutoUpgrade.cpp @@ -159,6 +159,12 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) { } break; + case 's': + if (Name == "stackprotectorcheck") { + NewFn = nullptr; + return true; + } + case 'x': { if (Name.startswith("x86.sse2.pcmpeq.") || Name.startswith("x86.sse2.pcmpgt.") || @@ -645,6 +651,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { Value *UndefV = UndefValue::get(Op0->getType()); Rep = Builder.CreateShuffleVector(Op0, UndefV, ConstantVector::get(Idxs)); + } else if (Name == "llvm.stackprotectorcheck") { + Rep = nullptr; } else { bool PD128 = false, PD256 = false, PS128 = false, PS256 = false; if (Name == "llvm.x86.avx.vpermil.pd.256") @@ -684,7 +692,8 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) { } } - CI->replaceAllUsesWith(Rep); + if (Rep) + CI->replaceAllUsesWith(Rep); CI->eraseFromParent(); return; } diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp index 2efe8473832..f9fb9c17cb5 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -10212,9 +10212,9 @@ bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &, return false; } -Value *AArch64TargetLowering::getStackCookieLocation(IRBuilder<> &IRB) const { +Value *AArch64TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const { if (!Subtarget->isTargetAndroid()) - return TargetLowering::getStackCookieLocation(IRB); + return 
TargetLowering::getIRStackGuard(IRB); // Android provides a fixed TLS slot for the stack cookie. See the definition // of TLS_SLOT_STACK_GUARD in diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h index c9be8304a17..f106820bf6b 100644 --- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -360,7 +360,7 @@ public: /// If the target has a standard location for the stack protector cookie, /// returns the address of that location. Otherwise, returns nullptr. - Value *getStackCookieLocation(IRBuilder<> &IRB) const override; + Value *getIRStackGuard(IRBuilder<> &IRB) const override; /// If the target has a standard location for the unsafe stack pointer, /// returns the address of that location. Otherwise, returns nullptr. diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index 4c2e54f8102..1931c7f4263 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -2193,9 +2193,9 @@ unsigned X86TargetLowering::getAddressSpace() const { return 256; } -Value *X86TargetLowering::getStackCookieLocation(IRBuilder<> &IRB) const { +Value *X86TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const { if (!Subtarget.isTargetLinux()) - return TargetLowering::getStackCookieLocation(IRB); + return TargetLowering::getIRStackGuard(IRB); // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs: // %gs:0x14 on i386 @@ -2206,6 +2206,19 @@ Value *X86TargetLowering::getStackCookieLocation(IRBuilder<> &IRB) const { Type::getInt8PtrTy(IRB.getContext())->getPointerTo(AddressSpace)); } +void X86TargetLowering::insertSSPDeclarations(Module &M) const { + if (!Subtarget.isTargetLinux()) + TargetLowering::insertSSPDeclarations(M); + else + llvm_unreachable("X86 Linux supports customized IR stack guard load"); +} + +Value *X86TargetLowering::getSDStackGuard(const Module &M) const { + if (!Subtarget.isTargetLinux()) + return TargetLowering::getSDStackGuard(M); + llvm_unreachable("X86 Linux supports customized IR stack guard load"); +} + Value *X86TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const { if (!Subtarget.isTargetAndroid()) return TargetLowering::getSafeStackPointerLocation(IRB); diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h index 7007782ccf4..93705d68621 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.h +++ b/llvm/lib/Target/X86/X86ISelLowering.h @@ -962,7 +962,11 @@ namespace llvm { /// If the target has a standard location for the stack protector cookie, /// returns the address of that location. Otherwise, returns nullptr. - Value *getStackCookieLocation(IRBuilder<> &IRB) const override; + Value *getIRStackGuard(IRBuilder<> &IRB) const override; + + void insertSSPDeclarations(Module &M) const override; + + Value *getSDStackGuard(const Module &M) const override; /// Return true if the target stores SafeStack pointer at a fixed offset in /// some non-standard address space, and populates the address space and |
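
Taken together, the replacement flow is: the StackProtector pass records whether it emitted a prologue and whether it generated its own IR-level check, SelectionDAGISel asks it per block whether a DAG-level check is still needed, and SelectionDAGBuilder fetches the guard through getSDStackGuard() instead of through the removed intrinsic's operand. Below is a condensed sketch of those fragments from the diff above, with the surrounding function bodies elided and comments added here.

```cpp
// StackProtector.cpp: a return block needs a SelectionDAG-emitted check only
// if the prologue exists and no IR-level check was generated for it.
bool StackProtector::shouldEmitSDCheck(const BasicBlock &BB) const {
  return HasPrologue && !HasIRCheck && dyn_cast<ReturnInst>(BB.getTerminator());
}

// SelectionDAGISel.cpp, SelectAllBasicBlocks(): initialize the stack protector
// descriptor for such blocks, replacing the old handling of
// llvm.stackprotectorcheck in visitIntrinsicCall().
if (getAnalysis<StackProtector>().shouldEmitSDCheck(*LLVMBB))
  SDB->SPDescriptor.initialize(LLVMBB, FuncInfo->MBBMap[LLVMBB]);

// SelectionDAGBuilder.cpp, visitSPDescriptorParent(): the guard now comes from
// the target hook rather than from the intrinsic's first argument.
const Module &M = *ParentBB->getParent()->getFunction()->getParent();
const Value *IRGuard = TLI.getSDStackGuard(M);
assert(IRGuard && "Currently there must be an IR guard in order to use "
                  "SelectionDAG SSP");
```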