diff options
Diffstat (limited to 'llvm/lib/Target')
| -rw-r--r-- | llvm/lib/Target/AArch64/AArch64.h | 2 | ||||
| -rw-r--r-- | llvm/lib/Target/AArch64/AArch64BranchTargets.cpp | 130 | ||||
| -rw-r--r-- | llvm/lib/Target/AArch64/AArch64TargetMachine.cpp | 9 | ||||
| -rw-r--r-- | llvm/lib/Target/AArch64/CMakeLists.txt | 1 |
4 files changed, 142 insertions, 0 deletions
diff --git a/llvm/lib/Target/AArch64/AArch64.h b/llvm/lib/Target/AArch64/AArch64.h index 74f22e287f8..6472dcd5157 100644 --- a/llvm/lib/Target/AArch64/AArch64.h +++ b/llvm/lib/Target/AArch64/AArch64.h @@ -46,6 +46,7 @@ FunctionPass *createAArch64A57FPLoadBalancing(); FunctionPass *createAArch64A53Fix835769(); FunctionPass *createFalkorHWPFFixPass(); FunctionPass *createFalkorMarkStridedAccessesPass(); +FunctionPass *createAArch64BranchTargetsPass(); FunctionPass *createAArch64CleanupLocalDynamicTLSPass(); @@ -58,6 +59,7 @@ FunctionPass *createAArch64PreLegalizeCombiner(); void initializeAArch64A53Fix835769Pass(PassRegistry&); void initializeAArch64A57FPLoadBalancingPass(PassRegistry&); void initializeAArch64AdvSIMDScalarPass(PassRegistry&); +void initializeAArch64BranchTargetsPass(PassRegistry&); void initializeAArch64CollectLOHPass(PassRegistry&); void initializeAArch64CondBrTuningPass(PassRegistry &); void initializeAArch64ConditionalComparesPass(PassRegistry&); diff --git a/llvm/lib/Target/AArch64/AArch64BranchTargets.cpp b/llvm/lib/Target/AArch64/AArch64BranchTargets.cpp new file mode 100644 index 00000000000..da70a624c5b --- /dev/null +++ b/llvm/lib/Target/AArch64/AArch64BranchTargets.cpp @@ -0,0 +1,130 @@ +//===-- AArch64BranchTargets.cpp -- Harden code using v8.5-A BTI extension -==// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This pass inserts BTI instructions at the start of every function and basic +// block which could be indirectly called. The hardware will (when enabled) +// trap when an indirect branch or call instruction targets an instruction +// which is not a valid BTI instruction. This is intended to guard against +// control-flow hijacking attacks. 
Note that this does not do anything for RET +// instructions, as they can be more precisely protected by return address +// signing. +// +//===----------------------------------------------------------------------===// + +#include "AArch64Subtarget.h" +#include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" +#include "llvm/CodeGen/MachineJumpTableInfo.h" +#include "llvm/CodeGen/MachineModuleInfo.h" +#include "llvm/Support/Debug.h" + +using namespace llvm; + +#define DEBUG_TYPE "aarch64-branch-targets" +#define AARCH64_BRANCH_TARGETS_NAME "AArch64 Branch Targets" + +namespace { +class AArch64BranchTargets : public MachineFunctionPass { +public: + static char ID; + AArch64BranchTargets() : MachineFunctionPass(ID) {} + void getAnalysisUsage(AnalysisUsage &AU) const override; + bool runOnMachineFunction(MachineFunction &MF) override; + StringRef getPassName() const override { return AARCH64_BRANCH_TARGETS_NAME; } + +private: + void addBTI(MachineBasicBlock &MBB, bool CouldCall, bool CouldJump); +}; +} // end anonymous namespace + +char AArch64BranchTargets::ID = 0; + +INITIALIZE_PASS(AArch64BranchTargets, "aarch64-branch-targets", + AARCH64_BRANCH_TARGETS_NAME, false, false) + +void AArch64BranchTargets::getAnalysisUsage(AnalysisUsage &AU) const { + AU.setPreservesCFG(); + MachineFunctionPass::getAnalysisUsage(AU); +} + +FunctionPass *llvm::createAArch64BranchTargetsPass() { + return new AArch64BranchTargets(); +} + +bool AArch64BranchTargets::runOnMachineFunction(MachineFunction &MF) { + const Function &F = MF.getFunction(); + if (!F.hasFnAttribute("branch-target-enforcement")) + return false; + + LLVM_DEBUG( + dbgs() << "********** AArch64 Branch Targets **********\n" + << "********** Function: " << MF.getName() << '\n'); + + // LLVM does not consider basic blocks which are the targets of jump tables + // to be address-taken (the address can't escape anywhere else), but they are + // used for indirect branches, so need BTI 
instructions. + SmallPtrSet<MachineBasicBlock *, 8> JumpTableTargets; + if (auto *JTI = MF.getJumpTableInfo()) + for (auto &JTE : JTI->getJumpTables()) + for (auto *MBB : JTE.MBBs) + JumpTableTargets.insert(MBB); + + bool MadeChange = false; + for (MachineBasicBlock &MBB : MF) { + bool CouldCall = false, CouldJump = false; + // If the function is address-taken or externally-visible, it could be + // indirectly called. PLT entries and tail-calls use BR, but when they are + // in guarded pages should all use x16 or x17 to hold the called + // address, so we don't need to set CouldJump here. BR instructions in + // non-guarded pages (which might be non-BTI-aware code) are allowed to + // branch to a "BTI c" using any register. + if (&MBB == &*MF.begin() && (F.hasAddressTaken() || !F.hasLocalLinkage())) + CouldCall = true; + + // If the block itself is address-taken, it could be indirectly branched + // to, but not called. + if (MBB.hasAddressTaken() || JumpTableTargets.count(&MBB)) + CouldJump = true; + + if (CouldCall || CouldJump) { + addBTI(MBB, CouldCall, CouldJump); + MadeChange = true; + } + } + + return MadeChange; + } + + void AArch64BranchTargets::addBTI(MachineBasicBlock &MBB, bool CouldCall, + bool CouldJump) { + LLVM_DEBUG(dbgs() << "Adding BTI " << (CouldJump ? "j" : "") + << (CouldCall ? "c" : "") << " to " << MBB.getName() + << "\n"); + + const AArch64InstrInfo *TII = static_cast<const AArch64InstrInfo *>( + MBB.getParent()->getSubtarget().getInstrInfo()); + + unsigned HintNum = 32; + if (CouldCall) + HintNum |= 2; + if (CouldJump) + HintNum |= 4; + assert(HintNum != 32 && "No target kinds!"); + + auto MBBI = MBB.begin(); + + // PACI[AB]SP are implicitly BTI JC, so no BTI instruction needed there. 
+ if (MBBI != MBB.end() && (MBBI->getOpcode() == AArch64::PACIASP || + MBBI->getOpcode() == AArch64::PACIBSP)) + return; + + BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()), + TII->get(AArch64::HINT)) + .addImm(HintNum); + } diff --git a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp index a66f5277f24..e183288d8df 100644 --- a/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp +++ b/llvm/lib/Target/AArch64/AArch64TargetMachine.cpp @@ -141,6 +141,11 @@ static cl::opt<int> EnableGlobalISelAtO( static cl::opt<bool> EnableFalkorHWPFFix("aarch64-enable-falkor-hwpf-fix", cl::init(true), cl::Hidden); +static cl::opt<bool> + EnableBranchTargets("aarch64-enable-branch-targets", cl::Hidden, + cl::desc("Enable the AArch64 branch target pass"), + cl::init(true)); + extern "C" void LLVMInitializeAArch64Target() { // Register the target. RegisterTargetMachine<AArch64leTargetMachine> X(getTheAArch64leTarget()); @@ -151,6 +156,7 @@ extern "C" void LLVMInitializeAArch64Target() { initializeAArch64A53Fix835769Pass(*PR); initializeAArch64A57FPLoadBalancingPass(*PR); initializeAArch64AdvSIMDScalarPass(*PR); + initializeAArch64BranchTargetsPass(*PR); initializeAArch64CollectLOHPass(*PR); initializeAArch64ConditionalComparesPass(*PR); initializeAArch64ConditionOptimizerPass(*PR); @@ -537,6 +543,9 @@ void AArch64PassConfig::addPreEmitPass() { if (BranchRelaxation) addPass(&BranchRelaxationPassID); + if (EnableBranchTargets) + addPass(createAArch64BranchTargetsPass()); + if (TM->getOptLevel() != CodeGenOpt::None && EnableCollectLOH && TM->getTargetTriple().isOSBinFormatMachO()) addPass(createAArch64CollectLOHPass()); diff --git a/llvm/lib/Target/AArch64/CMakeLists.txt b/llvm/lib/Target/AArch64/CMakeLists.txt index e6ca69c1971..c57ebeb854c 100644 --- a/llvm/lib/Target/AArch64/CMakeLists.txt +++ b/llvm/lib/Target/AArch64/CMakeLists.txt @@ -22,6 +22,7 @@ add_llvm_target(AArch64CodeGen AArch64A57FPLoadBalancing.cpp 
AArch64AdvSIMDScalarPass.cpp AArch64AsmPrinter.cpp + AArch64BranchTargets.cpp AArch64CallLowering.cpp AArch64CleanupLocalDynamicTLSPass.cpp AArch64CollectLOH.cpp |

