Diffstat (limited to 'lld/ELF/InputSection.cpp')
-rw-r--r--  lld/ELF/InputSection.cpp  88
1 file changed, 44 insertions, 44 deletions
diff --git a/lld/ELF/InputSection.cpp b/lld/ELF/InputSection.cpp
index dcd68e74d63..60ad5a9113e 100644
--- a/lld/ELF/InputSection.cpp
+++ b/lld/ELF/InputSection.cpp
@@ -60,8 +60,8 @@ DenseMap<SectionBase *, int> elf::buildSectionOrder() {
// Build a map from sections to their priorities.
for (InputFile *File : ObjectFiles) {
- for (Symbol *Body : File->getSymbols()) {
- auto *D = dyn_cast<DefinedRegular>(Body);
+ for (Symbol *Sym : File->getSymbols()) {
+ auto *D = dyn_cast<DefinedRegular>(Sym);
if (!D || !D->Section)
continue;
int &Priority = SectionOrder[D->Section];
@@ -380,7 +380,7 @@ void InputSection::copyRelocations(uint8_t *Buf, ArrayRef<RelTy> Rels) {
for (const RelTy &Rel : Rels) {
RelType Type = Rel.getType(Config->IsMips64EL);
- Symbol &Body = this->getFile<ELFT>()->getRelocTargetSym(Rel);
+ Symbol &Sym = this->getFile<ELFT>()->getRelocTargetSym(Rel);
auto *P = reinterpret_cast<typename ELFT::Rela *>(Buf);
Buf += sizeof(RelTy);
@@ -391,10 +391,10 @@ void InputSection::copyRelocations(uint8_t *Buf, ArrayRef<RelTy> Rels) {
// Output section VA is zero for -r, so r_offset is an offset within the
// section, but for --emit-relocs it is an virtual address.
P->r_offset = Sec->getOutputSection()->Addr + Sec->getOffset(Rel.r_offset);
- P->setSymbolAndType(InX::SymTab->getSymbolIndex(&Body), Type,
+ P->setSymbolAndType(InX::SymTab->getSymbolIndex(&Sym), Type,
Config->IsMips64EL);
- if (Body.Type == STT_SECTION) {
+ if (Sym.Type == STT_SECTION) {
// We combine multiple section symbols into only one per
// section. This means we have to update the addend. That is
// trivial for Elf_Rela, but for Elf_Rel we have to write to the
@@ -404,19 +404,19 @@ void InputSection::copyRelocations(uint8_t *Buf, ArrayRef<RelTy> Rels) {
// avoid having to parse and recreate .eh_frame, we just replace any
// relocation in it pointing to discarded sections with R_*_NONE, which
// hopefully creates a frame that is ignored at runtime.
- SectionBase *Section = cast<DefinedRegular>(Body).Section;
+ SectionBase *Section = cast<DefinedRegular>(Sym).Section;
if (Section == &InputSection::Discarded) {
P->setSymbolAndType(0, 0, false);
continue;
}
if (Config->IsRela) {
- P->r_addend += Body.getVA() - Section->getOutputSection()->Addr;
+ P->r_addend += Sym.getVA() - Section->getOutputSection()->Addr;
} else if (Config->Relocatable) {
const uint8_t *BufLoc = Sec->Data.begin() + Rel.r_offset;
Sec->Relocations.push_back({R_ABS, Type, Rel.r_offset,
Target->getImplicitAddend(BufLoc, Type),
- &Body});
+ &Sym});
}
}
@@ -484,55 +484,55 @@ static uint64_t getAArch64UndefinedRelativeWeakVA(uint64_t Type, uint64_t A,
// ARM SBREL relocations are of the form S + A - B where B is the static base
// The ARM ABI defines base to be "addressing origin of the output segment
// defining the symbol S". We defined the "addressing origin"/static base to be
-// the base of the PT_LOAD segment containing the Body.
+// the base of the PT_LOAD segment containing the Sym.
// The procedure call standard only defines a Read Write Position Independent
// RWPI variant so in practice we should expect the static base to be the base
// of the RW segment.
-static uint64_t getARMStaticBase(const Symbol &Body) {
- OutputSection *OS = Body.getOutputSection();
+static uint64_t getARMStaticBase(const Symbol &Sym) {
+ OutputSection *OS = Sym.getOutputSection();
if (!OS || !OS->PtLoad || !OS->PtLoad->FirstSec)
- fatal("SBREL relocation to " + Body.getName() + " without static base");
+ fatal("SBREL relocation to " + Sym.getName() + " without static base");
return OS->PtLoad->FirstSec->Addr;
}
static uint64_t getRelocTargetVA(RelType Type, int64_t A, uint64_t P,
- const Symbol &Body, RelExpr Expr) {
+ const Symbol &Sym, RelExpr Expr) {
switch (Expr) {
case R_INVALID:
return 0;
case R_ABS:
case R_RELAX_GOT_PC_NOPIC:
- return Body.getVA(A);
+ return Sym.getVA(A);
case R_ARM_SBREL:
- return Body.getVA(A) - getARMStaticBase(Body);
+ return Sym.getVA(A) - getARMStaticBase(Sym);
case R_GOT:
case R_RELAX_TLS_GD_TO_IE_ABS:
- return Body.getGotVA() + A;
+ return Sym.getGotVA() + A;
case R_GOTONLY_PC:
return InX::Got->getVA() + A - P;
case R_GOTONLY_PC_FROM_END:
return InX::Got->getVA() + A - P + InX::Got->getSize();
case R_GOTREL:
- return Body.getVA(A) - InX::Got->getVA();
+ return Sym.getVA(A) - InX::Got->getVA();
case R_GOTREL_FROM_END:
- return Body.getVA(A) - InX::Got->getVA() - InX::Got->getSize();
+ return Sym.getVA(A) - InX::Got->getVA() - InX::Got->getSize();
case R_GOT_FROM_END:
case R_RELAX_TLS_GD_TO_IE_END:
- return Body.getGotOffset() + A - InX::Got->getSize();
+ return Sym.getGotOffset() + A - InX::Got->getSize();
case R_GOT_OFF:
- return Body.getGotOffset() + A;
+ return Sym.getGotOffset() + A;
case R_GOT_PAGE_PC:
case R_RELAX_TLS_GD_TO_IE_PAGE_PC:
- return getAArch64Page(Body.getGotVA() + A) - getAArch64Page(P);
+ return getAArch64Page(Sym.getGotVA() + A) - getAArch64Page(P);
case R_GOT_PC:
case R_RELAX_TLS_GD_TO_IE:
- return Body.getGotVA() + A - P;
+ return Sym.getGotVA() + A - P;
case R_HINT:
case R_NONE:
case R_TLSDESC_CALL:
llvm_unreachable("cannot relocate hint relocs");
case R_MIPS_GOTREL:
- return Body.getVA(A) - InX::MipsGot->getGp();
+ return Sym.getVA(A) - InX::MipsGot->getGp();
case R_MIPS_GOT_GP:
return InX::MipsGot->getGp() + A;
case R_MIPS_GOT_GP_PC: {
@@ -549,33 +549,33 @@ static uint64_t getRelocTargetVA(RelType Type, int64_t A, uint64_t P,
// If relocation against MIPS local symbol requires GOT entry, this entry
// should be initialized by 'page address'. This address is high 16-bits
// of sum the symbol's value and the addend.
- return InX::MipsGot->getVA() + InX::MipsGot->getPageEntryOffset(Body, A) -
+ return InX::MipsGot->getVA() + InX::MipsGot->getPageEntryOffset(Sym, A) -
InX::MipsGot->getGp();
case R_MIPS_GOT_OFF:
case R_MIPS_GOT_OFF32:
// In case of MIPS if a GOT relocation has non-zero addend this addend
// should be applied to the GOT entry content not to the GOT entry offset.
// That is why we use separate expression type.
- return InX::MipsGot->getVA() + InX::MipsGot->getBodyEntryOffset(Body, A) -
+ return InX::MipsGot->getVA() + InX::MipsGot->getSymEntryOffset(Sym, A) -
InX::MipsGot->getGp();
case R_MIPS_TLSGD:
return InX::MipsGot->getVA() + InX::MipsGot->getTlsOffset() +
- InX::MipsGot->getGlobalDynOffset(Body) - InX::MipsGot->getGp();
+ InX::MipsGot->getGlobalDynOffset(Sym) - InX::MipsGot->getGp();
case R_MIPS_TLSLD:
return InX::MipsGot->getVA() + InX::MipsGot->getTlsOffset() +
InX::MipsGot->getTlsIndexOff() - InX::MipsGot->getGp();
case R_PAGE_PC:
case R_PLT_PAGE_PC: {
uint64_t Dest;
- if (Body.isUndefWeak())
+ if (Sym.isUndefWeak())
Dest = getAArch64Page(A);
else
- Dest = getAArch64Page(Body.getVA(A));
+ Dest = getAArch64Page(Sym.getVA(A));
return Dest - getAArch64Page(P);
}
case R_PC: {
uint64_t Dest;
- if (Body.isUndefWeak()) {
+ if (Sym.isUndefWeak()) {
// On ARM and AArch64 a branch to an undefined weak resolves to the
// next instruction, otherwise the place.
if (Config->EMachine == EM_ARM)
@@ -583,19 +583,19 @@ static uint64_t getRelocTargetVA(RelType Type, int64_t A, uint64_t P,
else if (Config->EMachine == EM_AARCH64)
Dest = getAArch64UndefinedRelativeWeakVA(Type, A, P);
else
- Dest = Body.getVA(A);
+ Dest = Sym.getVA(A);
} else {
- Dest = Body.getVA(A);
+ Dest = Sym.getVA(A);
}
return Dest - P;
}
case R_PLT:
- return Body.getPltVA() + A;
+ return Sym.getPltVA() + A;
case R_PLT_PC:
case R_PPC_PLT_OPD:
- return Body.getPltVA() + A - P;
+ return Sym.getPltVA() + A - P;
case R_PPC_OPD: {
- uint64_t SymVA = Body.getVA(A);
+ uint64_t SymVA = Sym.getVA(A);
// If we have an undefined weak symbol, we might get here with a symbol
// address of zero. That could overflow, but the code must be unreachable,
// so don't bother doing anything at all.
@@ -615,7 +615,7 @@ static uint64_t getRelocTargetVA(RelType Type, int64_t A, uint64_t P,
case R_PPC_TOC:
return getPPC64TocBase() + A;
case R_RELAX_GOT_PC:
- return Body.getVA(A) - P;
+ return Sym.getVA(A) - P;
case R_RELAX_TLS_GD_TO_LE:
case R_RELAX_TLS_IE_TO_LE:
case R_RELAX_TLS_LD_TO_LE:
@@ -625,25 +625,25 @@ static uint64_t getRelocTargetVA(RelType Type, int64_t A, uint64_t P,
// lld and .tbss is not referenced, it gets reclaimed and we don't
// create a TLS program header. Therefore, we resolve this
// statically to zero.
- if (Body.isTls() && Body.isUndefWeak())
+ if (Sym.isTls() && Sym.isUndefWeak())
return 0;
if (Target->TcbSize)
- return Body.getVA(A) + alignTo(Target->TcbSize, Out::TlsPhdr->p_align);
- return Body.getVA(A) - Out::TlsPhdr->p_memsz;
+ return Sym.getVA(A) + alignTo(Target->TcbSize, Out::TlsPhdr->p_align);
+ return Sym.getVA(A) - Out::TlsPhdr->p_memsz;
case R_RELAX_TLS_GD_TO_LE_NEG:
case R_NEG_TLS:
- return Out::TlsPhdr->p_memsz - Body.getVA(A);
+ return Out::TlsPhdr->p_memsz - Sym.getVA(A);
case R_SIZE:
- return A; // Body.getSize was already folded into the addend.
+ return A; // Sym.getSize was already folded into the addend.
case R_TLSDESC:
- return InX::Got->getGlobalDynAddr(Body) + A;
+ return InX::Got->getGlobalDynAddr(Sym) + A;
case R_TLSDESC_PAGE:
- return getAArch64Page(InX::Got->getGlobalDynAddr(Body) + A) -
+ return getAArch64Page(InX::Got->getGlobalDynAddr(Sym) + A) -
getAArch64Page(P);
case R_TLSGD:
- return InX::Got->getGlobalDynOffset(Body) + A - InX::Got->getSize();
+ return InX::Got->getGlobalDynOffset(Sym) + A - InX::Got->getSize();
case R_TLSGD_PC:
- return InX::Got->getGlobalDynAddr(Body) + A - P;
+ return InX::Got->getGlobalDynAddr(Sym) + A - P;
case R_TLSLD:
return InX::Got->getTlsIndexOff() + A - InX::Got->getSize();
case R_TLSLD_PC:
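
For reference, the arithmetic performed by getRelocTargetVA in the diff above reduces, for the common expression kinds, to combinations of the symbol address S, the addend A, the place P being relocated, and the GOT base. The standalone C++ sketch below illustrates a few of those formulas; the names (Expr, relocTargetVA, GotVA) are hypothetical and not lld's actual types or API.

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-ins for a few of the RelExpr kinds handled above.
    enum class Expr { Abs, Pc, GotRel };

    // S = symbol virtual address, A = addend, P = place being relocated,
    // GotVA = base address of the GOT.
    static uint64_t relocTargetVA(Expr E, uint64_t S, int64_t A, uint64_t P,
                                  uint64_t GotVA) {
      switch (E) {
      case Expr::Abs:    return S + A;          // R_ABS:    S + A
      case Expr::Pc:     return S + A - P;      // R_PC:     S + A - P
      case Expr::GotRel: return S + A - GotVA;  // R_GOTREL: S + A - GOT base
      }
      return 0;
    }

    int main() {
      // Example: a PC-relative reference from 0x401000 to a symbol at
      // 0x402000 resolves to the distance between them, 0x1000.
      printf("%#llx\n", (unsigned long long)relocTargetVA(
                            Expr::Pc, 0x402000, 0, 0x401000, 0));
      return 0;
    }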