diff options
author | Yichao Yu <yyc1992@gmail.com> | 2016-12-15 22:36:53 +0000 |
---|---|---|
committer | Yichao Yu <yyc1992@gmail.com> | 2016-12-15 22:36:53 +0000 |
commit | 8f8cdd00dab0c56011806eb0e2fe211364be71f7 (patch) | |
tree | f197159d279cb6e101c314f089b5f418e6eeac06 /llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp | |
parent | d69b9414b3cbd2ba738e7d48159643c69633c4d2 (diff) | |
download | bcm5719-llvm-8f8cdd00dab0c56011806eb0e2fe211364be71f7.tar.gz bcm5719-llvm-8f8cdd00dab0c56011806eb0e2fe211364be71f7.zip |
Fix R_AARCH64_MOVW_UABS_G3 relocation
Summary: The relocation is missing a mask, so an address that has non-zero bits in 47:43 may overwrite the register number. (Frequently shows up as the target register changed to `xzr`....)
Reviewers: t.p.northover, lhames
Subscribers: davide, aemerson, rengolin, llvm-commits
Differential Revision: https://reviews.llvm.org/D27609
llvm-svn: 289880
Diffstat (limited to 'llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp')
-rw-r--r-- | llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp | 72 |
1 file changed, 49 insertions, 23 deletions
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp index c70e81a759f..a977dce06bb 100644 --- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp +++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp @@ -325,6 +325,8 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section, uint32_t *TargetPtr = reinterpret_cast<uint32_t *>(Section.getAddressWithOffset(Offset)); uint64_t FinalAddress = Section.getLoadAddressWithOffset(Offset); + // Data should use target endian. Code should always use little endian. + bool isBE = Arch == Triple::aarch64_be; DEBUG(dbgs() << "resolveAArch64Relocation, LocalAddress: 0x" << format("%llx", Section.getAddressWithOffset(Offset)) @@ -340,14 +342,22 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section, case ELF::R_AARCH64_ABS64: { uint64_t *TargetPtr = reinterpret_cast<uint64_t *>(Section.getAddressWithOffset(Offset)); - *TargetPtr = Value + Addend; + if (isBE) + support::ubig64_t::ref{TargetPtr} = Value + Addend; + else + support::ulittle64_t::ref{TargetPtr} = Value + Addend; break; } case ELF::R_AARCH64_PREL32: { uint64_t Result = Value + Addend - FinalAddress; assert(static_cast<int64_t>(Result) >= INT32_MIN && static_cast<int64_t>(Result) <= UINT32_MAX); - *TargetPtr = static_cast<uint32_t>(Result & 0xffffffffU); + if (isBE) + support::ubig32_t::ref{TargetPtr} = + static_cast<uint32_t>(Result & 0xffffffffU); + else + support::ulittle32_t::ref{TargetPtr} = + static_cast<uint32_t>(Result & 0xffffffffU); break; } case ELF::R_AARCH64_CALL26: // fallthrough @@ -355,104 +365,120 @@ void RuntimeDyldELF::resolveAArch64Relocation(const SectionEntry &Section, // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the // calculation. uint64_t BranchImm = Value + Addend - FinalAddress; + uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; // "Check that -2^27 <= result < 2^27". 
assert(isInt<28>(BranchImm)); // AArch64 code is emitted with .rela relocations. The data already in any // bits affected by the relocation on entry is garbage. - *TargetPtr &= 0xfc000000U; + TargetValue &= 0xfc000000U; // Immediate goes in bits 25:0 of B and BL. - *TargetPtr |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2; + TargetValue |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2; + support::ulittle32_t::ref{TargetPtr} = TargetValue; break; } case ELF::R_AARCH64_MOVW_UABS_G3: { uint64_t Result = Value + Addend; + uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; // AArch64 code is emitted with .rela relocations. The data already in any // bits affected by the relocation on entry is garbage. - *TargetPtr &= 0xffe0001fU; + TargetValue &= 0xffe0001fU; // Immediate goes in bits 20:5 of MOVZ/MOVK instruction - *TargetPtr |= Result >> (48 - 5); + TargetValue |= ((Result & 0xffff000000000000ULL) >> (48 - 5)); // Shift must be "lsl #48", in bits 22:21 - assert((*TargetPtr >> 21 & 0x3) == 3 && "invalid shift for relocation"); + assert((TargetValue >> 21 & 0x3) == 3 && "invalid shift for relocation"); + support::ulittle32_t::ref{TargetPtr} = TargetValue; break; } case ELF::R_AARCH64_MOVW_UABS_G2_NC: { uint64_t Result = Value + Addend; + uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; // AArch64 code is emitted with .rela relocations. The data already in any // bits affected by the relocation on entry is garbage. 
- *TargetPtr &= 0xffe0001fU; + TargetValue &= 0xffe0001fU; // Immediate goes in bits 20:5 of MOVZ/MOVK instruction - *TargetPtr |= ((Result & 0xffff00000000ULL) >> (32 - 5)); + TargetValue |= ((Result & 0xffff00000000ULL) >> (32 - 5)); // Shift must be "lsl #32", in bits 22:21 - assert((*TargetPtr >> 21 & 0x3) == 2 && "invalid shift for relocation"); + assert((TargetValue >> 21 & 0x3) == 2 && "invalid shift for relocation"); + support::ulittle32_t::ref{TargetPtr} = TargetValue; break; } case ELF::R_AARCH64_MOVW_UABS_G1_NC: { uint64_t Result = Value + Addend; + uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; // AArch64 code is emitted with .rela relocations. The data already in any // bits affected by the relocation on entry is garbage. - *TargetPtr &= 0xffe0001fU; + TargetValue &= 0xffe0001fU; // Immediate goes in bits 20:5 of MOVZ/MOVK instruction - *TargetPtr |= ((Result & 0xffff0000U) >> (16 - 5)); + TargetValue |= ((Result & 0xffff0000U) >> (16 - 5)); // Shift must be "lsl #16", in bits 22:2 - assert((*TargetPtr >> 21 & 0x3) == 1 && "invalid shift for relocation"); + assert((TargetValue >> 21 & 0x3) == 1 && "invalid shift for relocation"); + support::ulittle32_t::ref{TargetPtr} = TargetValue; break; } case ELF::R_AARCH64_MOVW_UABS_G0_NC: { uint64_t Result = Value + Addend; + uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; // AArch64 code is emitted with .rela relocations. The data already in any // bits affected by the relocation on entry is garbage. - *TargetPtr &= 0xffe0001fU; + TargetValue &= 0xffe0001fU; // Immediate goes in bits 20:5 of MOVZ/MOVK instruction - *TargetPtr |= ((Result & 0xffffU) << 5); + TargetValue |= ((Result & 0xffffU) << 5); // Shift must be "lsl #0", in bits 22:21. 
- assert((*TargetPtr >> 21 & 0x3) == 0 && "invalid shift for relocation"); + assert((TargetValue >> 21 & 0x3) == 0 && "invalid shift for relocation"); + support::ulittle32_t::ref{TargetPtr} = TargetValue; break; } case ELF::R_AARCH64_ADR_PREL_PG_HI21: { // Operation: Page(S+A) - Page(P) uint64_t Result = ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL); + uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; // Check that -2^32 <= X < 2^32 assert(isInt<33>(Result) && "overflow check failed for relocation"); // AArch64 code is emitted with .rela relocations. The data already in any // bits affected by the relocation on entry is garbage. - *TargetPtr &= 0x9f00001fU; + TargetValue &= 0x9f00001fU; // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken // from bits 32:12 of X. - *TargetPtr |= ((Result & 0x3000U) << (29 - 12)); - *TargetPtr |= ((Result & 0x1ffffc000ULL) >> (14 - 5)); + TargetValue |= ((Result & 0x3000U) << (29 - 12)); + TargetValue |= ((Result & 0x1ffffc000ULL) >> (14 - 5)); + support::ulittle32_t::ref{TargetPtr} = TargetValue; break; } case ELF::R_AARCH64_LDST32_ABS_LO12_NC: { // Operation: S + A uint64_t Result = Value + Addend; + uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; // AArch64 code is emitted with .rela relocations. The data already in any // bits affected by the relocation on entry is garbage. - *TargetPtr &= 0xffc003ffU; + TargetValue &= 0xffc003ffU; // Immediate goes in bits 21:10 of LD/ST instruction, taken // from bits 11:2 of X - *TargetPtr |= ((Result & 0xffc) << (10 - 2)); + TargetValue |= ((Result & 0xffc) << (10 - 2)); + support::ulittle32_t::ref{TargetPtr} = TargetValue; break; } case ELF::R_AARCH64_LDST64_ABS_LO12_NC: { // Operation: S + A uint64_t Result = Value + Addend; + uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; // AArch64 code is emitted with .rela relocations. The data already in any // bits affected by the relocation on entry is garbage. 
- *TargetPtr &= 0xffc003ffU; + TargetValue &= 0xffc003ffU; // Immediate goes in bits 21:10 of LD/ST instruction, taken // from bits 11:3 of X - *TargetPtr |= ((Result & 0xff8) << (10 - 3)); + TargetValue |= ((Result & 0xff8) << (10 - 3)); + support::ulittle32_t::ref{TargetPtr} = TargetValue; break; } } |