//===- Target.cpp ---------------------------------------------------------===//
//
// The LLVM Linker
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Machine-specific things, such as applying relocations, creation of
// GOT or PLT entries, etc., are handled in this file.
//
// Refer to the ELF spec for the single-letter variables, S, A, and P, used
// in this file. SA is S+A.
//
//===----------------------------------------------------------------------===//
#include "Target.h"
#include "Error.h"
#include "OutputSections.h"
#include "Symbols.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Object/ELF.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/Endian.h"
using namespace llvm;
using namespace llvm::object;
using namespace llvm::support::endian;
using namespace llvm::ELF;
namespace lld {
namespace elf2 {
std::unique_ptr<TargetInfo> Target;
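// Small read-modify-write helpers used when applying relocations. They
// combine the new value with whatever is already stored at the location,
// which is how REL-style relocations carry their implicit addend and how
// instruction fields are patched without disturbing surrounding bits.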
static void add32le(uint8_t *L, int32_t V) { write32le(L, read32le(L) + V); }
static void add32be(uint8_t *L, int32_t V) { write32be(L, read32be(L) + V); }
static void or32le(uint8_t *L, int32_t V) { write32le(L, read32le(L) | V); }
template <bool IsLE> static void add32(uint8_t *L, int32_t V);
template <> void add32<true>(uint8_t *L, int32_t V) { add32le(L, V); }
template <> void add32<false>(uint8_t *L, int32_t V) { add32be(L, V); }
namespace {
class X86TargetInfo final : public TargetInfo {
public:
X86TargetInfo();
void writePltEntry(uint8_t *Buf, uint64_t GotEntryAddr,
uint64_t PltEntryAddr) const override;
bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
bool relocPointsToGot(uint32_t Type) const override;
bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
void relocateOne(uint8_t *Buf, uint8_t *BufEnd, const void *RelP,
uint32_t Type, uint64_t BaseAddr,
uint64_t SA) const override;
};
class X86_64TargetInfo final : public TargetInfo {
public:
X86_64TargetInfo();
unsigned getPLTRefReloc(unsigned Type) const override;
void writePltEntry(uint8_t *Buf, uint64_t GotEntryAddr,
uint64_t PltEntryAddr) const override;
bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
void relocateOne(uint8_t *Buf, uint8_t *BufEnd, const void *RelP,
uint32_t Type, uint64_t BaseAddr,
uint64_t SA) const override;
bool isRelRelative(uint32_t Type) const override;
};
class PPC64TargetInfo final : public TargetInfo {
public:
PPC64TargetInfo();
void writePltEntry(uint8_t *Buf, uint64_t GotEntryAddr,
uint64_t PltEntryAddr) const override;
bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
void relocateOne(uint8_t *Buf, uint8_t *BufEnd, const void *RelP,
uint32_t Type, uint64_t BaseAddr,
uint64_t SA) const override;
bool isRelRelative(uint32_t Type) const override;
};
class AArch64TargetInfo final : public TargetInfo {
public:
AArch64TargetInfo();
void writePltEntry(uint8_t *Buf, uint64_t GotEntryAddr,
uint64_t PltEntryAddr) const override;
bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
void relocateOne(uint8_t *Buf, uint8_t *BufEnd, const void *RelP,
uint32_t Type, uint64_t BaseAddr,
uint64_t SA) const override;
};
template <class ELFT> class MipsTargetInfo final : public TargetInfo {
public:
MipsTargetInfo();
void writePltEntry(uint8_t *Buf, uint64_t GotEntryAddr,
uint64_t PltEntryAddr) const override;
bool relocNeedsGot(uint32_t Type, const SymbolBody &S) const override;
bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const override;
void relocateOne(uint8_t *Buf, uint8_t *BufEnd, const void *RelP,
uint32_t Type, uint64_t BaseAddr,
uint64_t SA) const override;
};
} // anonymous namespace
TargetInfo *createTarget() {
switch (Config->EMachine) {
case EM_386:
return new X86TargetInfo();
case EM_AARCH64:
return new AArch64TargetInfo();
case EM_MIPS:
switch (Config->EKind) {
case ELF32LEKind:
return new MipsTargetInfo<ELF32LE>();
case ELF32BEKind:
return new MipsTargetInfo<ELF32BE>();
default:
error("Unsupported MIPS target");
}
case EM_PPC64:
return new PPC64TargetInfo();
case EM_X86_64:
return new X86_64TargetInfo();
}
error("Unknown target machine");
}
TargetInfo::~TargetInfo() {}
unsigned TargetInfo::getPLTRefReloc(unsigned Type) const { return PCRelReloc; }
bool TargetInfo::relocPointsToGot(uint32_t Type) const { return false; }
bool TargetInfo::isRelRelative(uint32_t Type) const { return true; }
X86TargetInfo::X86TargetInfo() {
PCRelReloc = R_386_PC32;
GotReloc = R_386_GLOB_DAT;
GotRefReloc = R_386_GOT32;
}
void X86TargetInfo::writePltEntry(uint8_t *Buf, uint64_t GotEntryAddr,
uint64_t PltEntryAddr) const {
// jmpl *val; nop; nop
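// i386 has no PC-relative addressing for memory operands, so the indirect
// jmp encodes the absolute address of the GOT slot; it must therefore fit
// in 32 bits, which the assert below checks.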
const uint8_t Inst[] = {0xff, 0x25, 0, 0, 0, 0, 0x90, 0x90};
memcpy(Buf, Inst, sizeof(Inst));
assert(isUInt<32>(GotEntryAddr));
write32le(Buf + 2, GotEntryAddr);
}
bool X86TargetInfo::relocNeedsGot(uint32_t Type, const SymbolBody &S) const {
return Type == R_386_GOT32 || relocNeedsPlt(Type, S);
}
bool X86TargetInfo::relocPointsToGot(uint32_t Type) const {
return Type == R_386_GOTPC;
}
bool X86TargetInfo::relocNeedsPlt(uint32_t Type, const SymbolBody &S) const {
return Type == R_386_PLT32 || (Type == R_386_PC32 && S.isShared());
}
void X86TargetInfo::relocateOne(uint8_t *Buf, uint8_t *BufEnd, const void *RelP,
uint32_t Type, uint64_t BaseAddr,
uint64_t SA) const {
typedef ELFFile<ELF32LE>::Elf_Rel Elf_Rel;
auto &Rel = *reinterpret_cast<const Elf_Rel *>(RelP);
uint32_t Offset = Rel.r_offset;
uint8_t *Loc = Buf + Offset;
switch (Type) {
case R_386_GOT32:
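// R_386_GOT32 resolves to the offset of the symbol's GOT entry from the
// start of the GOT, hence the subtraction of the GOT base address.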
add32le(Loc, SA - Out<ELF32LE>::Got->getVA());
break;
case R_386_PC32:
add32le(Loc, SA - BaseAddr - Offset);
break;
case R_386_32:
add32le(Loc, SA);
break;
default:
error("unrecognized reloc " + Twine(Type));
}
}
X86_64TargetInfo::X86_64TargetInfo() {
PCRelReloc = R_X86_64_PC32;
GotReloc = R_X86_64_GLOB_DAT;
GotRefReloc = R_X86_64_PC32;
RelativeReloc = R_X86_64_RELATIVE;
}
void X86_64TargetInfo::writePltEntry(uint8_t *Buf, uint64_t GotEntryAddr,
uint64_t PltEntryAddr) const {
// jmpq *val(%rip); nop; nop
const uint8_t Inst[] = {0xff, 0x25, 0, 0, 0, 0, 0x90, 0x90};
memcpy(Buf, Inst, sizeof(Inst));
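// The %rip-relative displacement is measured from the end of the 6-byte
// jmpq instruction, hence the +6 when computing the next PC below.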
uint64_t NextPC = PltEntryAddr + 6;
int64_t Delta = GotEntryAddr - NextPC;
assert(isInt<32>(Delta));
write32le(Buf + 2, Delta);
}
bool X86_64TargetInfo::relocNeedsGot(uint32_t Type, const SymbolBody &S) const {
return Type == R_X86_64_GOTPCREL || relocNeedsPlt(Type, S);
}
unsigned X86_64TargetInfo::getPLTRefReloc(unsigned Type) const {
switch (Type) {
case R_X86_64_32:
return R_X86_64_32;
case R_X86_64_PC32:
case R_X86_64_PLT32:
return R_X86_64_PC32;
}
llvm_unreachable("Unexpected relocation");
}
bool X86_64TargetInfo::relocNeedsPlt(uint32_t Type, const SymbolBody &S) const {
switch (Type) {
default:
return false;
case R_X86_64_32:
case R_X86_64_PC32:
// This relocation is defined to have a value of (S + A - P).
// The problems start when a non PIC program calls a function in a shared
// library.
// In an ideal world, we could just report an error saying the relocation
// can overflow at runtime.
// In the real world with glibc, crt1.o has a R_X86_64_PC32 pointing to
// libc.so.
//
// The general idea on how to handle such cases is to create a PLT entry
// and use that as the function value.
//
// For the static linking part, we just return true and everything else
// will use the PLT entry as the address.
//
// The remaining (unimplemented) problem is making sure pointer equality
// still works. We need the help of the dynamic linker for that. We
// let it know that we have a direct reference to a shared object symbol by
// creating an undefined symbol with a non-zero st_value. Seeing that, the
// dynamic linker resolves the symbol to the value of the symbol we created.
// This is true even for GOT entries, so pointer equality is maintained.
// To avoid an infinite loop, the only entry that points to the
// real function is a dedicated GOT entry used by the PLT. That is
// identified by special relocation types (R_X86_64_JUMP_SLOT,
// R_386_JMP_SLOT, etc.).
return S.isShared();
case R_X86_64_PLT32:
return canBePreempted(&S, true);
}
}
bool X86_64TargetInfo::isRelRelative(uint32_t Type) const {
switch (Type) {
default:
return false;
case R_X86_64_PC64:
case R_X86_64_PC32:
case R_X86_64_PC16:
case R_X86_64_PC8:
return true;
}
}
void X86_64TargetInfo::relocateOne(uint8_t *Buf, uint8_t *BufEnd,
const void *RelP, uint32_t Type,
uint64_t BaseAddr, uint64_t SA) const {
typedef ELFFile<ELF64LE>::Elf_Rela Elf_Rela;
auto &Rel = *reinterpret_cast<const Elf_Rela *>(RelP);
uint64_t Offset = Rel.r_offset;
uint8_t *Loc = Buf + Offset;
switch (Type) {
case R_X86_64_PC32:
case R_X86_64_GOTPCREL:
case R_X86_64_PLT32:
write32le(Loc, SA - BaseAddr - Offset);
break;
case R_X86_64_64:
write64le(Loc, SA);
break;
case R_X86_64_32:
case R_X86_64_32S:
if (Type == R_X86_64_32 && !isUInt<32>(SA))
error("R_X86_64_32 out of range");
else if (Type == R_X86_64_32S && !isInt<32>(SA))
error("R_X86_64_32S out of range");
write32le(Loc, SA);
break;
default:
error("unrecognized reloc " + Twine(Type));
}
}
// Relocation masks following the #lo(value), #hi(value), #ha(value),
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1 (Relocation Types) of the 64-bit PowerPC
// ELF ABI document.
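// The "a" (adjusted) variants add 0x8000 before shifting so that the high
// part compensates for the sign extension applied when the matching #lo()
// value is later added as a signed 16-bit quantity.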
static uint16_t applyPPCLo(uint64_t V) { return V & 0xffff; }
static uint16_t applyPPCHi(uint64_t V) { return (V >> 16) & 0xffff; }
static uint16_t applyPPCHa(uint64_t V) { return ((V + 0x8000) >> 16) & 0xffff; }
static uint16_t applyPPCHigher(uint64_t V) { return (V >> 32) & 0xffff; }
static uint16_t applyPPCHighera(uint64_t V) {
return ((V + 0x8000) >> 32) & 0xffff;
}
static uint16_t applyPPCHighest(uint64_t V) { return V >> 48; }
static uint16_t applyPPCHighesta(uint64_t V) { return (V + 0x8000) >> 48; }
PPC64TargetInfo::PPC64TargetInfo() {
PCRelReloc = R_PPC64_REL24;
GotReloc = R_PPC64_GLOB_DAT;
GotRefReloc = R_PPC64_REL64;
RelativeReloc = R_PPC64_RELATIVE;
PltEntrySize = 32;
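// Each PLT stub emitted by writePltEntry below is eight 4-byte
// instructions, i.e. 32 bytes.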
// We need 64K pages (at least under glibc/Linux, the loader won't
// set different permissions on a finer granularity than that).
PageSize = 65536;
// The PPC64 ELF ABI v1 spec says:
//
// It is normally desirable to put segments with different characteristics
// in separate 256 Mbyte portions of the address space, to give the
// operating system full paging flexibility in the 64-bit address space.
//
// And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
// use 0x10000000 as the starting address.
VAStart = 0x10000000;
}
uint64_t getPPC64TocBase() {
// The TOC consists of sections .got, .toc, .tocbss, .plt in that
// order. The TOC starts where the first of these sections starts.
// FIXME: This obviously does not do the right thing when there is no .got
// section, but there is a .toc or .tocbss section.
uint64_t TocVA = Out<ELF64BE>::Got->getVA();
if (!TocVA)
TocVA = Out<ELF64BE>::Plt->getVA();
// Per the ppc64-elf-linux ABI, the TOC base is the TOC value plus 0x8000,
// thus permitting a full 64-Kbyte segment. Note that the glibc startup
// code (crt1.o) assumes that you can get from the TOC base to the
// start of the .toc section with only a single (signed) 16-bit relocation.
return TocVA + 0x8000;
}
void PPC64TargetInfo::writePltEntry(uint8_t *Buf, uint64_t GotEntryAddr,
uint64_t PltEntryAddr) const {
uint64_t Off = GotEntryAddr - getPPC64TocBase();
// FIXME: What we should do, in theory, is get the offset of the function
// descriptor in the .opd section, and use that as the offset from %r2 (the
// TOC-base pointer). Instead, we have the GOT-entry offset, and that will
// be a pointer to the function descriptor in the .opd section. Using
// this scheme is simpler, but requires an extra indirection per PLT dispatch.
write32be(Buf, 0xf8410028); // std %r2, 40(%r1)
write32be(Buf + 4, 0x3d620000 | applyPPCHa(Off)); // addis %r11, %r2, X@ha
write32be(Buf + 8, 0xe98b0000 | applyPPCLo(Off)); // ld %r12, X@l(%r11)
write32be(Buf + 12, 0xe96c0000); // ld %r11,0(%r12)
write32be(Buf + 16, 0x7d6903a6); // mtctr %r11
write32be(Buf + 20, 0xe84c0008); // ld %r2,8(%r12)
write32be(Buf + 24, 0xe96c0010); // ld %r11,16(%r12)
write32be(Buf + 28, 0x4e800420); // bctr
}
bool PPC64TargetInfo::relocNeedsGot(uint32_t Type, const SymbolBody &S) const {
if (relocNeedsPlt(Type, S))
return true;
switch (Type) {
default: return false;
case R_PPC64_GOT16:
case R_PPC64_GOT16_LO:
case R_PPC64_GOT16_HI:
case R_PPC64_GOT16_HA:
case R_PPC64_GOT16_DS:
case R_PPC64_GOT16_LO_DS:
return true;
}
}
bool PPC64TargetInfo::relocNeedsPlt(uint32_t Type, const SymbolBody &S) const {
// These are function calls that need to be redirected through a PLT stub.
return Type == R_PPC64_REL24 && canBePreempted(&S, false);
}
bool PPC64TargetInfo::isRelRelative(uint32_t Type) const {
switch (Type) {
default:
return true;
case R_PPC64_TOC:
case R_PPC64_ADDR64:
return false;
}
}
void PPC64TargetInfo::relocateOne(uint8_t *Buf, uint8_t *BufEnd,
const void *RelP, uint32_t Type,
uint64_t BaseAddr, uint64_t SA) const {
typedef ELFFile<ELF64BE>::Elf_Rela Elf_Rela;
auto &Rel = *reinterpret_cast<const Elf_Rela *>(RelP);
uint8_t *L = Buf + Rel.r_offset;
uint64_t P = BaseAddr + Rel.r_offset;
uint64_t TB = getPPC64TocBase();
// For a TOC-relative relocation, adjust the addend and proceed in terms of
// the corresponding ADDR16 relocation type.
switch (Type) {
case R_PPC64_TOC16: Type = R_PPC64_ADDR16; SA -= TB; break;
case R_PPC64_TOC16_DS: Type = R_PPC64_ADDR16_DS; SA -= TB; break;
case R_PPC64_TOC16_LO: Type = R_PPC64_ADDR16_LO; SA -= TB; break;
case R_PPC64_TOC16_LO_DS: Type = R_PPC64_ADDR16_LO_DS; SA -= TB; break;
case R_PPC64_TOC16_HI: Type = R_PPC64_ADDR16_HI; SA -= TB; break;
case R_PPC64_TOC16_HA: Type = R_PPC64_ADDR16_HA; SA -= TB; break;
default: break;
}
switch (Type) {
case R_PPC64_ADDR16:
if (!isInt<16>(SA))
error("Relocation R_PPC64_ADDR16 overflow");
write16be(L, SA);
break;
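// The _DS variants patch DS-form instructions, whose low two bits are part
// of the opcode, so only the upper 14 bits of the halfword are replaced
// (the value itself is expected to be a multiple of 4).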
case R_PPC64_ADDR16_DS:
if (!isInt<16>(SA))
error("Relocation R_PPC64_ADDR16_DS overflow");
write16be(L, (read16be(L) & 3) | (SA & ~3));
break;
case R_PPC64_ADDR16_LO:
write16be(L, applyPPCLo(SA));
break;
case R_PPC64_ADDR16_LO_DS:
write16be(L, (read16be(L) & 3) | (applyPPCLo(SA) & ~3));
break;
case R_PPC64_ADDR16_HI:
write16be(L, applyPPCHi(SA));
break;
case R_PPC64_ADDR16_HA:
write16be(L, applyPPCHa(SA));
break;
case R_PPC64_ADDR16_HIGHER:
write16be(L, applyPPCHigher(SA));
break;
case R_PPC64_ADDR16_HIGHERA:
write16be(L, applyPPCHighera(SA));
break;
case R_PPC64_ADDR16_HIGHEST:
write16be(L, applyPPCHighest(SA));
break;
case R_PPC64_ADDR16_HIGHESTA:
write16be(L, applyPPCHighesta(SA));
break;
case R_PPC64_ADDR14: {
if ((SA & 3) != 0)
error("Improper alignment for relocation R_PPC64_ADDR14");
// Preserve the AA/LK bits in the branch instruction
uint8_t AALK = L[3];
write16be(L + 2, (AALK & 3) | (SA & 0xfffc));
break;
}
case R_PPC64_REL16_LO:
write16be(L, applyPPCLo(SA - P));
break;
case R_PPC64_REL16_HI:
write16be(L, applyPPCHi(SA - P));
break;
case R_PPC64_REL16_HA:
write16be(L, applyPPCHa(SA - P));
break;
case R_PPC64_ADDR32:
if (!isInt<32>(SA))
error("Relocation R_PPC64_ADDR32 overflow");
write32be(L, SA);
break;
case R_PPC64_REL24: {
// If we have an undefined weak symbol, we might get here with a symbol
// address of zero. That could overflow, but the code must be unreachable,
// so don't bother doing anything at all.
if (!SA)
break;
uint64_t PltStart = Out<ELF64BE>::Plt->getVA();
uint64_t PltEnd = PltStart + Out<ELF64BE>::Plt->getSize();
bool InPlt = PltStart <= SA && SA < PltEnd;
if (!InPlt && Out<ELF64BE>::Opd) {
// If this is a local call, and we currently have the address of a
// function-descriptor, get the underlying code address instead.
uint64_t OpdStart = Out<ELF64BE>::Opd->getVA();
uint64_t OpdEnd = OpdStart + Out<ELF64BE>::Opd->getSize();
bool InOpd = OpdStart <= SA && SA < OpdEnd;
if (InOpd)
SA = read64be(&Out<ELF64BE>::OpdBuf[SA - OpdStart]);
}
uint32_t Mask = 0x03FFFFFC;
// The 24-bit LI field holds (SA - P) >> 2, so the displacement itself
// must fit in a signed 26-bit value.
if (!isInt<26>(SA - P))
error("Relocation R_PPC64_REL24 overflow");
write32be(L, (read32be(L) & ~Mask) | ((SA - P) & Mask));
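// A call routed through the PLT clobbers %r2 (the TOC pointer). If the
// call is followed by a nop, patch the nop to reload %r2 from the stack
// slot where the PLT stub saved it (see writePltEntry above).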
if (InPlt && L + 8 <= BufEnd &&
read32be(L + 4) == 0x60000000 /* nop */)
write32be(L + 4, 0xe8410028); // ld %r2, 40(%r1)
break;
}
case R_PPC64_REL32:
if (!isInt<32>(SA - P))
error("Relocation R_PPC64_REL32 overflow");
write32be(L, SA - P);
break;
case R_PPC64_REL64:
write64be(L, SA - P);
break;
case R_PPC64_ADDR64:
case R_PPC64_TOC:
write64be(L, SA);
break;
default:
error("unrecognized reloc " + Twine(Type));
}
}
AArch64TargetInfo::AArch64TargetInfo() {
// PCRelReloc = FIXME
// GotReloc = FIXME
}
void AArch64TargetInfo::writePltEntry(uint8_t *Buf, uint64_t GotEntryAddr,
uint64_t PltEntryAddr) const {}
bool AArch64TargetInfo::relocNeedsGot(uint32_t Type,
const SymbolBody &S) const {
return false;
}
bool AArch64TargetInfo::relocNeedsPlt(uint32_t Type,
const SymbolBody &S) const {
return false;
}
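// ADR and ADRP split their 21-bit immediate across two instruction fields:
// the low two bits ("immlo") live in bits [30:29] and the remaining 19 bits
// ("immhi") in bits [23:5]. Patch both fields while preserving all other
// instruction bits.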
static void updateAArch64Adr(uint8_t *L, uint64_t Imm) {
uint32_t ImmLo = (Imm & 0x3) << 29;
uint32_t ImmHi = ((Imm & 0x1FFFFC) >> 2) << 5;
uint64_t Mask = (0x3 << 29) | (0x7FFFF << 5);
write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
}
// Page(Expr) is the page address of the expression Expr, defined
// as (Expr & ~0xFFF). (This applies even if the machine page size
// supported by the platform has a different value.)
static uint64_t getAArch64Page(uint64_t Expr) {
return Expr & (~static_cast<uint64_t>(0xFFF));
}
void AArch64TargetInfo::relocateOne(uint8_t *Buf, uint8_t *BufEnd,
const void *RelP, uint32_t Type,
uint64_t BaseAddr, uint64_t SA) const {
typedef ELFFile<ELF64LE>::Elf_Rela Elf_Rela;
auto &Rel = *reinterpret_cast<const Elf_Rela *>(RelP);
uint8_t *L = Buf + Rel.r_offset;
uint64_t P = BaseAddr + Rel.r_offset;
switch (Type) {
case R_AARCH64_ABS16:
if (!isInt<16>(SA))
error("Relocation R_AARCH64_ABS16 out of range");
write16le(L, SA);
break;
case R_AARCH64_ABS32:
if (!isInt<32>(SA))
error("Relocation R_AARCH64_ABS32 out of range");
write32le(L, SA);
break;
case R_AARCH64_ABS64:
// No overflow check needed.
write64le(L, SA);
break;
case R_AARCH64_ADD_ABS_LO12_NC:
// No overflow check needed ("NC" stands for "no check").
// This relocation patches the 12-bit immediate field of an ADD
// instruction. There is no 12-bit store, so we OR the shifted value
// into the 32-bit instruction word at L. This assumes that the
// immediate bits in L are currently zero.
or32le(L, (SA & 0xFFF) << 10);
break;
case R_AARCH64_ADR_PREL_LO21: {
uint64_t X = SA - P;
if (!isInt<21>(X))
error("Relocation R_AARCH64_ADR_PREL_LO21 out of range");
updateAArch64Adr(L, X & 0x1FFFFF);
break;
}
case R_AARCH64_ADR_PREL_PG_HI21: {
uint64_t X = getAArch64Page(SA) - getAArch64Page(P);
if (!isInt<33>(X))
error("Relocation R_AARCH64_ADR_PREL_PG_HI21 out of range");
updateAArch64Adr(L, (X >> 12) & 0x1FFFFF); // X[32:12]
break;
}
default:
error("unrecognized reloc " + Twine(Type));
}
}
template <class ELFT> MipsTargetInfo<ELFT>::MipsTargetInfo() {
// PCRelReloc = FIXME
// GotReloc = FIXME
PageSize = 65536;
}
template <class ELFT>
void MipsTargetInfo<ELFT>::writePltEntry(uint8_t *Buf, uint64_t GotEntryAddr,
uint64_t PltEntryAddr) const {}
template <class ELFT>
bool MipsTargetInfo<ELFT>::relocNeedsGot(uint32_t Type,
const SymbolBody &S) const {
return false;
}
template <class ELFT>
bool MipsTargetInfo<ELFT>::relocNeedsPlt(uint32_t Type,
const SymbolBody &S) const {
return false;
}
template <class ELFT>
void MipsTargetInfo<ELFT>::relocateOne(uint8_t *Buf, uint8_t *BufEnd,
const void *RelP, uint32_t Type,
uint64_t BaseAddr, uint64_t SA) const {
const bool IsLE = ELFT::TargetEndianness == support::little;
typedef typename ELFFile<ELFT>::Elf_Rel Elf_Rel;
auto &Rel = *reinterpret_cast<const Elf_Rel *>(RelP);
switch (Type) {
case R_MIPS_32:
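// R_MIPS_32 is a REL-format relocation: its addend is the value already
// stored at the location, so the resolved symbol value is added to the
// existing contents rather than overwriting them.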
add32<IsLE>(Buf + Rel.r_offset, SA);
break;
default:
error("unrecognized reloc " + Twine(Type));
}
}
} // namespace elf2
} // namespace lld