author:    Alex Bradbury <asb@lowrisc.org>  2018-05-23 12:36:18 +0000
committer: Alex Bradbury <asb@lowrisc.org>  2018-05-23 12:36:18 +0000
commit:    257d5b56390d39e498561bd58999a00d92561f05 (patch)
tree:      63be25990a0ad5b03b8b380f4f56b6454c1dadd9 /llvm/lib/MC
parent:    3fa69dd055a49e207ee03d37b6e79205e4ab981a (diff)
[RISCV] Add symbol diff relocation support for RISC-V
For RISC-V it is desirable to have relaxation happen in the linker once addresses are known, and as such the size of the gap between two instructions/byte sequences in a section can change. For most assembler expressions this is fine, as the absolute address results in the expression being converted to a fixup, and finally to relocations. However, for expressions such as .quad .L2-.L1, the assembler folds the difference down to a constant once fragments are laid out, under the assumption that the difference can no longer change; with linker relaxation the difference can still change at link time, so the folded constant is incorrect. One place where this commonly appears is in debug information, where the size of a function is expressed in a form similar to the above.

This patch extends the assembler to allow an AsmBackend to declare that it does not want the assembler to fold such an expression down, and to instead generate a pair of relocations that allow the linker to carry out the calculation. In this case the expression is not folded; when it comes to emitting a fixup, the generic FK_Data_* fixup is converted into a pair, one for the addition half and one for the subtraction, and these are passed to the relocation-generating methods as usual. I have named these FK_Data_Add_* and FK_Data_Sub_* to indicate which half each is for.

For RISC-V, which supports this via e.g. the R_RISCV_ADD64/R_RISCV_SUB64 pair of relocations, these are also set to always emit relocations relative to local symbols rather than section offsets. This deals with the fact that if the relocations were calculated against e.g. .text+8 and .text+4, the result 12 would be stored rather than 4, as both addends are added in the linker.

Differential Revision: https://reviews.llvm.org/D45181

Patch by Simon Cook.

llvm-svn: 333079
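The target-side opt-in is not part of the llvm/lib/MC diff below; as a hedged sketch only, a backend that wants the paired relocations would override the new hook queried in MCAssembler.cpp and MCExpr.cpp. The class name and the relaxation flag here are illustrative assumptions:

// Sketch, not the actual target code: the class name and the
// EnableLinkerRelaxation flag are assumptions; only the hook name
// comes from this patch.
class MyTargetAsmBackend : public MCAsmBackend {
  bool EnableLinkerRelaxation = false; // e.g. tied to a "relax" feature
public:
  bool requiresDiffExpressionRelocations() const override {
    // Only force A-B expressions out to paired add/sub relocations when
    // linker relaxation may change the distance between the two labels.
    return EnableLinkerRelaxation;
  }
  // ...the other (pure) virtual MCAsmBackend methods are omitted here.
};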
Diffstat (limited to 'llvm/lib/MC')
-rw-r--r--  llvm/lib/MC/MCAsmBackend.cpp | 10
-rw-r--r--  llvm/lib/MC/MCAssembler.cpp  | 21
-rw-r--r--  llvm/lib/MC/MCExpr.cpp       |  9
3 files changed, 36 insertions, 4 deletions
diff --git a/llvm/lib/MC/MCAsmBackend.cpp b/llvm/lib/MC/MCAsmBackend.cpp
index 3119bb997d0..92d3a8a2645 100644
--- a/llvm/lib/MC/MCAsmBackend.cpp
+++ b/llvm/lib/MC/MCAsmBackend.cpp
@@ -84,7 +84,15 @@ const MCFixupKindInfo &MCAsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
{"FK_SecRel_1", 0, 8, 0},
{"FK_SecRel_2", 0, 16, 0},
{"FK_SecRel_4", 0, 32, 0},
- {"FK_SecRel_8", 0, 64, 0}};
+ {"FK_SecRel_8", 0, 64, 0},
+ {"FK_Data_Add_1", 0, 8, 0},
+ {"FK_Data_Add_2", 0, 16, 0},
+ {"FK_Data_Add_4", 0, 32, 0},
+ {"FK_Data_Add_8", 0, 64, 0},
+ {"FK_Data_Sub_1", 0, 8, 0},
+ {"FK_Data_Sub_2", 0, 16, 0},
+ {"FK_Data_Sub_4", 0, 32, 0},
+ {"FK_Data_Sub_8", 0, 64, 0}};
assert((size_t)Kind <= array_lengthof(Builtins) && "Unknown fixup kind");
return Builtins[Kind];
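The Builtins table above is indexed by MCFixupKind, so matching enumerators are presumably added to include/llvm/MC/MCFixup.h in the same commit (that header is outside the llvm/lib/MC diffstat shown here). A sketch of how the enum would read; the exact ordering and surrounding values are assumptions, but each new kind must line up with its entry in the table:

// Assumed shape of the generic fixup-kind enum after this change.
enum MCFixupKind {
  // ... existing generic kinds (FK_NONE, FK_Data_1, ..., FK_SecRel_8) ...
  FK_Data_Add_1, ///< A one-byte add fixup.
  FK_Data_Add_2, ///< A two-byte add fixup.
  FK_Data_Add_4, ///< A four-byte add fixup.
  FK_Data_Add_8, ///< An eight-byte add fixup.
  FK_Data_Sub_1, ///< A one-byte sub fixup.
  FK_Data_Sub_2, ///< A two-byte sub fixup.
  FK_Data_Sub_4, ///< A four-byte sub fixup.
  FK_Data_Sub_8, ///< An eight-byte sub fixup.
  // ... target-specific kinds follow ...
};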
diff --git a/llvm/lib/MC/MCAssembler.cpp b/llvm/lib/MC/MCAssembler.cpp
index f63df8b1002..0cf17a10d08 100644
--- a/llvm/lib/MC/MCAssembler.cpp
+++ b/llvm/lib/MC/MCAssembler.cpp
@@ -720,7 +720,26 @@ MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F,
// The fixup was unresolved, we need a relocation. Inform the object
// writer of the relocation, and give it an opportunity to adjust the
// fixup value if need be.
- getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, FixedValue);
+ if (Target.getSymA() && Target.getSymB() &&
+ getBackend().requiresDiffExpressionRelocations()) {
+ // The fixup represents the difference between two symbols, which the
+ // backend has indicated must be resolved at link time. Split up the fixup
+ // into two relocations, one for the add, and one for the sub, and emit
+ // both of these. The constant will be associated with the add half of the
+ // expression.
+ MCFixup FixupAdd = MCFixup::createAddFor(Fixup);
+ MCValue TargetAdd =
+ MCValue::get(Target.getSymA(), nullptr, Target.getConstant());
+ getWriter().recordRelocation(*this, Layout, &F, FixupAdd, TargetAdd,
+ FixedValue);
+ MCFixup FixupSub = MCFixup::createSubFor(Fixup);
+ MCValue TargetSub = MCValue::get(Target.getSymB());
+ getWriter().recordRelocation(*this, Layout, &F, FixupSub, TargetSub,
+ FixedValue);
+ } else {
+ getWriter().recordRelocation(*this, Layout, &F, Fixup, Target,
+ FixedValue);
+ }
}
return std::make_tuple(Target, FixedValue, IsResolved);
}
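MCFixup::createAddFor and MCFixup::createSubFor used above are new helpers declared in MCFixup.h, which is outside this diffstat. Presumably they clone the original data fixup and only swap its kind to the Add/Sub variant of the same width; a sketch under that assumption (the kind-mapping helper name is also an assumption):

class MCFixup {
  // ... existing members: Value, Offset, Kind, Loc ...
public:
  /// Sketch: return the "add" half of a paired add/sub fixup, keeping the
  /// original expression, offset and location but mapping the kind from
  /// e.g. FK_Data_8 to FK_Data_Add_8.
  static MCFixup createAddFor(const MCFixup &Fixup) {
    MCFixup FI;
    FI.Value = Fixup.getValue();
    FI.Offset = Fixup.getOffset();
    FI.Kind = getAddKindForKind(Fixup.getKind()); // assumed helper
    FI.Loc = Fixup.getLoc();
    return FI;
  }
  // createSubFor would be identical, mapping to FK_Data_Sub_*.
};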
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
index 831c692fb2e..d38127fe135 100644
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -11,6 +11,7 @@
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Config/llvm-config.h"
+#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCAssembler.h"
@@ -576,8 +577,12 @@ EvaluateSymbolicAdd(const MCAssembler *Asm, const MCAsmLayout *Layout,
assert((!Layout || Asm) &&
"Must have an assembler object if layout is given!");
- // If we have a layout, we can fold resolved differences.
- if (Asm) {
+ // If we have a layout, we can fold resolved differences. Do not do this if
+ // the backend requires this to be emitted as individual relocations, unless
+ // the InSet flag is set to get the current difference anyway (used for
+ // example to calculate symbol sizes).
+ if (Asm &&
+ (InSet || !Asm->getBackend().requiresDiffExpressionRelocations())) {
// First, fold out any differences which are fully resolved. By
// reassociating terms in
// Result = (LHS_A - LHS_B + LHS_Cst) + (RHS_A - RHS_B + RHS_Cst).
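On the object-writer side (also outside llvm/lib/MC), the target then only needs to map the new generic fixup kinds to the relocation pair the commit message names. A hedged sketch of that mapping for RISC-V; the surrounding function and the non-64-bit relocation names are assumptions taken from the RISC-V psABI, not from this diff:

// Illustrative switch inside the target ELF object writer's
// relocation-type selection; only R_RISCV_ADD64/R_RISCV_SUB64 are
// named in the commit message above.
switch (Fixup.getKind()) {
case FK_Data_Add_1: return ELF::R_RISCV_ADD8;
case FK_Data_Add_2: return ELF::R_RISCV_ADD16;
case FK_Data_Add_4: return ELF::R_RISCV_ADD32;
case FK_Data_Add_8: return ELF::R_RISCV_ADD64;
case FK_Data_Sub_1: return ELF::R_RISCV_SUB8;
case FK_Data_Sub_2: return ELF::R_RISCV_SUB16;
case FK_Data_Sub_4: return ELF::R_RISCV_SUB32;
case FK_Data_Sub_8: return ELF::R_RISCV_SUB64;
default:
  break; // fall through to the existing fixup handling
}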