aboutsummaryrefslogtreecommitdiff
path: root/contrib/llvm/tools/lld/ELF/Arch
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2019-08-21 20:44:42 +0000
committerDimitry Andric <dim@FreeBSD.org>2019-08-21 20:44:42 +0000
commit22d1794578bafc3a0f25691c0531ba10d57713e6 (patch)
tree7a15dfb784af9445419fb44eddb49d039bd4cdc2 /contrib/llvm/tools/lld/ELF/Arch
parente123fc8fd8677e4dc86f274cffd069e5d56f4a01 (diff)
parentf1e1c239e31b467e17f1648b1f524fc9ab5b431a (diff)
Merge lld trunk r366426, and resolve conflicts.
Notes
Notes: svn path=/projects/clang900-import/; revision=351353
Diffstat (limited to 'contrib/llvm/tools/lld/ELF/Arch')
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/AArch64.cpp466
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/AMDGPU.cpp70
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/ARM.cpp462
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/AVR.cpp37
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/Hexagon.cpp195
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/MSP430.cpp49
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/Mips.cpp607
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/MipsArchTree.cpp239
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/PPC.cpp407
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/PPC64.cpp722
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/RISCV.cpp401
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/SPARCV9.cpp102
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/X86.cpp440
-rw-r--r--contrib/llvm/tools/lld/ELF/Arch/X86_64.cpp623
14 files changed, 2847 insertions, 1973 deletions
diff --git a/contrib/llvm/tools/lld/ELF/Arch/AArch64.cpp b/contrib/llvm/tools/lld/ELF/Arch/AArch64.cpp
index 08ffe2a08c0f..4d4789702f03 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/AArch64.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/AArch64.cpp
@@ -1,9 +1,8 @@
//===- AArch64.cpp --------------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -24,60 +23,59 @@ using namespace lld::elf;
// Page(Expr) is the page address of the expression Expr, defined
// as (Expr & ~0xFFF). (This applies even if the machine page size
// supported by the platform has a different value.)
-uint64_t elf::getAArch64Page(uint64_t Expr) {
- return Expr & ~static_cast<uint64_t>(0xFFF);
+uint64_t elf::getAArch64Page(uint64_t expr) {
+ return expr & ~static_cast<uint64_t>(0xFFF);
}
namespace {
-class AArch64 final : public TargetInfo {
+class AArch64 : public TargetInfo {
public:
AArch64();
- RelExpr getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const override;
- RelType getDynRel(RelType Type) const override;
- void writeGotPlt(uint8_t *Buf, const Symbol &S) const override;
- void writePltHeader(uint8_t *Buf) const override;
- void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
- int32_t Index, unsigned RelOff) const override;
- bool needsThunk(RelExpr Expr, RelType Type, const InputFile *File,
- uint64_t BranchAddr, const Symbol &S) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ RelType getDynRel(RelType type) const override;
+ void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writePltHeader(uint8_t *buf) const override;
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
+ bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
+ uint64_t branchAddr, const Symbol &s) const override;
uint32_t getThunkSectionSpacing() const override;
- bool inBranchRange(RelType Type, uint64_t Src, uint64_t Dst) const override;
- bool usesOnlyLowPageBits(RelType Type) const override;
- void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- RelExpr adjustRelaxExpr(RelType Type, const uint8_t *Data,
- RelExpr Expr) const override;
- void relaxTlsGdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- void relaxTlsGdToIe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- void relaxTlsIeToLe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
+ bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
+ bool usesOnlyLowPageBits(RelType type) const override;
+ void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
+ RelExpr adjustRelaxExpr(RelType type, const uint8_t *data,
+ RelExpr expr) const override;
+ void relaxTlsGdToLe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsGdToIe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsIeToLe(uint8_t *loc, RelType type, uint64_t val) const override;
};
} // namespace
AArch64::AArch64() {
- CopyRel = R_AARCH64_COPY;
- RelativeRel = R_AARCH64_RELATIVE;
- IRelativeRel = R_AARCH64_IRELATIVE;
- GotRel = R_AARCH64_GLOB_DAT;
- NoneRel = R_AARCH64_NONE;
- PltRel = R_AARCH64_JUMP_SLOT;
- TlsDescRel = R_AARCH64_TLSDESC;
- TlsGotRel = R_AARCH64_TLS_TPREL64;
- GotEntrySize = 8;
- GotPltEntrySize = 8;
- PltEntrySize = 16;
- PltHeaderSize = 32;
- DefaultMaxPageSize = 65536;
+ copyRel = R_AARCH64_COPY;
+ relativeRel = R_AARCH64_RELATIVE;
+ iRelativeRel = R_AARCH64_IRELATIVE;
+ gotRel = R_AARCH64_GLOB_DAT;
+ noneRel = R_AARCH64_NONE;
+ pltRel = R_AARCH64_JUMP_SLOT;
+ symbolicRel = R_AARCH64_ABS64;
+ tlsDescRel = R_AARCH64_TLSDESC;
+ tlsGotRel = R_AARCH64_TLS_TPREL64;
+ pltEntrySize = 16;
+ pltHeaderSize = 32;
+ defaultMaxPageSize = 65536;
// Align to the 2 MiB page size (known as a superpage or huge page).
// FreeBSD automatically promotes 2 MiB-aligned allocations.
- DefaultImageBase = 0x200000;
+ defaultImageBase = 0x200000;
- NeedsThunks = true;
+ needsThunks = true;
}
-RelExpr AArch64::getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const {
- switch (Type) {
+RelExpr AArch64::getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const {
+ switch (type) {
case R_AARCH64_TLSDESC_ADR_PAGE21:
return R_AARCH64_TLSDESC_PAGE;
case R_AARCH64_TLSDESC_LD64_LO12:
@@ -105,6 +103,7 @@ RelExpr AArch64::getRelExpr(RelType Type, const Symbol &S,
case R_AARCH64_LD_PREL_LO19:
return R_PC;
case R_AARCH64_ADR_PREL_PG_HI21:
+ case R_AARCH64_ADR_PREL_PG_HI21_NC:
return R_AARCH64_PAGE_PC;
case R_AARCH64_LD64_GOT_LO12_NC:
case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
@@ -119,18 +118,18 @@ RelExpr AArch64::getRelExpr(RelType Type, const Symbol &S,
}
}
-RelExpr AArch64::adjustRelaxExpr(RelType Type, const uint8_t *Data,
- RelExpr Expr) const {
- if (Expr == R_RELAX_TLS_GD_TO_IE) {
- if (Type == R_AARCH64_TLSDESC_ADR_PAGE21)
+RelExpr AArch64::adjustRelaxExpr(RelType type, const uint8_t *data,
+ RelExpr expr) const {
+ if (expr == R_RELAX_TLS_GD_TO_IE) {
+ if (type == R_AARCH64_TLSDESC_ADR_PAGE21)
return R_AARCH64_RELAX_TLS_GD_TO_IE_PAGE_PC;
return R_RELAX_TLS_GD_TO_IE_ABS;
}
- return Expr;
+ return expr;
}
-bool AArch64::usesOnlyLowPageBits(RelType Type) const {
- switch (Type) {
+bool AArch64::usesOnlyLowPageBits(RelType type) const {
+ switch (type) {
default:
return false;
case R_AARCH64_ADD_ABS_LO12_NC:
@@ -147,18 +146,18 @@ bool AArch64::usesOnlyLowPageBits(RelType Type) const {
}
}
-RelType AArch64::getDynRel(RelType Type) const {
- if (Type == R_AARCH64_ABS32 || Type == R_AARCH64_ABS64)
- return Type;
+RelType AArch64::getDynRel(RelType type) const {
+ if (type == R_AARCH64_ABS64)
+ return type;
return R_AARCH64_NONE;
}
-void AArch64::writeGotPlt(uint8_t *Buf, const Symbol &) const {
- write64le(Buf, In.Plt->getVA());
+void AArch64::writeGotPlt(uint8_t *buf, const Symbol &) const {
+ write64le(buf, in.plt->getVA());
}
-void AArch64::writePltHeader(uint8_t *Buf) const {
- const uint8_t PltData[] = {
+void AArch64::writePltHeader(uint8_t *buf) const {
+ const uint8_t pltData[] = {
0xf0, 0x7b, 0xbf, 0xa9, // stp x16, x30, [sp,#-16]!
0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[2]))
0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[2]))]
@@ -168,42 +167,42 @@ void AArch64::writePltHeader(uint8_t *Buf) const {
0x1f, 0x20, 0x03, 0xd5, // nop
0x1f, 0x20, 0x03, 0xd5 // nop
};
- memcpy(Buf, PltData, sizeof(PltData));
-
- uint64_t Got = In.GotPlt->getVA();
- uint64_t Plt = In.Plt->getVA();
- relocateOne(Buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
- getAArch64Page(Got + 16) - getAArch64Page(Plt + 4));
- relocateOne(Buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, Got + 16);
- relocateOne(Buf + 12, R_AARCH64_ADD_ABS_LO12_NC, Got + 16);
+ memcpy(buf, pltData, sizeof(pltData));
+
+ uint64_t got = in.gotPlt->getVA();
+ uint64_t plt = in.plt->getVA();
+ relocateOne(buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
+ getAArch64Page(got + 16) - getAArch64Page(plt + 4));
+ relocateOne(buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, got + 16);
+ relocateOne(buf + 12, R_AARCH64_ADD_ABS_LO12_NC, got + 16);
}
-void AArch64::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
- uint64_t PltEntryAddr, int32_t Index,
- unsigned RelOff) const {
- const uint8_t Inst[] = {
+void AArch64::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
+ const uint8_t inst[] = {
0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[n]))]
0x10, 0x02, 0x00, 0x91, // add x16, x16, Offset(&(.plt.got[n]))
0x20, 0x02, 0x1f, 0xd6 // br x17
};
- memcpy(Buf, Inst, sizeof(Inst));
+ memcpy(buf, inst, sizeof(inst));
- relocateOne(Buf, R_AARCH64_ADR_PREL_PG_HI21,
- getAArch64Page(GotPltEntryAddr) - getAArch64Page(PltEntryAddr));
- relocateOne(Buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, GotPltEntryAddr);
- relocateOne(Buf + 8, R_AARCH64_ADD_ABS_LO12_NC, GotPltEntryAddr);
+ relocateOne(buf, R_AARCH64_ADR_PREL_PG_HI21,
+ getAArch64Page(gotPltEntryAddr) - getAArch64Page(pltEntryAddr));
+ relocateOne(buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, gotPltEntryAddr);
+ relocateOne(buf + 8, R_AARCH64_ADD_ABS_LO12_NC, gotPltEntryAddr);
}
-bool AArch64::needsThunk(RelExpr Expr, RelType Type, const InputFile *File,
- uint64_t BranchAddr, const Symbol &S) const {
+bool AArch64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
+ uint64_t branchAddr, const Symbol &s) const {
// ELF for the ARM 64-bit architecture, section Call and Jump relocations
// only permits range extension thunks for R_AARCH64_CALL26 and
// R_AARCH64_JUMP26 relocation types.
- if (Type != R_AARCH64_CALL26 && Type != R_AARCH64_JUMP26)
+ if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26)
return false;
- uint64_t Dst = (Expr == R_PLT_PC) ? S.getPltVA() : S.getVA();
- return !inBranchRange(Type, BranchAddr, Dst);
+ uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
+ return !inBranchRange(type, branchAddr, dst);
}
uint32_t AArch64::getThunkSectionSpacing() const {
@@ -213,71 +212,72 @@ uint32_t AArch64::getThunkSectionSpacing() const {
return (128 * 1024 * 1024) - 0x30000;
}
-bool AArch64::inBranchRange(RelType Type, uint64_t Src, uint64_t Dst) const {
- if (Type != R_AARCH64_CALL26 && Type != R_AARCH64_JUMP26)
+bool AArch64::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
+ if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26)
return true;
// The AArch64 call and unconditional branch instructions have a range of
// +/- 128 MiB.
- uint64_t Range = 128 * 1024 * 1024;
- if (Dst > Src) {
+ uint64_t range = 128 * 1024 * 1024;
+ if (dst > src) {
// Immediate of branch is signed.
- Range -= 4;
- return Dst - Src <= Range;
+ range -= 4;
+ return dst - src <= range;
}
- return Src - Dst <= Range;
+ return src - dst <= range;
}
-static void write32AArch64Addr(uint8_t *L, uint64_t Imm) {
- uint32_t ImmLo = (Imm & 0x3) << 29;
- uint32_t ImmHi = (Imm & 0x1FFFFC) << 3;
- uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3);
- write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi);
+static void write32AArch64Addr(uint8_t *l, uint64_t imm) {
+ uint32_t immLo = (imm & 0x3) << 29;
+ uint32_t immHi = (imm & 0x1FFFFC) << 3;
+ uint64_t mask = (0x3 << 29) | (0x1FFFFC << 3);
+ write32le(l, (read32le(l) & ~mask) | immLo | immHi);
}
// Return the bits [Start, End] from Val shifted Start bits.
// For instance, getBits(0xF0, 4, 8) returns 0xF.
-static uint64_t getBits(uint64_t Val, int Start, int End) {
- uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1;
- return (Val >> Start) & Mask;
+static uint64_t getBits(uint64_t val, int start, int end) {
+ uint64_t mask = ((uint64_t)1 << (end + 1 - start)) - 1;
+ return (val >> start) & mask;
}
-static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }
+static void or32le(uint8_t *p, int32_t v) { write32le(p, read32le(p) | v); }
// Update the immediate field in a AARCH64 ldr, str, and add instruction.
-static void or32AArch64Imm(uint8_t *L, uint64_t Imm) {
- or32le(L, (Imm & 0xFFF) << 10);
+static void or32AArch64Imm(uint8_t *l, uint64_t imm) {
+ or32le(l, (imm & 0xFFF) << 10);
}
-void AArch64::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
- switch (Type) {
+void AArch64::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
case R_AARCH64_ABS16:
case R_AARCH64_PREL16:
- checkIntUInt(Loc, Val, 16, Type);
- write16le(Loc, Val);
+ checkIntUInt(loc, val, 16, type);
+ write16le(loc, val);
break;
case R_AARCH64_ABS32:
case R_AARCH64_PREL32:
- checkIntUInt(Loc, Val, 32, Type);
- write32le(Loc, Val);
+ checkIntUInt(loc, val, 32, type);
+ write32le(loc, val);
break;
case R_AARCH64_ABS64:
- case R_AARCH64_GLOB_DAT:
case R_AARCH64_PREL64:
- write64le(Loc, Val);
+ write64le(loc, val);
break;
case R_AARCH64_ADD_ABS_LO12_NC:
- or32AArch64Imm(Loc, Val);
+ or32AArch64Imm(loc, val);
break;
case R_AARCH64_ADR_GOT_PAGE:
case R_AARCH64_ADR_PREL_PG_HI21:
case R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
case R_AARCH64_TLSDESC_ADR_PAGE21:
- checkInt(Loc, Val, 33, Type);
- write32AArch64Addr(Loc, Val >> 12);
+ checkInt(loc, val, 33, type);
+ LLVM_FALLTHROUGH;
+ case R_AARCH64_ADR_PREL_PG_HI21_NC:
+ write32AArch64Addr(loc, val >> 12);
break;
case R_AARCH64_ADR_PREL_LO21:
- checkInt(Loc, Val, 21, Type);
- write32AArch64Addr(Loc, Val);
+ checkInt(loc, val, 21, type);
+ write32AArch64Addr(loc, val);
break;
case R_AARCH64_JUMP26:
// Normally we would just write the bits of the immediate field, however
@@ -287,75 +287,75 @@ void AArch64::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
// opcode and the immediate (0 001 | 01 imm26) we can do this
// transformation by placing a R_AARCH64_JUMP26 relocation at the offset of
// the instruction we want to patch.
- write32le(Loc, 0x14000000);
+ write32le(loc, 0x14000000);
LLVM_FALLTHROUGH;
case R_AARCH64_CALL26:
- checkInt(Loc, Val, 28, Type);
- or32le(Loc, (Val & 0x0FFFFFFC) >> 2);
+ checkInt(loc, val, 28, type);
+ or32le(loc, (val & 0x0FFFFFFC) >> 2);
break;
case R_AARCH64_CONDBR19:
case R_AARCH64_LD_PREL_LO19:
- checkAlignment(Loc, Val, 4, Type);
- checkInt(Loc, Val, 21, Type);
- or32le(Loc, (Val & 0x1FFFFC) << 3);
+ checkAlignment(loc, val, 4, type);
+ checkInt(loc, val, 21, type);
+ or32le(loc, (val & 0x1FFFFC) << 3);
break;
case R_AARCH64_LDST8_ABS_LO12_NC:
case R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC:
- or32AArch64Imm(Loc, getBits(Val, 0, 11));
+ or32AArch64Imm(loc, getBits(val, 0, 11));
break;
case R_AARCH64_LDST16_ABS_LO12_NC:
case R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC:
- checkAlignment(Loc, Val, 2, Type);
- or32AArch64Imm(Loc, getBits(Val, 1, 11));
+ checkAlignment(loc, val, 2, type);
+ or32AArch64Imm(loc, getBits(val, 1, 11));
break;
case R_AARCH64_LDST32_ABS_LO12_NC:
case R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC:
- checkAlignment(Loc, Val, 4, Type);
- or32AArch64Imm(Loc, getBits(Val, 2, 11));
+ checkAlignment(loc, val, 4, type);
+ or32AArch64Imm(loc, getBits(val, 2, 11));
break;
case R_AARCH64_LDST64_ABS_LO12_NC:
case R_AARCH64_LD64_GOT_LO12_NC:
case R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
case R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC:
case R_AARCH64_TLSDESC_LD64_LO12:
- checkAlignment(Loc, Val, 8, Type);
- or32AArch64Imm(Loc, getBits(Val, 3, 11));
+ checkAlignment(loc, val, 8, type);
+ or32AArch64Imm(loc, getBits(val, 3, 11));
break;
case R_AARCH64_LDST128_ABS_LO12_NC:
case R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC:
- checkAlignment(Loc, Val, 16, Type);
- or32AArch64Imm(Loc, getBits(Val, 4, 11));
+ checkAlignment(loc, val, 16, type);
+ or32AArch64Imm(loc, getBits(val, 4, 11));
break;
case R_AARCH64_MOVW_UABS_G0_NC:
- or32le(Loc, (Val & 0xFFFF) << 5);
+ or32le(loc, (val & 0xFFFF) << 5);
break;
case R_AARCH64_MOVW_UABS_G1_NC:
- or32le(Loc, (Val & 0xFFFF0000) >> 11);
+ or32le(loc, (val & 0xFFFF0000) >> 11);
break;
case R_AARCH64_MOVW_UABS_G2_NC:
- or32le(Loc, (Val & 0xFFFF00000000) >> 27);
+ or32le(loc, (val & 0xFFFF00000000) >> 27);
break;
case R_AARCH64_MOVW_UABS_G3:
- or32le(Loc, (Val & 0xFFFF000000000000) >> 43);
+ or32le(loc, (val & 0xFFFF000000000000) >> 43);
break;
case R_AARCH64_TSTBR14:
- checkInt(Loc, Val, 16, Type);
- or32le(Loc, (Val & 0xFFFC) << 3);
+ checkInt(loc, val, 16, type);
+ or32le(loc, (val & 0xFFFC) << 3);
break;
case R_AARCH64_TLSLE_ADD_TPREL_HI12:
- checkUInt(Loc, Val, 24, Type);
- or32AArch64Imm(Loc, Val >> 12);
+ checkUInt(loc, val, 24, type);
+ or32AArch64Imm(loc, val >> 12);
break;
case R_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
case R_AARCH64_TLSDESC_ADD_LO12:
- or32AArch64Imm(Loc, Val);
+ or32AArch64Imm(loc, val);
break;
default:
- error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+ error(getErrorLocation(loc) + "unrecognized relocation " + toString(type));
}
}
-void AArch64::relaxTlsGdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
+void AArch64::relaxTlsGdToLe(uint8_t *loc, RelType type, uint64_t val) const {
// TLSDESC Global-Dynamic relocation are in the form:
// adrp x0, :tlsdesc:v [R_AARCH64_TLSDESC_ADR_PAGE21]
// ldr x1, [x0, #:tlsdesc_lo12:v [R_AARCH64_TLSDESC_LD64_LO12]
@@ -367,25 +367,25 @@ void AArch64::relaxTlsGdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
// movk x0, #0x10
// nop
// nop
- checkUInt(Loc, Val, 32, Type);
+ checkUInt(loc, val, 32, type);
- switch (Type) {
+ switch (type) {
case R_AARCH64_TLSDESC_ADD_LO12:
case R_AARCH64_TLSDESC_CALL:
- write32le(Loc, 0xd503201f); // nop
+ write32le(loc, 0xd503201f); // nop
return;
case R_AARCH64_TLSDESC_ADR_PAGE21:
- write32le(Loc, 0xd2a00000 | (((Val >> 16) & 0xffff) << 5)); // movz
+ write32le(loc, 0xd2a00000 | (((val >> 16) & 0xffff) << 5)); // movz
return;
case R_AARCH64_TLSDESC_LD64_LO12:
- write32le(Loc, 0xf2800000 | ((Val & 0xffff) << 5)); // movk
+ write32le(loc, 0xf2800000 | ((val & 0xffff) << 5)); // movk
return;
default:
llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
}
}
-void AArch64::relaxTlsGdToIe(uint8_t *Loc, RelType Type, uint64_t Val) const {
+void AArch64::relaxTlsGdToIe(uint8_t *loc, RelType type, uint64_t val) const {
// TLSDESC Global-Dynamic relocation are in the form:
// adrp x0, :tlsdesc:v [R_AARCH64_TLSDESC_ADR_PAGE21]
// ldr x1, [x0, #:tlsdesc_lo12:v [R_AARCH64_TLSDESC_LD64_LO12]
@@ -398,43 +398,193 @@ void AArch64::relaxTlsGdToIe(uint8_t *Loc, RelType Type, uint64_t Val) const {
// nop
// nop
- switch (Type) {
+ switch (type) {
case R_AARCH64_TLSDESC_ADD_LO12:
case R_AARCH64_TLSDESC_CALL:
- write32le(Loc, 0xd503201f); // nop
+ write32le(loc, 0xd503201f); // nop
break;
case R_AARCH64_TLSDESC_ADR_PAGE21:
- write32le(Loc, 0x90000000); // adrp
- relocateOne(Loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, Val);
+ write32le(loc, 0x90000000); // adrp
+ relocateOne(loc, R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21, val);
break;
case R_AARCH64_TLSDESC_LD64_LO12:
- write32le(Loc, 0xf9400000); // ldr
- relocateOne(Loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, Val);
+ write32le(loc, 0xf9400000); // ldr
+ relocateOne(loc, R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC, val);
break;
default:
llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
}
}
-void AArch64::relaxTlsIeToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
- checkUInt(Loc, Val, 32, Type);
+void AArch64::relaxTlsIeToLe(uint8_t *loc, RelType type, uint64_t val) const {
+ checkUInt(loc, val, 32, type);
- if (Type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
+ if (type == R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21) {
// Generate MOVZ.
- uint32_t RegNo = read32le(Loc) & 0x1f;
- write32le(Loc, (0xd2a00000 | RegNo) | (((Val >> 16) & 0xffff) << 5));
+ uint32_t regNo = read32le(loc) & 0x1f;
+ write32le(loc, (0xd2a00000 | regNo) | (((val >> 16) & 0xffff) << 5));
return;
}
- if (Type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
+ if (type == R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC) {
// Generate MOVK.
- uint32_t RegNo = read32le(Loc) & 0x1f;
- write32le(Loc, (0xf2800000 | RegNo) | ((Val & 0xffff) << 5));
+ uint32_t regNo = read32le(loc) & 0x1f;
+ write32le(loc, (0xf2800000 | regNo) | ((val & 0xffff) << 5));
return;
}
llvm_unreachable("invalid relocation for TLS IE to LE relaxation");
}
-TargetInfo *elf::getAArch64TargetInfo() {
- static AArch64 Target;
- return &Target;
+// AArch64 may use security features in variant PLT sequences. These are:
+// Pointer Authentication (PAC), introduced in armv8.3-a and Branch Target
+// Indicator (BTI) introduced in armv8.5-a. The additional instructions used
+// in the variant Plt sequences are encoded in the Hint space so they can be
+// deployed on older architectures, which treat the instructions as a nop.
+// PAC and BTI can be combined leading to the following combinations:
+// writePltHeader
+// writePltHeaderBti (no PAC Header needed)
+// writePlt
+// writePltBti (BTI only)
+// writePltPac (PAC only)
+// writePltBtiPac (BTI and PAC)
+//
+// When PAC is enabled the dynamic loader encrypts the address that it places
+// in the .got.plt using the pacia1716 instruction which encrypts the value in
+// x17 using the modifier in x16. The static linker places autia1716 before the
+// indirect branch to x17 to authenticate the address in x17 with the modifier
+// in x16. This makes it more difficult for an attacker to modify the value in
+// the .got.plt.
+//
+// When BTI is enabled all indirect branches must land on a bti instruction.
+// The static linker must place a bti instruction at the start of any PLT entry
+// that may be the target of an indirect branch. As the PLT entries call the
+// lazy resolver indirectly this must have a bti instruction at start. In
+// general a bti instruction is not needed for a PLT entry as indirect calls
+// are resolved to the function address and not the PLT entry for the function.
+// There are a small number of cases where the PLT address can escape, such as
+// taking the address of a function or ifunc via a non got-generating
+// relocation, and a shared library refers to that symbol.
+//
+// We use the bti c variant of the instruction which permits indirect branches
+// (br) via x16/x17 and indirect function calls (blr) via any register. The ABI
+// guarantees that all indirect branches from code requiring BTI protection
+// will go via x16/x17
+
+namespace {
+class AArch64BtiPac final : public AArch64 {
+public:
+ AArch64BtiPac();
+ void writePltHeader(uint8_t *buf) const override;
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
+
+private:
+ bool btiHeader; // bti instruction needed in PLT Header
+ bool btiEntry; // bti instruction needed in PLT Entry
+ bool pacEntry; // autia1716 instruction needed in PLT Entry
+};
+} // namespace
+
+AArch64BtiPac::AArch64BtiPac() {
+ btiHeader = (config->andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_BTI);
+ // A BTI (Branch Target Indicator) Plt Entry is only required if the
+ // address of the PLT entry can be taken by the program, which permits an
+ // indirect jump to the PLT entry. This can happen when the address
+ // of the PLT entry for a function is canonicalised due to the address of
+ // the function in an executable being taken by a shared library.
+ // FIXME: There is a potential optimization to omit the BTI if we detect
+ // that the address of the PLT entry isn't taken.
+ btiEntry = btiHeader && !config->shared;
+ pacEntry = (config->andFeatures & GNU_PROPERTY_AARCH64_FEATURE_1_PAC);
+
+ if (btiEntry || pacEntry)
+ pltEntrySize = 24;
}
+
+void AArch64BtiPac::writePltHeader(uint8_t *buf) const {
+ const uint8_t btiData[] = { 0x5f, 0x24, 0x03, 0xd5 }; // bti c
+ const uint8_t pltData[] = {
+ 0xf0, 0x7b, 0xbf, 0xa9, // stp x16, x30, [sp,#-16]!
+ 0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[2]))
+ 0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[2]))]
+ 0x10, 0x02, 0x00, 0x91, // add x16, x16, Offset(&(.plt.got[2]))
+ 0x20, 0x02, 0x1f, 0xd6, // br x17
+ 0x1f, 0x20, 0x03, 0xd5, // nop
+ 0x1f, 0x20, 0x03, 0xd5 // nop
+ };
+ const uint8_t nopData[] = { 0x1f, 0x20, 0x03, 0xd5 }; // nop
+
+ uint64_t got = in.gotPlt->getVA();
+ uint64_t plt = in.plt->getVA();
+
+ if (btiHeader) {
+ // PltHeader is called indirectly by plt[N]. Prefix pltData with a BTI C
+ // instruction.
+ memcpy(buf, btiData, sizeof(btiData));
+ buf += sizeof(btiData);
+ plt += sizeof(btiData);
+ }
+ memcpy(buf, pltData, sizeof(pltData));
+
+ relocateOne(buf + 4, R_AARCH64_ADR_PREL_PG_HI21,
+ getAArch64Page(got + 16) - getAArch64Page(plt + 8));
+ relocateOne(buf + 8, R_AARCH64_LDST64_ABS_LO12_NC, got + 16);
+ relocateOne(buf + 12, R_AARCH64_ADD_ABS_LO12_NC, got + 16);
+ if (!btiHeader)
+ // We didn't add the BTI c instruction so round out size with NOP.
+ memcpy(buf + sizeof(pltData), nopData, sizeof(nopData));
+}
+
+void AArch64BtiPac::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
+ // The PLT entry is of the form:
+ // [btiData] addrInst (pacBr | stdBr) [nopData]
+ const uint8_t btiData[] = { 0x5f, 0x24, 0x03, 0xd5 }; // bti c
+ const uint8_t addrInst[] = {
+ 0x10, 0x00, 0x00, 0x90, // adrp x16, Page(&(.plt.got[n]))
+ 0x11, 0x02, 0x40, 0xf9, // ldr x17, [x16, Offset(&(.plt.got[n]))]
+ 0x10, 0x02, 0x00, 0x91 // add x16, x16, Offset(&(.plt.got[n]))
+ };
+ const uint8_t pacBr[] = {
+ 0x9f, 0x21, 0x03, 0xd5, // autia1716
+ 0x20, 0x02, 0x1f, 0xd6 // br x17
+ };
+ const uint8_t stdBr[] = {
+ 0x20, 0x02, 0x1f, 0xd6, // br x17
+ 0x1f, 0x20, 0x03, 0xd5 // nop
+ };
+ const uint8_t nopData[] = { 0x1f, 0x20, 0x03, 0xd5 }; // nop
+
+ if (btiEntry) {
+ memcpy(buf, btiData, sizeof(btiData));
+ buf += sizeof(btiData);
+ pltEntryAddr += sizeof(btiData);
+ }
+
+ memcpy(buf, addrInst, sizeof(addrInst));
+ relocateOne(buf, R_AARCH64_ADR_PREL_PG_HI21,
+ getAArch64Page(gotPltEntryAddr) -
+ getAArch64Page(pltEntryAddr));
+ relocateOne(buf + 4, R_AARCH64_LDST64_ABS_LO12_NC, gotPltEntryAddr);
+ relocateOne(buf + 8, R_AARCH64_ADD_ABS_LO12_NC, gotPltEntryAddr);
+
+ if (pacEntry)
+ memcpy(buf + sizeof(addrInst), pacBr, sizeof(pacBr));
+ else
+ memcpy(buf + sizeof(addrInst), stdBr, sizeof(stdBr));
+ if (!btiEntry)
+ // We didn't add the BTI c instruction so round out size with NOP.
+ memcpy(buf + sizeof(addrInst) + sizeof(stdBr), nopData, sizeof(nopData));
+}
+
+static TargetInfo *getTargetInfo() {
+ if (config->andFeatures & (GNU_PROPERTY_AARCH64_FEATURE_1_BTI |
+ GNU_PROPERTY_AARCH64_FEATURE_1_PAC)) {
+ static AArch64BtiPac t;
+ return &t;
+ }
+ static AArch64 t;
+ return &t;
+}
+
+TargetInfo *elf::getAArch64TargetInfo() { return getTargetInfo(); }
diff --git a/contrib/llvm/tools/lld/ELF/Arch/AMDGPU.cpp b/contrib/llvm/tools/lld/ELF/Arch/AMDGPU.cpp
index a7c6c84ceecd..f2e32ca0996d 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/AMDGPU.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/AMDGPU.cpp
@@ -1,9 +1,8 @@
//===- AMDGPU.cpp ---------------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -26,62 +25,63 @@ class AMDGPU final : public TargetInfo {
public:
AMDGPU();
uint32_t calcEFlags() const override;
- void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- RelExpr getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const override;
+ void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ RelType getDynRel(RelType type) const override;
};
} // namespace
AMDGPU::AMDGPU() {
- RelativeRel = R_AMDGPU_RELATIVE64;
- GotRel = R_AMDGPU_ABS64;
- NoneRel = R_AMDGPU_NONE;
- GotEntrySize = 8;
+ relativeRel = R_AMDGPU_RELATIVE64;
+ gotRel = R_AMDGPU_ABS64;
+ noneRel = R_AMDGPU_NONE;
+ symbolicRel = R_AMDGPU_ABS64;
}
-static uint32_t getEFlags(InputFile *File) {
- return cast<ObjFile<ELF64LE>>(File)->getObj().getHeader()->e_flags;
+static uint32_t getEFlags(InputFile *file) {
+ return cast<ObjFile<ELF64LE>>(file)->getObj().getHeader()->e_flags;
}
uint32_t AMDGPU::calcEFlags() const {
- assert(!ObjectFiles.empty());
- uint32_t Ret = getEFlags(ObjectFiles[0]);
+ assert(!objectFiles.empty());
+ uint32_t ret = getEFlags(objectFiles[0]);
// Verify that all input files have the same e_flags.
- for (InputFile *F : makeArrayRef(ObjectFiles).slice(1)) {
- if (Ret == getEFlags(F))
+ for (InputFile *f : makeArrayRef(objectFiles).slice(1)) {
+ if (ret == getEFlags(f))
continue;
- error("incompatible e_flags: " + toString(F));
+ error("incompatible e_flags: " + toString(f));
return 0;
}
- return Ret;
+ return ret;
}
-void AMDGPU::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
- switch (Type) {
+void AMDGPU::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
case R_AMDGPU_ABS32:
case R_AMDGPU_GOTPCREL:
case R_AMDGPU_GOTPCREL32_LO:
case R_AMDGPU_REL32:
case R_AMDGPU_REL32_LO:
- write32le(Loc, Val);
+ write32le(loc, val);
break;
case R_AMDGPU_ABS64:
case R_AMDGPU_REL64:
- write64le(Loc, Val);
+ write64le(loc, val);
break;
case R_AMDGPU_GOTPCREL32_HI:
case R_AMDGPU_REL32_HI:
- write32le(Loc, Val >> 32);
+ write32le(loc, val >> 32);
break;
default:
- error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+ llvm_unreachable("unknown relocation");
}
}
-RelExpr AMDGPU::getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const {
- switch (Type) {
+RelExpr AMDGPU::getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const {
+ switch (type) {
case R_AMDGPU_ABS32:
case R_AMDGPU_ABS64:
return R_ABS;
@@ -95,11 +95,19 @@ RelExpr AMDGPU::getRelExpr(RelType Type, const Symbol &S,
case R_AMDGPU_GOTPCREL32_HI:
return R_GOT_PC;
default:
- return R_INVALID;
+ error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
+ ") against symbol " + toString(s));
+ return R_NONE;
}
}
+RelType AMDGPU::getDynRel(RelType type) const {
+ if (type == R_AMDGPU_ABS64)
+ return type;
+ return R_AMDGPU_NONE;
+}
+
TargetInfo *elf::getAMDGPUTargetInfo() {
- static AMDGPU Target;
- return &Target;
+ static AMDGPU target;
+ return &target;
}
diff --git a/contrib/llvm/tools/lld/ELF/Arch/ARM.cpp b/contrib/llvm/tools/lld/ELF/Arch/ARM.cpp
index 120caca671af..64adc33c07ae 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/ARM.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/ARM.cpp
@@ -1,9 +1,8 @@
//===- ARM.cpp ------------------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -27,63 +26,62 @@ class ARM final : public TargetInfo {
public:
ARM();
uint32_t calcEFlags() const override;
- RelExpr getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const override;
- RelType getDynRel(RelType Type) const override;
- int64_t getImplicitAddend(const uint8_t *Buf, RelType Type) const override;
- void writeGotPlt(uint8_t *Buf, const Symbol &S) const override;
- void writeIgotPlt(uint8_t *Buf, const Symbol &S) const override;
- void writePltHeader(uint8_t *Buf) const override;
- void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
- int32_t Index, unsigned RelOff) const override;
- void addPltSymbols(InputSection &IS, uint64_t Off) const override;
- void addPltHeaderSymbols(InputSection &ISD) const override;
- bool needsThunk(RelExpr Expr, RelType Type, const InputFile *File,
- uint64_t BranchAddr, const Symbol &S) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ RelType getDynRel(RelType type) const override;
+ int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
+ void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writePltHeader(uint8_t *buf) const override;
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
+ void addPltSymbols(InputSection &isec, uint64_t off) const override;
+ void addPltHeaderSymbols(InputSection &isd) const override;
+ bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
+ uint64_t branchAddr, const Symbol &s) const override;
uint32_t getThunkSectionSpacing() const override;
- bool inBranchRange(RelType Type, uint64_t Src, uint64_t Dst) const override;
- void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
+ bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
+ void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
};
} // namespace
ARM::ARM() {
- CopyRel = R_ARM_COPY;
- RelativeRel = R_ARM_RELATIVE;
- IRelativeRel = R_ARM_IRELATIVE;
- GotRel = R_ARM_GLOB_DAT;
- NoneRel = R_ARM_NONE;
- PltRel = R_ARM_JUMP_SLOT;
- TlsGotRel = R_ARM_TLS_TPOFF32;
- TlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
- TlsOffsetRel = R_ARM_TLS_DTPOFF32;
- GotBaseSymInGotPlt = false;
- GotEntrySize = 4;
- GotPltEntrySize = 4;
- PltEntrySize = 16;
- PltHeaderSize = 32;
- TrapInstr = {0xd4, 0xd4, 0xd4, 0xd4};
- NeedsThunks = true;
+ copyRel = R_ARM_COPY;
+ relativeRel = R_ARM_RELATIVE;
+ iRelativeRel = R_ARM_IRELATIVE;
+ gotRel = R_ARM_GLOB_DAT;
+ noneRel = R_ARM_NONE;
+ pltRel = R_ARM_JUMP_SLOT;
+ symbolicRel = R_ARM_ABS32;
+ tlsGotRel = R_ARM_TLS_TPOFF32;
+ tlsModuleIndexRel = R_ARM_TLS_DTPMOD32;
+ tlsOffsetRel = R_ARM_TLS_DTPOFF32;
+ gotBaseSymInGotPlt = false;
+ pltEntrySize = 16;
+ pltHeaderSize = 32;
+ trapInstr = {0xd4, 0xd4, 0xd4, 0xd4};
+ needsThunks = true;
}
uint32_t ARM::calcEFlags() const {
// The ABIFloatType is used by loaders to detect the floating point calling
// convention.
- uint32_t ABIFloatType = 0;
- if (Config->ARMVFPArgs == ARMVFPArgKind::Base ||
- Config->ARMVFPArgs == ARMVFPArgKind::Default)
- ABIFloatType = EF_ARM_ABI_FLOAT_SOFT;
- else if (Config->ARMVFPArgs == ARMVFPArgKind::VFP)
- ABIFloatType = EF_ARM_ABI_FLOAT_HARD;
+ uint32_t abiFloatType = 0;
+ if (config->armVFPArgs == ARMVFPArgKind::Base ||
+ config->armVFPArgs == ARMVFPArgKind::Default)
+ abiFloatType = EF_ARM_ABI_FLOAT_SOFT;
+ else if (config->armVFPArgs == ARMVFPArgKind::VFP)
+ abiFloatType = EF_ARM_ABI_FLOAT_HARD;
// We don't currently use any features incompatible with EF_ARM_EABI_VER5,
// but we don't have any firm guarantees of conformance. Linux AArch64
// kernels (as of 2016) require an EABI version to be set.
- return EF_ARM_EABI_VER5 | ABIFloatType;
+ return EF_ARM_EABI_VER5 | abiFloatType;
}
-RelExpr ARM::getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const {
- switch (Type) {
+RelExpr ARM::getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const {
+ switch (type) {
case R_ARM_THM_JUMP11:
return R_PC;
case R_ARM_CALL:
@@ -108,11 +106,11 @@ RelExpr ARM::getRelExpr(RelType Type, const Symbol &S,
case R_ARM_SBREL32:
return R_ARM_SBREL;
case R_ARM_TARGET1:
- return Config->Target1Rel ? R_PC : R_ABS;
+ return config->target1Rel ? R_PC : R_ABS;
case R_ARM_TARGET2:
- if (Config->Target2 == Target2Policy::Rel)
+ if (config->target2 == Target2Policy::Rel)
return R_PC;
- if (Config->Target2 == Target2Policy::Abs)
+ if (config->target2 == Target2Policy::Abs)
return R_ABS;
return R_GOT_PC;
case R_ARM_TLS_GD32:
@@ -145,25 +143,25 @@ RelExpr ARM::getRelExpr(RelType Type, const Symbol &S,
}
}
-RelType ARM::getDynRel(RelType Type) const {
- if ((Type == R_ARM_ABS32) || (Type == R_ARM_TARGET1 && !Config->Target1Rel))
+RelType ARM::getDynRel(RelType type) const {
+ if ((type == R_ARM_ABS32) || (type == R_ARM_TARGET1 && !config->target1Rel))
return R_ARM_ABS32;
return R_ARM_NONE;
}
-void ARM::writeGotPlt(uint8_t *Buf, const Symbol &) const {
- write32le(Buf, In.Plt->getVA());
+void ARM::writeGotPlt(uint8_t *buf, const Symbol &) const {
+ write32le(buf, in.plt->getVA());
}
-void ARM::writeIgotPlt(uint8_t *Buf, const Symbol &S) const {
+void ARM::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
// An ARM entry is the address of the ifunc resolver function.
- write32le(Buf, S.getVA());
+ write32le(buf, s.getVA());
}
// Long form PLT Header that does not have any restrictions on the displacement
// of the .plt from the .plt.got.
-static void writePltHeaderLong(uint8_t *Buf) {
- const uint8_t PltData[] = {
+static void writePltHeaderLong(uint8_t *buf) {
+ const uint8_t pltData[] = {
0x04, 0xe0, 0x2d, 0xe5, // str lr, [sp,#-4]!
0x04, 0xe0, 0x9f, 0xe5, // ldr lr, L2
0x0e, 0xe0, 0x8f, 0xe0, // L1: add lr, pc, lr
@@ -172,128 +170,128 @@ static void writePltHeaderLong(uint8_t *Buf) {
0xd4, 0xd4, 0xd4, 0xd4, // Pad to 32-byte boundary
0xd4, 0xd4, 0xd4, 0xd4, // Pad to 32-byte boundary
0xd4, 0xd4, 0xd4, 0xd4};
- memcpy(Buf, PltData, sizeof(PltData));
- uint64_t GotPlt = In.GotPlt->getVA();
- uint64_t L1 = In.Plt->getVA() + 8;
- write32le(Buf + 16, GotPlt - L1 - 8);
+ memcpy(buf, pltData, sizeof(pltData));
+ uint64_t gotPlt = in.gotPlt->getVA();
+ uint64_t l1 = in.plt->getVA() + 8;
+ write32le(buf + 16, gotPlt - l1 - 8);
}
// The default PLT header requires the .plt.got to be within 128 Mb of the
// .plt in the positive direction.
-void ARM::writePltHeader(uint8_t *Buf) const {
+void ARM::writePltHeader(uint8_t *buf) const {
// Use a similar sequence to that in writePlt(), the difference is the calling
// conventions mean we use lr instead of ip. The PLT entry is responsible for
// saving lr on the stack, the dynamic loader is responsible for reloading
// it.
- const uint32_t PltData[] = {
+ const uint32_t pltData[] = {
0xe52de004, // L1: str lr, [sp,#-4]!
0xe28fe600, // add lr, pc, #0x0NN00000 &(.got.plt - L1 - 4)
0xe28eea00, // add lr, lr, #0x000NN000 &(.got.plt - L1 - 4)
0xe5bef000, // ldr pc, [lr, #0x00000NNN] &(.got.plt -L1 - 4)
};
- uint64_t Offset = In.GotPlt->getVA() - In.Plt->getVA() - 4;
- if (!llvm::isUInt<27>(Offset)) {
+ uint64_t offset = in.gotPlt->getVA() - in.plt->getVA() - 4;
+ if (!llvm::isUInt<27>(offset)) {
// We cannot encode the Offset, use the long form.
- writePltHeaderLong(Buf);
+ writePltHeaderLong(buf);
return;
}
- write32le(Buf + 0, PltData[0]);
- write32le(Buf + 4, PltData[1] | ((Offset >> 20) & 0xff));
- write32le(Buf + 8, PltData[2] | ((Offset >> 12) & 0xff));
- write32le(Buf + 12, PltData[3] | (Offset & 0xfff));
- memcpy(Buf + 16, TrapInstr.data(), 4); // Pad to 32-byte boundary
- memcpy(Buf + 20, TrapInstr.data(), 4);
- memcpy(Buf + 24, TrapInstr.data(), 4);
- memcpy(Buf + 28, TrapInstr.data(), 4);
+ write32le(buf + 0, pltData[0]);
+ write32le(buf + 4, pltData[1] | ((offset >> 20) & 0xff));
+ write32le(buf + 8, pltData[2] | ((offset >> 12) & 0xff));
+ write32le(buf + 12, pltData[3] | (offset & 0xfff));
+ memcpy(buf + 16, trapInstr.data(), 4); // Pad to 32-byte boundary
+ memcpy(buf + 20, trapInstr.data(), 4);
+ memcpy(buf + 24, trapInstr.data(), 4);
+ memcpy(buf + 28, trapInstr.data(), 4);
}
-void ARM::addPltHeaderSymbols(InputSection &IS) const {
- addSyntheticLocal("$a", STT_NOTYPE, 0, 0, IS);
- addSyntheticLocal("$d", STT_NOTYPE, 16, 0, IS);
+void ARM::addPltHeaderSymbols(InputSection &isec) const {
+ addSyntheticLocal("$a", STT_NOTYPE, 0, 0, isec);
+ addSyntheticLocal("$d", STT_NOTYPE, 16, 0, isec);
}
// Long form PLT entries that do not have any restrictions on the displacement
// of the .plt from the .plt.got.
-static void writePltLong(uint8_t *Buf, uint64_t GotPltEntryAddr,
- uint64_t PltEntryAddr, int32_t Index,
- unsigned RelOff) {
- const uint8_t PltData[] = {
+static void writePltLong(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) {
+ const uint8_t pltData[] = {
0x04, 0xc0, 0x9f, 0xe5, // ldr ip, L2
0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc
0x00, 0xf0, 0x9c, 0xe5, // ldr pc, [ip]
0x00, 0x00, 0x00, 0x00, // L2: .word Offset(&(.plt.got) - L1 - 8
};
- memcpy(Buf, PltData, sizeof(PltData));
- uint64_t L1 = PltEntryAddr + 4;
- write32le(Buf + 12, GotPltEntryAddr - L1 - 8);
+ memcpy(buf, pltData, sizeof(pltData));
+ uint64_t l1 = pltEntryAddr + 4;
+ write32le(buf + 12, gotPltEntryAddr - l1 - 8);
}
// The default PLT entries require the .plt.got to be within 128 Mb of the
// .plt in the positive direction.
-void ARM::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
- uint64_t PltEntryAddr, int32_t Index,
- unsigned RelOff) const {
+void ARM::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
// The PLT entry is similar to the example given in Appendix A of ELF for
// the Arm Architecture. Instead of using the Group Relocations to find the
// optimal rotation for the 8-bit immediate used in the add instructions we
// hard code the most compact rotations for simplicity. This saves a load
// instruction over the long plt sequences.
- const uint32_t PltData[] = {
+ const uint32_t pltData[] = {
0xe28fc600, // L1: add ip, pc, #0x0NN00000 Offset(&(.plt.got) - L1 - 8
0xe28cca00, // add ip, ip, #0x000NN000 Offset(&(.plt.got) - L1 - 8
0xe5bcf000, // ldr pc, [ip, #0x00000NNN] Offset(&(.plt.got) - L1 - 8
};
- uint64_t Offset = GotPltEntryAddr - PltEntryAddr - 8;
- if (!llvm::isUInt<27>(Offset)) {
+ uint64_t offset = gotPltEntryAddr - pltEntryAddr - 8;
+ if (!llvm::isUInt<27>(offset)) {
// We cannot encode the Offset, use the long form.
- writePltLong(Buf, GotPltEntryAddr, PltEntryAddr, Index, RelOff);
+ writePltLong(buf, gotPltEntryAddr, pltEntryAddr, index, relOff);
return;
}
- write32le(Buf + 0, PltData[0] | ((Offset >> 20) & 0xff));
- write32le(Buf + 4, PltData[1] | ((Offset >> 12) & 0xff));
- write32le(Buf + 8, PltData[2] | (Offset & 0xfff));
- memcpy(Buf + 12, TrapInstr.data(), 4); // Pad to 16-byte boundary
+ write32le(buf + 0, pltData[0] | ((offset >> 20) & 0xff));
+ write32le(buf + 4, pltData[1] | ((offset >> 12) & 0xff));
+ write32le(buf + 8, pltData[2] | (offset & 0xfff));
+ memcpy(buf + 12, trapInstr.data(), 4); // Pad to 16-byte boundary
}
-void ARM::addPltSymbols(InputSection &IS, uint64_t Off) const {
- addSyntheticLocal("$a", STT_NOTYPE, Off, 0, IS);
- addSyntheticLocal("$d", STT_NOTYPE, Off + 12, 0, IS);
+void ARM::addPltSymbols(InputSection &isec, uint64_t off) const {
+ addSyntheticLocal("$a", STT_NOTYPE, off, 0, isec);
+ addSyntheticLocal("$d", STT_NOTYPE, off + 12, 0, isec);
}
-bool ARM::needsThunk(RelExpr Expr, RelType Type, const InputFile *File,
- uint64_t BranchAddr, const Symbol &S) const {
+bool ARM::needsThunk(RelExpr expr, RelType type, const InputFile *file,
+ uint64_t branchAddr, const Symbol &s) const {
// If S is an undefined weak symbol and does not have a PLT entry then it
// will be resolved as a branch to the next instruction.
- if (S.isUndefWeak() && !S.isInPlt())
+ if (s.isUndefWeak() && !s.isInPlt())
return false;
// A state change from ARM to Thumb and vice versa must go through an
// interworking thunk if the relocation type is not R_ARM_CALL or
// R_ARM_THM_CALL.
- switch (Type) {
+ switch (type) {
case R_ARM_PC24:
case R_ARM_PLT32:
case R_ARM_JUMP24:
// Source is ARM, all PLT entries are ARM so no interworking required.
// Otherwise we need to interwork if Symbol has bit 0 set (Thumb).
- if (Expr == R_PC && ((S.getVA() & 1) == 1))
+ if (expr == R_PC && ((s.getVA() & 1) == 1))
return true;
LLVM_FALLTHROUGH;
case R_ARM_CALL: {
- uint64_t Dst = (Expr == R_PLT_PC) ? S.getPltVA() : S.getVA();
- return !inBranchRange(Type, BranchAddr, Dst);
+ uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
+ return !inBranchRange(type, branchAddr, dst);
}
case R_ARM_THM_JUMP19:
case R_ARM_THM_JUMP24:
// Source is Thumb, all PLT entries are ARM so interworking is required.
// Otherwise we need to interwork if Symbol has bit 0 clear (ARM).
- if (Expr == R_PLT_PC || ((S.getVA() & 1) == 0))
+ if (expr == R_PLT_PC || ((s.getVA() & 1) == 0))
return true;
LLVM_FALLTHROUGH;
case R_ARM_THM_CALL: {
- uint64_t Dst = (Expr == R_PLT_PC) ? S.getPltVA() : S.getVA();
- return !inBranchRange(Type, BranchAddr, Dst);
+ uint64_t dst = (expr == R_PLT_PC) ? s.getPltVA() : s.getVA();
+ return !inBranchRange(type, branchAddr, dst);
}
}
return false;
@@ -301,13 +299,13 @@ bool ARM::needsThunk(RelExpr Expr, RelType Type, const InputFile *File,
uint32_t ARM::getThunkSectionSpacing() const {
// The placing of pre-created ThunkSections is controlled by the value
- // ThunkSectionSpacing returned by getThunkSectionSpacing(). The aim is to
+ // thunkSectionSpacing returned by getThunkSectionSpacing(). The aim is to
// place the ThunkSection such that all branches from the InputSections
// prior to the ThunkSection can reach a Thunk placed at the end of the
// ThunkSection. Graphically:
- // | up to ThunkSectionSpacing .text input sections |
+ // | up to thunkSectionSpacing .text input sections |
// | ThunkSection |
- // | up to ThunkSectionSpacing .text input sections |
+ // | up to thunkSectionSpacing .text input sections |
// | ThunkSection |
// Pre-created ThunkSections are spaced roughly 16MiB apart on ARMv7. This
@@ -318,69 +316,68 @@ uint32_t ARM::getThunkSectionSpacing() const {
// Thumb B<cc>.W range +/- 1MiB
// If a branch cannot reach a pre-created ThunkSection a new one will be
// created so we can handle the rare cases of a Thumb 2 conditional branch.
- // We intentionally use a lower size for ThunkSectionSpacing than the maximum
+ // We intentionally use a lower size for thunkSectionSpacing than the maximum
// branch range so the end of the ThunkSection is more likely to be within
// range of the branch instruction that is furthest away. The value we shorten
- // ThunkSectionSpacing by is set conservatively to allow us to create 16,384
+ // thunkSectionSpacing by is set conservatively to allow us to create 16,384
// 12 byte Thunks at any offset in a ThunkSection without risk of a branch to
// one of the Thunks going out of range.
- // On Arm the ThunkSectionSpacing depends on the range of the Thumb Branch
+ // On Arm the thunkSectionSpacing depends on the range of the Thumb Branch
// range. On earlier Architectures such as ARMv4, ARMv5 and ARMv6 (except
// ARMv6T2) the range is +/- 4MiB.
- return (Config->ARMJ1J2BranchEncoding) ? 0x1000000 - 0x30000
+ return (config->armJ1J2BranchEncoding) ? 0x1000000 - 0x30000
: 0x400000 - 0x7500;
}
-bool ARM::inBranchRange(RelType Type, uint64_t Src, uint64_t Dst) const {
- uint64_t Range;
- uint64_t InstrSize;
+bool ARM::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
+ uint64_t range;
+ uint64_t instrSize;
- switch (Type) {
+ switch (type) {
case R_ARM_PC24:
case R_ARM_PLT32:
case R_ARM_JUMP24:
case R_ARM_CALL:
- Range = 0x2000000;
- InstrSize = 4;
+ range = 0x2000000;
+ instrSize = 4;
break;
case R_ARM_THM_JUMP19:
- Range = 0x100000;
- InstrSize = 2;
+ range = 0x100000;
+ instrSize = 2;
break;
case R_ARM_THM_JUMP24:
case R_ARM_THM_CALL:
- Range = Config->ARMJ1J2BranchEncoding ? 0x1000000 : 0x400000;
- InstrSize = 2;
+ range = config->armJ1J2BranchEncoding ? 0x1000000 : 0x400000;
+ instrSize = 2;
break;
default:
return true;
}
// PC at Src is 2 instructions ahead, immediate of branch is signed
- if (Src > Dst)
- Range -= 2 * InstrSize;
+ if (src > dst)
+ range -= 2 * instrSize;
else
- Range += InstrSize;
+ range += instrSize;
- if ((Dst & 0x1) == 0)
+ if ((dst & 0x1) == 0)
// Destination is ARM, if ARM caller then Src is already 4-byte aligned.
// If Thumb Caller (BLX) the Src address has bottom 2 bits cleared to ensure
// destination will be 4 byte aligned.
- Src &= ~0x3;
+ src &= ~0x3;
else
// Bit 0 == 1 denotes Thumb state, it is not part of the range
- Dst &= ~0x1;
+ dst &= ~0x1;
- uint64_t Distance = (Src > Dst) ? Src - Dst : Dst - Src;
- return Distance <= Range;
+ uint64_t distance = (src > dst) ? src - dst : dst - src;
+ return distance <= range;
}
-void ARM::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
- switch (Type) {
+void ARM::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
case R_ARM_ABS32:
case R_ARM_BASE_PREL:
- case R_ARM_GLOB_DAT:
case R_ARM_GOTOFF32:
case R_ARM_GOT_BREL:
case R_ARM_GOT_PREL:
@@ -396,135 +393,132 @@ void ARM::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
case R_ARM_TLS_LE32:
case R_ARM_TLS_TPOFF32:
case R_ARM_TLS_DTPOFF32:
- write32le(Loc, Val);
- break;
- case R_ARM_TLS_DTPMOD32:
- write32le(Loc, 1);
+ write32le(loc, val);
break;
case R_ARM_PREL31:
- checkInt(Loc, Val, 31, Type);
- write32le(Loc, (read32le(Loc) & 0x80000000) | (Val & ~0x80000000));
+ checkInt(loc, val, 31, type);
+ write32le(loc, (read32le(loc) & 0x80000000) | (val & ~0x80000000));
break;
case R_ARM_CALL:
// R_ARM_CALL is used for BL and BLX instructions, depending on the
// value of bit 0 of Val, we must select a BL or BLX instruction
- if (Val & 1) {
+ if (val & 1) {
// If bit 0 of Val is 1 the target is Thumb, we must select a BLX.
// The BLX encoding is 0xfa:H:imm24 where Val = imm24:H:'1'
- checkInt(Loc, Val, 26, Type);
- write32le(Loc, 0xfa000000 | // opcode
- ((Val & 2) << 23) | // H
- ((Val >> 2) & 0x00ffffff)); // imm24
+ checkInt(loc, val, 26, type);
+ write32le(loc, 0xfa000000 | // opcode
+ ((val & 2) << 23) | // H
+ ((val >> 2) & 0x00ffffff)); // imm24
break;
}
- if ((read32le(Loc) & 0xfe000000) == 0xfa000000)
+ if ((read32le(loc) & 0xfe000000) == 0xfa000000)
// BLX (always unconditional) instruction to an ARM Target, select an
// unconditional BL.
- write32le(Loc, 0xeb000000 | (read32le(Loc) & 0x00ffffff));
+ write32le(loc, 0xeb000000 | (read32le(loc) & 0x00ffffff));
// fall through as BL encoding is shared with B
LLVM_FALLTHROUGH;
case R_ARM_JUMP24:
case R_ARM_PC24:
case R_ARM_PLT32:
- checkInt(Loc, Val, 26, Type);
- write32le(Loc, (read32le(Loc) & ~0x00ffffff) | ((Val >> 2) & 0x00ffffff));
+ checkInt(loc, val, 26, type);
+ write32le(loc, (read32le(loc) & ~0x00ffffff) | ((val >> 2) & 0x00ffffff));
break;
case R_ARM_THM_JUMP11:
- checkInt(Loc, Val, 12, Type);
- write16le(Loc, (read32le(Loc) & 0xf800) | ((Val >> 1) & 0x07ff));
+ checkInt(loc, val, 12, type);
+ write16le(loc, (read32le(loc) & 0xf800) | ((val >> 1) & 0x07ff));
break;
case R_ARM_THM_JUMP19:
// Encoding T3: Val = S:J2:J1:imm6:imm11:0
- checkInt(Loc, Val, 21, Type);
- write16le(Loc,
- (read16le(Loc) & 0xfbc0) | // opcode cond
- ((Val >> 10) & 0x0400) | // S
- ((Val >> 12) & 0x003f)); // imm6
- write16le(Loc + 2,
+ checkInt(loc, val, 21, type);
+ write16le(loc,
+ (read16le(loc) & 0xfbc0) | // opcode cond
+ ((val >> 10) & 0x0400) | // S
+ ((val >> 12) & 0x003f)); // imm6
+ write16le(loc + 2,
0x8000 | // opcode
- ((Val >> 8) & 0x0800) | // J2
- ((Val >> 5) & 0x2000) | // J1
- ((Val >> 1) & 0x07ff)); // imm11
+ ((val >> 8) & 0x0800) | // J2
+ ((val >> 5) & 0x2000) | // J1
+ ((val >> 1) & 0x07ff)); // imm11
break;
case R_ARM_THM_CALL:
// R_ARM_THM_CALL is used for BL and BLX instructions, depending on the
// value of bit 0 of Val, we must select a BL or BLX instruction
- if ((Val & 1) == 0) {
+ if ((val & 1) == 0) {
// Ensure BLX destination is 4-byte aligned. As BLX instruction may
// only be two byte aligned. This must be done before overflow check
- Val = alignTo(Val, 4);
+ val = alignTo(val, 4);
}
// Bit 12 is 0 for BLX, 1 for BL
- write16le(Loc + 2, (read16le(Loc + 2) & ~0x1000) | (Val & 1) << 12);
- if (!Config->ARMJ1J2BranchEncoding) {
+ write16le(loc + 2, (read16le(loc + 2) & ~0x1000) | (val & 1) << 12);
+ if (!config->armJ1J2BranchEncoding) {
// Older Arm architectures do not support R_ARM_THM_JUMP24 and have
// different encoding rules and range due to J1 and J2 always being 1.
- checkInt(Loc, Val, 23, Type);
- write16le(Loc,
+ checkInt(loc, val, 23, type);
+ write16le(loc,
0xf000 | // opcode
- ((Val >> 12) & 0x07ff)); // imm11
- write16le(Loc + 2,
- (read16le(Loc + 2) & 0xd000) | // opcode
+ ((val >> 12) & 0x07ff)); // imm11
+ write16le(loc + 2,
+ (read16le(loc + 2) & 0xd000) | // opcode
0x2800 | // J1 == J2 == 1
- ((Val >> 1) & 0x07ff)); // imm11
+ ((val >> 1) & 0x07ff)); // imm11
break;
}
// Fall through as rest of encoding is the same as B.W
LLVM_FALLTHROUGH;
case R_ARM_THM_JUMP24:
// Encoding B T4, BL T1, BLX T2: Val = S:I1:I2:imm10:imm11:0
- checkInt(Loc, Val, 25, Type);
- write16le(Loc,
+ checkInt(loc, val, 25, type);
+ write16le(loc,
0xf000 | // opcode
- ((Val >> 14) & 0x0400) | // S
- ((Val >> 12) & 0x03ff)); // imm10
- write16le(Loc + 2,
- (read16le(Loc + 2) & 0xd000) | // opcode
- (((~(Val >> 10)) ^ (Val >> 11)) & 0x2000) | // J1
- (((~(Val >> 11)) ^ (Val >> 13)) & 0x0800) | // J2
- ((Val >> 1) & 0x07ff)); // imm11
+ ((val >> 14) & 0x0400) | // S
+ ((val >> 12) & 0x03ff)); // imm10
+ write16le(loc + 2,
+ (read16le(loc + 2) & 0xd000) | // opcode
+ (((~(val >> 10)) ^ (val >> 11)) & 0x2000) | // J1
+ (((~(val >> 11)) ^ (val >> 13)) & 0x0800) | // J2
+ ((val >> 1) & 0x07ff)); // imm11
break;
case R_ARM_MOVW_ABS_NC:
case R_ARM_MOVW_PREL_NC:
- write32le(Loc, (read32le(Loc) & ~0x000f0fff) | ((Val & 0xf000) << 4) |
- (Val & 0x0fff));
+ write32le(loc, (read32le(loc) & ~0x000f0fff) | ((val & 0xf000) << 4) |
+ (val & 0x0fff));
break;
case R_ARM_MOVT_ABS:
case R_ARM_MOVT_PREL:
- write32le(Loc, (read32le(Loc) & ~0x000f0fff) |
- (((Val >> 16) & 0xf000) << 4) | ((Val >> 16) & 0xfff));
+ write32le(loc, (read32le(loc) & ~0x000f0fff) |
+ (((val >> 16) & 0xf000) << 4) | ((val >> 16) & 0xfff));
break;
case R_ARM_THM_MOVT_ABS:
case R_ARM_THM_MOVT_PREL:
// Encoding T1: A = imm4:i:imm3:imm8
- write16le(Loc,
+ write16le(loc,
0xf2c0 | // opcode
- ((Val >> 17) & 0x0400) | // i
- ((Val >> 28) & 0x000f)); // imm4
- write16le(Loc + 2,
- (read16le(Loc + 2) & 0x8f00) | // opcode
- ((Val >> 12) & 0x7000) | // imm3
- ((Val >> 16) & 0x00ff)); // imm8
+ ((val >> 17) & 0x0400) | // i
+ ((val >> 28) & 0x000f)); // imm4
+ write16le(loc + 2,
+ (read16le(loc + 2) & 0x8f00) | // opcode
+ ((val >> 12) & 0x7000) | // imm3
+ ((val >> 16) & 0x00ff)); // imm8
break;
case R_ARM_THM_MOVW_ABS_NC:
case R_ARM_THM_MOVW_PREL_NC:
// Encoding T3: A = imm4:i:imm3:imm8
- write16le(Loc,
+ write16le(loc,
0xf240 | // opcode
- ((Val >> 1) & 0x0400) | // i
- ((Val >> 12) & 0x000f)); // imm4
- write16le(Loc + 2,
- (read16le(Loc + 2) & 0x8f00) | // opcode
- ((Val << 4) & 0x7000) | // imm3
- (Val & 0x00ff)); // imm8
+ ((val >> 1) & 0x0400) | // i
+ ((val >> 12) & 0x000f)); // imm4
+ write16le(loc + 2,
+ (read16le(loc + 2) & 0x8f00) | // opcode
+ ((val << 4) & 0x7000) | // imm3
+ (val & 0x00ff)); // imm8
break;
default:
- error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+ error(getErrorLocation(loc) + "unrecognized relocation " + toString(type));
}
}
-int64_t ARM::getImplicitAddend(const uint8_t *Buf, RelType Type) const {
- switch (Type) {
+int64_t ARM::getImplicitAddend(const uint8_t *buf, RelType type) const {
+ switch (type) {
default:
return 0;
case R_ARM_ABS32:
@@ -540,47 +534,47 @@ int64_t ARM::getImplicitAddend(const uint8_t *Buf, RelType Type) const {
case R_ARM_TLS_LDO32:
case R_ARM_TLS_IE32:
case R_ARM_TLS_LE32:
- return SignExtend64<32>(read32le(Buf));
+ return SignExtend64<32>(read32le(buf));
case R_ARM_PREL31:
- return SignExtend64<31>(read32le(Buf));
+ return SignExtend64<31>(read32le(buf));
case R_ARM_CALL:
case R_ARM_JUMP24:
case R_ARM_PC24:
case R_ARM_PLT32:
- return SignExtend64<26>(read32le(Buf) << 2);
+ return SignExtend64<26>(read32le(buf) << 2);
case R_ARM_THM_JUMP11:
- return SignExtend64<12>(read16le(Buf) << 1);
+ return SignExtend64<12>(read16le(buf) << 1);
case R_ARM_THM_JUMP19: {
// Encoding T3: A = S:J2:J1:imm10:imm6:0
- uint16_t Hi = read16le(Buf);
- uint16_t Lo = read16le(Buf + 2);
- return SignExtend64<20>(((Hi & 0x0400) << 10) | // S
- ((Lo & 0x0800) << 8) | // J2
- ((Lo & 0x2000) << 5) | // J1
- ((Hi & 0x003f) << 12) | // imm6
- ((Lo & 0x07ff) << 1)); // imm11:0
+ uint16_t hi = read16le(buf);
+ uint16_t lo = read16le(buf + 2);
+ return SignExtend64<20>(((hi & 0x0400) << 10) | // S
+ ((lo & 0x0800) << 8) | // J2
+ ((lo & 0x2000) << 5) | // J1
+ ((hi & 0x003f) << 12) | // imm6
+ ((lo & 0x07ff) << 1)); // imm11:0
}
case R_ARM_THM_CALL:
- if (!Config->ARMJ1J2BranchEncoding) {
+ if (!config->armJ1J2BranchEncoding) {
// Older Arm architectures do not support R_ARM_THM_JUMP24 and have
// different encoding rules and range due to J1 and J2 always being 1.
- uint16_t Hi = read16le(Buf);
- uint16_t Lo = read16le(Buf + 2);
- return SignExtend64<22>(((Hi & 0x7ff) << 12) | // imm11
- ((Lo & 0x7ff) << 1)); // imm11:0
+ uint16_t hi = read16le(buf);
+ uint16_t lo = read16le(buf + 2);
+ return SignExtend64<22>(((hi & 0x7ff) << 12) | // imm11
+ ((lo & 0x7ff) << 1)); // imm11:0
break;
}
LLVM_FALLTHROUGH;
case R_ARM_THM_JUMP24: {
// Encoding B T4, BL T1, BLX T2: A = S:I1:I2:imm10:imm11:0
// I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
- uint16_t Hi = read16le(Buf);
- uint16_t Lo = read16le(Buf + 2);
- return SignExtend64<24>(((Hi & 0x0400) << 14) | // S
- (~((Lo ^ (Hi << 3)) << 10) & 0x00800000) | // I1
- (~((Lo ^ (Hi << 1)) << 11) & 0x00400000) | // I2
- ((Hi & 0x003ff) << 12) | // imm0
- ((Lo & 0x007ff) << 1)); // imm11:0
+ uint16_t hi = read16le(buf);
+ uint16_t lo = read16le(buf + 2);
+ return SignExtend64<24>(((hi & 0x0400) << 14) | // S
+ (~((lo ^ (hi << 3)) << 10) & 0x00800000) | // I1
+ (~((lo ^ (hi << 1)) << 11) & 0x00400000) | // I2
+ ((hi & 0x003ff) << 12) | // imm0
+ ((lo & 0x007ff) << 1)); // imm11:0
}
// ELF for the ARM Architecture 4.6.1.1 the implicit addend for MOVW and
// MOVT is in the range -32768 <= A < 32768
@@ -588,25 +582,25 @@ int64_t ARM::getImplicitAddend(const uint8_t *Buf, RelType Type) const {
case R_ARM_MOVT_ABS:
case R_ARM_MOVW_PREL_NC:
case R_ARM_MOVT_PREL: {
- uint64_t Val = read32le(Buf) & 0x000f0fff;
- return SignExtend64<16>(((Val & 0x000f0000) >> 4) | (Val & 0x00fff));
+ uint64_t val = read32le(buf) & 0x000f0fff;
+ return SignExtend64<16>(((val & 0x000f0000) >> 4) | (val & 0x00fff));
}
case R_ARM_THM_MOVW_ABS_NC:
case R_ARM_THM_MOVT_ABS:
case R_ARM_THM_MOVW_PREL_NC:
case R_ARM_THM_MOVT_PREL: {
// Encoding T3: A = imm4:i:imm3:imm8
- uint16_t Hi = read16le(Buf);
- uint16_t Lo = read16le(Buf + 2);
- return SignExtend64<16>(((Hi & 0x000f) << 12) | // imm4
- ((Hi & 0x0400) << 1) | // i
- ((Lo & 0x7000) >> 4) | // imm3
- (Lo & 0x00ff)); // imm8
+ uint16_t hi = read16le(buf);
+ uint16_t lo = read16le(buf + 2);
+ return SignExtend64<16>(((hi & 0x000f) << 12) | // imm4
+ ((hi & 0x0400) << 1) | // i
+ ((lo & 0x7000) >> 4) | // imm3
+ (lo & 0x00ff)); // imm8
}
}
}
TargetInfo *elf::getARMTargetInfo() {
- static ARM Target;
- return &Target;
+ static ARM target;
+ return &target;
}
diff --git a/contrib/llvm/tools/lld/ELF/Arch/AVR.cpp b/contrib/llvm/tools/lld/ELF/Arch/AVR.cpp
index 637da3778bd2..869f0fe0c525 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/AVR.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/AVR.cpp
@@ -1,9 +1,8 @@
//===- AVR.cpp ------------------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -44,34 +43,34 @@ namespace {
class AVR final : public TargetInfo {
public:
AVR();
- RelExpr getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const override;
- void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
};
} // namespace
-AVR::AVR() { NoneRel = R_AVR_NONE; }
+AVR::AVR() { noneRel = R_AVR_NONE; }
-RelExpr AVR::getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const {
+RelExpr AVR::getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const {
return R_ABS;
}
-void AVR::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
- switch (Type) {
+void AVR::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
case R_AVR_CALL: {
- uint16_t Hi = Val >> 17;
- uint16_t Lo = Val >> 1;
- write16le(Loc, read16le(Loc) | ((Hi >> 1) << 4) | (Hi & 1));
- write16le(Loc + 2, Lo);
+ uint16_t hi = val >> 17;
+ uint16_t lo = val >> 1;
+ write16le(loc, read16le(loc) | ((hi >> 1) << 4) | (hi & 1));
+ write16le(loc + 2, lo);
break;
}
default:
- error(getErrorLocation(Loc) + "unrecognized reloc " + toString(Type));
+ error(getErrorLocation(loc) + "unrecognized relocation " + toString(type));
}
}
TargetInfo *elf::getAVRTargetInfo() {
- static AVR Target;
- return &Target;
+ static AVR target;
+ return &target;
}
diff --git a/contrib/llvm/tools/lld/ELF/Arch/Hexagon.cpp b/contrib/llvm/tools/lld/ELF/Arch/Hexagon.cpp
index b4d33be2ad39..c497a6df7987 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/Hexagon.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/Hexagon.cpp
@@ -1,9 +1,8 @@
//===-- Hexagon.cpp -------------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -28,65 +27,65 @@ class Hexagon final : public TargetInfo {
public:
Hexagon();
uint32_t calcEFlags() const override;
- RelExpr getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const override;
- void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- void writePltHeader(uint8_t *Buf) const override;
- void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
- int32_t Index, unsigned RelOff) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
+ void writePltHeader(uint8_t *buf) const override;
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
};
} // namespace
Hexagon::Hexagon() {
- PltRel = R_HEX_JMP_SLOT;
- RelativeRel = R_HEX_RELATIVE;
- GotRel = R_HEX_GLOB_DAT;
- GotEntrySize = 4;
+ pltRel = R_HEX_JMP_SLOT;
+ relativeRel = R_HEX_RELATIVE;
+ gotRel = R_HEX_GLOB_DAT;
+ symbolicRel = R_HEX_32;
+
// The zero'th GOT entry is reserved for the address of _DYNAMIC. The
// next 3 are reserved for the dynamic loader.
- GotPltHeaderEntriesNum = 4;
- GotPltEntrySize = 4;
+ gotPltHeaderEntriesNum = 4;
- PltEntrySize = 16;
- PltHeaderSize = 32;
+ pltEntrySize = 16;
+ pltHeaderSize = 32;
// Hexagon Linux uses 64K pages by default.
- DefaultMaxPageSize = 0x10000;
- NoneRel = R_HEX_NONE;
+ defaultMaxPageSize = 0x10000;
+ noneRel = R_HEX_NONE;
}
uint32_t Hexagon::calcEFlags() const {
- assert(!ObjectFiles.empty());
+ assert(!objectFiles.empty());
// The architecture revision must always be equal to or greater than
// greatest revision in the list of inputs.
- uint32_t Ret = 0;
- for (InputFile *F : ObjectFiles) {
- uint32_t EFlags = cast<ObjFile<ELF32LE>>(F)->getObj().getHeader()->e_flags;
- if (EFlags > Ret)
- Ret = EFlags;
+ uint32_t ret = 0;
+ for (InputFile *f : objectFiles) {
+ uint32_t eflags = cast<ObjFile<ELF32LE>>(f)->getObj().getHeader()->e_flags;
+ if (eflags > ret)
+ ret = eflags;
}
- return Ret;
+ return ret;
}
-static uint32_t applyMask(uint32_t Mask, uint32_t Data) {
- uint32_t Result = 0;
- size_t Off = 0;
+static uint32_t applyMask(uint32_t mask, uint32_t data) {
+ uint32_t result = 0;
+ size_t off = 0;
- for (size_t Bit = 0; Bit != 32; ++Bit) {
- uint32_t ValBit = (Data >> Off) & 1;
- uint32_t MaskBit = (Mask >> Bit) & 1;
- if (MaskBit) {
- Result |= (ValBit << Bit);
- ++Off;
+ for (size_t bit = 0; bit != 32; ++bit) {
+ uint32_t valBit = (data >> off) & 1;
+ uint32_t maskBit = (mask >> bit) & 1;
+ if (maskBit) {
+ result |= (valBit << bit);
+ ++off;
}
}
- return Result;
+ return result;
}
-RelExpr Hexagon::getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const {
- switch (Type) {
+RelExpr Hexagon::getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const {
+ switch (type) {
case R_HEX_B9_PCREL:
case R_HEX_B9_PCREL_X:
case R_HEX_B13_PCREL:
@@ -109,16 +108,16 @@ RelExpr Hexagon::getRelExpr(RelType Type, const Symbol &S,
}
}
-static uint32_t findMaskR6(uint32_t Insn) {
+static uint32_t findMaskR6(uint32_t insn) {
// There are (arguably too) many relocation masks for the DSP's
// R_HEX_6_X type. The table below is used to select the correct mask
// for the given instruction.
struct InstructionMask {
- uint32_t CmpMask;
- uint32_t RelocMask;
+ uint32_t cmpMask;
+ uint32_t relocMask;
};
- static const InstructionMask R6[] = {
+ static const InstructionMask r6[] = {
{0x38000000, 0x0000201f}, {0x39000000, 0x0000201f},
{0x3e000000, 0x00001f80}, {0x3f000000, 0x00001f80},
{0x40000000, 0x000020f8}, {0x41000000, 0x000007e0},
@@ -136,124 +135,124 @@ static uint32_t findMaskR6(uint32_t Insn) {
// Duplex forms have a fixed mask and parse bits 15:14 are always
// zero. Non-duplex insns will always have at least one bit set in the
// parse field.
- if ((0xC000 & Insn) == 0x0)
+ if ((0xC000 & insn) == 0x0)
return 0x03f00000;
- for (InstructionMask I : R6)
- if ((0xff000000 & Insn) == I.CmpMask)
- return I.RelocMask;
+ for (InstructionMask i : r6)
+ if ((0xff000000 & insn) == i.cmpMask)
+ return i.relocMask;
error("unrecognized instruction for R_HEX_6 relocation: 0x" +
- utohexstr(Insn));
+ utohexstr(insn));
return 0;
}
-static uint32_t findMaskR8(uint32_t Insn) {
- if ((0xff000000 & Insn) == 0xde000000)
+static uint32_t findMaskR8(uint32_t insn) {
+ if ((0xff000000 & insn) == 0xde000000)
return 0x00e020e8;
- if ((0xff000000 & Insn) == 0x3c000000)
+ if ((0xff000000 & insn) == 0x3c000000)
return 0x0000207f;
return 0x00001fe0;
}
-static uint32_t findMaskR11(uint32_t Insn) {
- if ((0xff000000 & Insn) == 0xa1000000)
+static uint32_t findMaskR11(uint32_t insn) {
+ if ((0xff000000 & insn) == 0xa1000000)
return 0x060020ff;
return 0x06003fe0;
}
-static uint32_t findMaskR16(uint32_t Insn) {
- if ((0xff000000 & Insn) == 0x48000000)
+static uint32_t findMaskR16(uint32_t insn) {
+ if ((0xff000000 & insn) == 0x48000000)
return 0x061f20ff;
- if ((0xff000000 & Insn) == 0x49000000)
+ if ((0xff000000 & insn) == 0x49000000)
return 0x061f3fe0;
- if ((0xff000000 & Insn) == 0x78000000)
+ if ((0xff000000 & insn) == 0x78000000)
return 0x00df3fe0;
- if ((0xff000000 & Insn) == 0xb0000000)
+ if ((0xff000000 & insn) == 0xb0000000)
return 0x0fe03fe0;
error("unrecognized instruction for R_HEX_16_X relocation: 0x" +
- utohexstr(Insn));
+ utohexstr(insn));
return 0;
}
-static void or32le(uint8_t *P, int32_t V) { write32le(P, read32le(P) | V); }
+static void or32le(uint8_t *p, int32_t v) { write32le(p, read32le(p) | v); }
-void Hexagon::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
- switch (Type) {
+void Hexagon::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
case R_HEX_NONE:
break;
case R_HEX_6_PCREL_X:
case R_HEX_6_X:
- or32le(Loc, applyMask(findMaskR6(read32le(Loc)), Val));
+ or32le(loc, applyMask(findMaskR6(read32le(loc)), val));
break;
case R_HEX_8_X:
- or32le(Loc, applyMask(findMaskR8(read32le(Loc)), Val));
+ or32le(loc, applyMask(findMaskR8(read32le(loc)), val));
break;
case R_HEX_9_X:
- or32le(Loc, applyMask(0x00003fe0, Val & 0x3f));
+ or32le(loc, applyMask(0x00003fe0, val & 0x3f));
break;
case R_HEX_10_X:
- or32le(Loc, applyMask(0x00203fe0, Val & 0x3f));
+ or32le(loc, applyMask(0x00203fe0, val & 0x3f));
break;
case R_HEX_11_X:
case R_HEX_GOT_11_X:
- or32le(Loc, applyMask(findMaskR11(read32le(Loc)), Val & 0x3f));
+ or32le(loc, applyMask(findMaskR11(read32le(loc)), val & 0x3f));
break;
case R_HEX_12_X:
- or32le(Loc, applyMask(0x000007e0, Val));
+ or32le(loc, applyMask(0x000007e0, val));
break;
case R_HEX_16_X: // These relocs only have 6 effective bits.
case R_HEX_GOT_16_X:
- or32le(Loc, applyMask(findMaskR16(read32le(Loc)), Val & 0x3f));
+ or32le(loc, applyMask(findMaskR16(read32le(loc)), val & 0x3f));
break;
case R_HEX_32:
case R_HEX_32_PCREL:
- or32le(Loc, Val);
+ or32le(loc, val);
break;
case R_HEX_32_6_X:
case R_HEX_GOT_32_6_X:
- or32le(Loc, applyMask(0x0fff3fff, Val >> 6));
+ or32le(loc, applyMask(0x0fff3fff, val >> 6));
break;
case R_HEX_B9_PCREL:
- or32le(Loc, applyMask(0x003000fe, Val >> 2));
+ or32le(loc, applyMask(0x003000fe, val >> 2));
break;
case R_HEX_B9_PCREL_X:
- or32le(Loc, applyMask(0x003000fe, Val & 0x3f));
+ or32le(loc, applyMask(0x003000fe, val & 0x3f));
break;
case R_HEX_B13_PCREL:
- or32le(Loc, applyMask(0x00202ffe, Val >> 2));
+ or32le(loc, applyMask(0x00202ffe, val >> 2));
break;
case R_HEX_B15_PCREL:
- or32le(Loc, applyMask(0x00df20fe, Val >> 2));
+ or32le(loc, applyMask(0x00df20fe, val >> 2));
break;
case R_HEX_B15_PCREL_X:
- or32le(Loc, applyMask(0x00df20fe, Val & 0x3f));
+ or32le(loc, applyMask(0x00df20fe, val & 0x3f));
break;
case R_HEX_B22_PCREL:
case R_HEX_PLT_B22_PCREL:
- or32le(Loc, applyMask(0x1ff3ffe, Val >> 2));
+ or32le(loc, applyMask(0x1ff3ffe, val >> 2));
break;
case R_HEX_B22_PCREL_X:
- or32le(Loc, applyMask(0x1ff3ffe, Val & 0x3f));
+ or32le(loc, applyMask(0x1ff3ffe, val & 0x3f));
break;
case R_HEX_B32_PCREL_X:
- or32le(Loc, applyMask(0x0fff3fff, Val >> 6));
+ or32le(loc, applyMask(0x0fff3fff, val >> 6));
break;
case R_HEX_HI16:
- or32le(Loc, applyMask(0x00c03fff, Val >> 16));
+ or32le(loc, applyMask(0x00c03fff, val >> 16));
break;
case R_HEX_LO16:
- or32le(Loc, applyMask(0x00c03fff, Val));
+ or32le(loc, applyMask(0x00c03fff, val));
break;
default:
- error(getErrorLocation(Loc) + "unrecognized reloc " + toString(Type));
+ error(getErrorLocation(loc) + "unrecognized relocation " + toString(type));
break;
}
}
-void Hexagon::writePltHeader(uint8_t *Buf) const {
- const uint8_t PltData[] = {
+void Hexagon::writePltHeader(uint8_t *buf) const {
+ const uint8_t pltData[] = {
0x00, 0x40, 0x00, 0x00, // { immext (#0)
0x1c, 0xc0, 0x49, 0x6a, // r28 = add (pc, ##GOT0@PCREL) } # @GOT0
0x0e, 0x42, 0x9c, 0xe2, // { r14 -= add (r28, #16) # offset of GOTn
@@ -263,30 +262,30 @@ void Hexagon::writePltHeader(uint8_t *Buf) const {
0x00, 0xc0, 0x9c, 0x52, // jumpr r28 } # call dynamic linker
0x0c, 0xdb, 0x00, 0x54, // trap0(#0xdb) # bring plt0 into 16byte alignment
};
- memcpy(Buf, PltData, sizeof(PltData));
+ memcpy(buf, pltData, sizeof(pltData));
// Offset from PLT0 to the GOT.
- uint64_t Off = In.GotPlt->getVA() - In.Plt->getVA();
- relocateOne(Buf, R_HEX_B32_PCREL_X, Off);
- relocateOne(Buf + 4, R_HEX_6_PCREL_X, Off);
+ uint64_t off = in.gotPlt->getVA() - in.plt->getVA();
+ relocateOne(buf, R_HEX_B32_PCREL_X, off);
+ relocateOne(buf + 4, R_HEX_6_PCREL_X, off);
}
-void Hexagon::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
- uint64_t PltEntryAddr, int32_t Index,
- unsigned RelOff) const {
- const uint8_t Inst[] = {
+void Hexagon::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
+ const uint8_t inst[] = {
0x00, 0x40, 0x00, 0x00, // { immext (#0)
0x0e, 0xc0, 0x49, 0x6a, // r14 = add (pc, ##GOTn@PCREL) }
0x1c, 0xc0, 0x8e, 0x91, // r28 = memw (r14)
0x00, 0xc0, 0x9c, 0x52, // jumpr r28
};
- memcpy(Buf, Inst, sizeof(Inst));
+ memcpy(buf, inst, sizeof(inst));
- relocateOne(Buf, R_HEX_B32_PCREL_X, GotPltEntryAddr - PltEntryAddr);
- relocateOne(Buf + 4, R_HEX_6_PCREL_X, GotPltEntryAddr - PltEntryAddr);
+ relocateOne(buf, R_HEX_B32_PCREL_X, gotPltEntryAddr - pltEntryAddr);
+ relocateOne(buf + 4, R_HEX_6_PCREL_X, gotPltEntryAddr - pltEntryAddr);
}
TargetInfo *elf::getHexagonTargetInfo() {
- static Hexagon Target;
- return &Target;
+ static Hexagon target;
+ return &target;
}
diff --git a/contrib/llvm/tools/lld/ELF/Arch/MSP430.cpp b/contrib/llvm/tools/lld/ELF/Arch/MSP430.cpp
index fe0c0fe64daf..90664396c85e 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/MSP430.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/MSP430.cpp
@@ -1,9 +1,8 @@
//===- MSP430.cpp ---------------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -34,20 +33,20 @@ namespace {
class MSP430 final : public TargetInfo {
public:
MSP430();
- RelExpr getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const override;
- void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
};
} // namespace
MSP430::MSP430() {
// mov.b #0, r3
- TrapInstr = {0x43, 0x43, 0x43, 0x43};
+ trapInstr = {0x43, 0x43, 0x43, 0x43};
}
-RelExpr MSP430::getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const {
- switch (Type) {
+RelExpr MSP430::getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const {
+ switch (type) {
case R_MSP430_10_PCREL:
case R_MSP430_16_PCREL:
case R_MSP430_16_PCREL_BYTE:
@@ -60,35 +59,35 @@ RelExpr MSP430::getRelExpr(RelType Type, const Symbol &S,
}
}
-void MSP430::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
- switch (Type) {
+void MSP430::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
case R_MSP430_8:
- checkIntUInt(Loc, Val, 8, Type);
- *Loc = Val;
+ checkIntUInt(loc, val, 8, type);
+ *loc = val;
break;
case R_MSP430_16:
case R_MSP430_16_PCREL:
case R_MSP430_16_BYTE:
case R_MSP430_16_PCREL_BYTE:
- checkIntUInt(Loc, Val, 16, Type);
- write16le(Loc, Val);
+ checkIntUInt(loc, val, 16, type);
+ write16le(loc, val);
break;
case R_MSP430_32:
- checkIntUInt(Loc, Val, 32, Type);
- write32le(Loc, Val);
+ checkIntUInt(loc, val, 32, type);
+ write32le(loc, val);
break;
case R_MSP430_10_PCREL: {
- int16_t Offset = ((int16_t)Val >> 1) - 1;
- checkInt(Loc, Offset, 10, Type);
- write16le(Loc, (read16le(Loc) & 0xFC00) | (Offset & 0x3FF));
+ int16_t offset = ((int16_t)val >> 1) - 1;
+ checkInt(loc, offset, 10, type);
+ write16le(loc, (read16le(loc) & 0xFC00) | (offset & 0x3FF));
break;
}
default:
- error(getErrorLocation(Loc) + "unrecognized reloc " + toString(Type));
+ error(getErrorLocation(loc) + "unrecognized relocation " + toString(type));
}
}
TargetInfo *elf::getMSP430TargetInfo() {
- static MSP430 Target;
- return &Target;
+ static MSP430 target;
+ return &target;
}
diff --git a/contrib/llvm/tools/lld/ELF/Arch/Mips.cpp b/contrib/llvm/tools/lld/ELF/Arch/Mips.cpp
index 23b0c1dd8a2d..24b3957acd99 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/Mips.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/Mips.cpp
@@ -1,9 +1,8 @@
//===- MIPS.cpp -----------------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -29,47 +28,47 @@ template <class ELFT> class MIPS final : public TargetInfo {
public:
MIPS();
uint32_t calcEFlags() const override;
- RelExpr getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const override;
- int64_t getImplicitAddend(const uint8_t *Buf, RelType Type) const override;
- RelType getDynRel(RelType Type) const override;
- void writeGotPlt(uint8_t *Buf, const Symbol &S) const override;
- void writePltHeader(uint8_t *Buf) const override;
- void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
- int32_t Index, unsigned RelOff) const override;
- bool needsThunk(RelExpr Expr, RelType Type, const InputFile *File,
- uint64_t BranchAddr, const Symbol &S) const override;
- void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- bool usesOnlyLowPageBits(RelType Type) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
+ RelType getDynRel(RelType type) const override;
+ void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writePltHeader(uint8_t *buf) const override;
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
+ bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
+ uint64_t branchAddr, const Symbol &s) const override;
+ void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
+ bool usesOnlyLowPageBits(RelType type) const override;
};
} // namespace
template <class ELFT> MIPS<ELFT>::MIPS() {
- GotPltHeaderEntriesNum = 2;
- DefaultMaxPageSize = 65536;
- GotEntrySize = sizeof(typename ELFT::uint);
- GotPltEntrySize = sizeof(typename ELFT::uint);
- GotBaseSymInGotPlt = false;
- PltEntrySize = 16;
- PltHeaderSize = 32;
- CopyRel = R_MIPS_COPY;
- NoneRel = R_MIPS_NONE;
- PltRel = R_MIPS_JUMP_SLOT;
- NeedsThunks = true;
+ gotPltHeaderEntriesNum = 2;
+ defaultMaxPageSize = 65536;
+ gotBaseSymInGotPlt = false;
+ pltEntrySize = 16;
+ pltHeaderSize = 32;
+ copyRel = R_MIPS_COPY;
+ noneRel = R_MIPS_NONE;
+ pltRel = R_MIPS_JUMP_SLOT;
+ needsThunks = true;
// Set `sigrie 1` as a trap instruction.
- write32(TrapInstr.data(), 0x04170001);
+ write32(trapInstr.data(), 0x04170001);
if (ELFT::Is64Bits) {
- RelativeRel = (R_MIPS_64 << 8) | R_MIPS_REL32;
- TlsGotRel = R_MIPS_TLS_TPREL64;
- TlsModuleIndexRel = R_MIPS_TLS_DTPMOD64;
- TlsOffsetRel = R_MIPS_TLS_DTPREL64;
+ relativeRel = (R_MIPS_64 << 8) | R_MIPS_REL32;
+ symbolicRel = R_MIPS_64;
+ tlsGotRel = R_MIPS_TLS_TPREL64;
+ tlsModuleIndexRel = R_MIPS_TLS_DTPMOD64;
+ tlsOffsetRel = R_MIPS_TLS_DTPREL64;
} else {
- RelativeRel = R_MIPS_REL32;
- TlsGotRel = R_MIPS_TLS_TPREL32;
- TlsModuleIndexRel = R_MIPS_TLS_DTPMOD32;
- TlsOffsetRel = R_MIPS_TLS_DTPREL32;
+ relativeRel = R_MIPS_REL32;
+ symbolicRel = R_MIPS_32;
+ tlsGotRel = R_MIPS_TLS_TPREL32;
+ tlsModuleIndexRel = R_MIPS_TLS_DTPMOD32;
+ tlsOffsetRel = R_MIPS_TLS_DTPREL32;
}
}
@@ -78,13 +77,13 @@ template <class ELFT> uint32_t MIPS<ELFT>::calcEFlags() const {
}
template <class ELFT>
-RelExpr MIPS<ELFT>::getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const {
+RelExpr MIPS<ELFT>::getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const {
// See comment in the calculateMipsRelChain.
- if (ELFT::Is64Bits || Config->MipsN32Abi)
- Type &= 0xff;
+ if (ELFT::Is64Bits || config->mipsN32Abi)
+ type &= 0xff;
- switch (Type) {
+ switch (type) {
case R_MIPS_JALR:
case R_MICROMIPS_JALR:
return R_HINT;
@@ -108,9 +107,9 @@ RelExpr MIPS<ELFT>::getRelExpr(RelType Type, const Symbol &S,
// offset between start of function and 'gp' value which by default
// equal to the start of .got section. In that case we consider these
// relocations as relative.
- if (&S == ElfSym::MipsGpDisp)
+ if (&s == ElfSym::mipsGpDisp)
return R_MIPS_GOT_GP_PC;
- if (&S == ElfSym::MipsLocalGp)
+ if (&s == ElfSym::mipsLocalGp)
return R_MIPS_GOT_GP;
LLVM_FALLTHROUGH;
case R_MIPS_32:
@@ -147,7 +146,7 @@ RelExpr MIPS<ELFT>::getRelExpr(RelType Type, const Symbol &S,
return R_PC;
case R_MIPS_GOT16:
case R_MICROMIPS_GOT16:
- if (S.isLocal())
+ if (s.isLocal())
return R_MIPS_GOT_LOCAL_PAGE;
LLVM_FALLTHROUGH;
case R_MIPS_CALL16:
@@ -176,209 +175,213 @@ RelExpr MIPS<ELFT>::getRelExpr(RelType Type, const Symbol &S,
case R_MIPS_NONE:
return R_NONE;
default:
- return R_INVALID;
+ error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
+ ") against symbol " + toString(s));
+ return R_NONE;
}
}
-template <class ELFT> RelType MIPS<ELFT>::getDynRel(RelType Type) const {
- if (Type == R_MIPS_32 || Type == R_MIPS_64)
- return RelativeRel;
+template <class ELFT> RelType MIPS<ELFT>::getDynRel(RelType type) const {
+ if (type == symbolicRel)
+ return type;
return R_MIPS_NONE;
}
template <class ELFT>
-void MIPS<ELFT>::writeGotPlt(uint8_t *Buf, const Symbol &) const {
- uint64_t VA = In.Plt->getVA();
+void MIPS<ELFT>::writeGotPlt(uint8_t *buf, const Symbol &) const {
+ uint64_t va = in.plt->getVA();
if (isMicroMips())
- VA |= 1;
- write32<ELFT::TargetEndianness>(Buf, VA);
+ va |= 1;
+ write32<ELFT::TargetEndianness>(buf, va);
}
-template <endianness E> static uint32_t readShuffle(const uint8_t *Loc) {
+template <endianness E> static uint32_t readShuffle(const uint8_t *loc) {
// The major opcode of a microMIPS instruction needs to appear
// in the first 16-bit word (lowest address) for efficient hardware
// decode so that it knows if the instruction is 16-bit or 32-bit
// as early as possible. To do so, little-endian binaries keep 16-bit
// words in a big-endian order. That is why we have to swap these
// words to get a correct value.
- uint32_t V = read32<E>(Loc);
+ uint32_t v = read32<E>(loc);
if (E == support::little)
- return (V << 16) | (V >> 16);
- return V;
+ return (v << 16) | (v >> 16);
+ return v;
}
template <endianness E>
-static void writeValue(uint8_t *Loc, uint64_t V, uint8_t BitsSize,
- uint8_t Shift) {
- uint32_t Instr = read32<E>(Loc);
- uint32_t Mask = 0xffffffff >> (32 - BitsSize);
- uint32_t Data = (Instr & ~Mask) | ((V >> Shift) & Mask);
- write32<E>(Loc, Data);
+static void writeValue(uint8_t *loc, uint64_t v, uint8_t bitsSize,
+ uint8_t shift) {
+ uint32_t instr = read32<E>(loc);
+ uint32_t mask = 0xffffffff >> (32 - bitsSize);
+ uint32_t data = (instr & ~mask) | ((v >> shift) & mask);
+ write32<E>(loc, data);
}
template <endianness E>
-static void writeShuffleValue(uint8_t *Loc, uint64_t V, uint8_t BitsSize,
- uint8_t Shift) {
+static void writeShuffleValue(uint8_t *loc, uint64_t v, uint8_t bitsSize,
+ uint8_t shift) {
// See comments in readShuffle for purpose of this code.
- uint16_t *Words = (uint16_t *)Loc;
+ uint16_t *words = (uint16_t *)loc;
if (E == support::little)
- std::swap(Words[0], Words[1]);
+ std::swap(words[0], words[1]);
- writeValue<E>(Loc, V, BitsSize, Shift);
+ writeValue<E>(loc, v, bitsSize, shift);
if (E == support::little)
- std::swap(Words[0], Words[1]);
+ std::swap(words[0], words[1]);
}
template <endianness E>
-static void writeMicroRelocation16(uint8_t *Loc, uint64_t V, uint8_t BitsSize,
- uint8_t Shift) {
- uint16_t Instr = read16<E>(Loc);
- uint16_t Mask = 0xffff >> (16 - BitsSize);
- uint16_t Data = (Instr & ~Mask) | ((V >> Shift) & Mask);
- write16<E>(Loc, Data);
+static void writeMicroRelocation16(uint8_t *loc, uint64_t v, uint8_t bitsSize,
+ uint8_t shift) {
+ uint16_t instr = read16<E>(loc);
+ uint16_t mask = 0xffff >> (16 - bitsSize);
+ uint16_t data = (instr & ~mask) | ((v >> shift) & mask);
+ write16<E>(loc, data);
}
-template <class ELFT> void MIPS<ELFT>::writePltHeader(uint8_t *Buf) const {
- const endianness E = ELFT::TargetEndianness;
+template <class ELFT> void MIPS<ELFT>::writePltHeader(uint8_t *buf) const {
+ const endianness e = ELFT::TargetEndianness;
if (isMicroMips()) {
- uint64_t GotPlt = In.GotPlt->getVA();
- uint64_t Plt = In.Plt->getVA();
+ uint64_t gotPlt = in.gotPlt->getVA();
+ uint64_t plt = in.plt->getVA();
// Overwrite trap instructions written by Writer::writeTrapInstr.
- memset(Buf, 0, PltHeaderSize);
-
- write16<E>(Buf, isMipsR6() ? 0x7860 : 0x7980); // addiupc v1, (GOTPLT) - .
- write16<E>(Buf + 4, 0xff23); // lw $25, 0($3)
- write16<E>(Buf + 8, 0x0535); // subu16 $2, $2, $3
- write16<E>(Buf + 10, 0x2525); // srl16 $2, $2, 2
- write16<E>(Buf + 12, 0x3302); // addiu $24, $2, -2
- write16<E>(Buf + 14, 0xfffe);
- write16<E>(Buf + 16, 0x0dff); // move $15, $31
+ memset(buf, 0, pltHeaderSize);
+
+ write16<e>(buf, isMipsR6() ? 0x7860 : 0x7980); // addiupc v1, (GOTPLT) - .
+ write16<e>(buf + 4, 0xff23); // lw $25, 0($3)
+ write16<e>(buf + 8, 0x0535); // subu16 $2, $2, $3
+ write16<e>(buf + 10, 0x2525); // srl16 $2, $2, 2
+ write16<e>(buf + 12, 0x3302); // addiu $24, $2, -2
+ write16<e>(buf + 14, 0xfffe);
+ write16<e>(buf + 16, 0x0dff); // move $15, $31
if (isMipsR6()) {
- write16<E>(Buf + 18, 0x0f83); // move $28, $3
- write16<E>(Buf + 20, 0x472b); // jalrc $25
- write16<E>(Buf + 22, 0x0c00); // nop
- relocateOne(Buf, R_MICROMIPS_PC19_S2, GotPlt - Plt);
+ write16<e>(buf + 18, 0x0f83); // move $28, $3
+ write16<e>(buf + 20, 0x472b); // jalrc $25
+ write16<e>(buf + 22, 0x0c00); // nop
+ relocateOne(buf, R_MICROMIPS_PC19_S2, gotPlt - plt);
} else {
- write16<E>(Buf + 18, 0x45f9); // jalrc $25
- write16<E>(Buf + 20, 0x0f83); // move $28, $3
- write16<E>(Buf + 22, 0x0c00); // nop
- relocateOne(Buf, R_MICROMIPS_PC23_S2, GotPlt - Plt);
+ write16<e>(buf + 18, 0x45f9); // jalrc $25
+ write16<e>(buf + 20, 0x0f83); // move $28, $3
+ write16<e>(buf + 22, 0x0c00); // nop
+ relocateOne(buf, R_MICROMIPS_PC23_S2, gotPlt - plt);
}
return;
}
- if (Config->MipsN32Abi) {
- write32<E>(Buf, 0x3c0e0000); // lui $14, %hi(&GOTPLT[0])
- write32<E>(Buf + 4, 0x8dd90000); // lw $25, %lo(&GOTPLT[0])($14)
- write32<E>(Buf + 8, 0x25ce0000); // addiu $14, $14, %lo(&GOTPLT[0])
- write32<E>(Buf + 12, 0x030ec023); // subu $24, $24, $14
- write32<E>(Buf + 16, 0x03e07825); // move $15, $31
- write32<E>(Buf + 20, 0x0018c082); // srl $24, $24, 2
+ if (config->mipsN32Abi) {
+ write32<e>(buf, 0x3c0e0000); // lui $14, %hi(&GOTPLT[0])
+ write32<e>(buf + 4, 0x8dd90000); // lw $25, %lo(&GOTPLT[0])($14)
+ write32<e>(buf + 8, 0x25ce0000); // addiu $14, $14, %lo(&GOTPLT[0])
+ write32<e>(buf + 12, 0x030ec023); // subu $24, $24, $14
+ write32<e>(buf + 16, 0x03e07825); // move $15, $31
+ write32<e>(buf + 20, 0x0018c082); // srl $24, $24, 2
} else if (ELFT::Is64Bits) {
- write32<E>(Buf, 0x3c0e0000); // lui $14, %hi(&GOTPLT[0])
- write32<E>(Buf + 4, 0xddd90000); // ld $25, %lo(&GOTPLT[0])($14)
- write32<E>(Buf + 8, 0x25ce0000); // addiu $14, $14, %lo(&GOTPLT[0])
- write32<E>(Buf + 12, 0x030ec023); // subu $24, $24, $14
- write32<E>(Buf + 16, 0x03e07825); // move $15, $31
- write32<E>(Buf + 20, 0x0018c0c2); // srl $24, $24, 3
+ write32<e>(buf, 0x3c0e0000); // lui $14, %hi(&GOTPLT[0])
+ write32<e>(buf + 4, 0xddd90000); // ld $25, %lo(&GOTPLT[0])($14)
+ write32<e>(buf + 8, 0x25ce0000); // addiu $14, $14, %lo(&GOTPLT[0])
+ write32<e>(buf + 12, 0x030ec023); // subu $24, $24, $14
+ write32<e>(buf + 16, 0x03e07825); // move $15, $31
+ write32<e>(buf + 20, 0x0018c0c2); // srl $24, $24, 3
} else {
- write32<E>(Buf, 0x3c1c0000); // lui $28, %hi(&GOTPLT[0])
- write32<E>(Buf + 4, 0x8f990000); // lw $25, %lo(&GOTPLT[0])($28)
- write32<E>(Buf + 8, 0x279c0000); // addiu $28, $28, %lo(&GOTPLT[0])
- write32<E>(Buf + 12, 0x031cc023); // subu $24, $24, $28
- write32<E>(Buf + 16, 0x03e07825); // move $15, $31
- write32<E>(Buf + 20, 0x0018c082); // srl $24, $24, 2
+ write32<e>(buf, 0x3c1c0000); // lui $28, %hi(&GOTPLT[0])
+ write32<e>(buf + 4, 0x8f990000); // lw $25, %lo(&GOTPLT[0])($28)
+ write32<e>(buf + 8, 0x279c0000); // addiu $28, $28, %lo(&GOTPLT[0])
+ write32<e>(buf + 12, 0x031cc023); // subu $24, $24, $28
+ write32<e>(buf + 16, 0x03e07825); // move $15, $31
+ write32<e>(buf + 20, 0x0018c082); // srl $24, $24, 2
}
- uint32_t JalrInst = Config->ZHazardplt ? 0x0320fc09 : 0x0320f809;
- write32<E>(Buf + 24, JalrInst); // jalr.hb $25 or jalr $25
- write32<E>(Buf + 28, 0x2718fffe); // subu $24, $24, 2
+ uint32_t jalrInst = config->zHazardplt ? 0x0320fc09 : 0x0320f809;
+ write32<e>(buf + 24, jalrInst); // jalr.hb $25 or jalr $25
+ write32<e>(buf + 28, 0x2718fffe); // subu $24, $24, 2
- uint64_t GotPlt = In.GotPlt->getVA();
- writeValue<E>(Buf, GotPlt + 0x8000, 16, 16);
- writeValue<E>(Buf + 4, GotPlt, 16, 0);
- writeValue<E>(Buf + 8, GotPlt, 16, 0);
+ uint64_t gotPlt = in.gotPlt->getVA();
+ writeValue<e>(buf, gotPlt + 0x8000, 16, 16);
+ writeValue<e>(buf + 4, gotPlt, 16, 0);
+ writeValue<e>(buf + 8, gotPlt, 16, 0);
}
template <class ELFT>
-void MIPS<ELFT>::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
- uint64_t PltEntryAddr, int32_t Index,
- unsigned RelOff) const {
- const endianness E = ELFT::TargetEndianness;
+void MIPS<ELFT>::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
+ const endianness e = ELFT::TargetEndianness;
if (isMicroMips()) {
// Overwrite trap instructions written by Writer::writeTrapInstr.
- memset(Buf, 0, PltEntrySize);
+ memset(buf, 0, pltEntrySize);
if (isMipsR6()) {
- write16<E>(Buf, 0x7840); // addiupc $2, (GOTPLT) - .
- write16<E>(Buf + 4, 0xff22); // lw $25, 0($2)
- write16<E>(Buf + 8, 0x0f02); // move $24, $2
- write16<E>(Buf + 10, 0x4723); // jrc $25 / jr16 $25
- relocateOne(Buf, R_MICROMIPS_PC19_S2, GotPltEntryAddr - PltEntryAddr);
+ write16<e>(buf, 0x7840); // addiupc $2, (GOTPLT) - .
+ write16<e>(buf + 4, 0xff22); // lw $25, 0($2)
+ write16<e>(buf + 8, 0x0f02); // move $24, $2
+ write16<e>(buf + 10, 0x4723); // jrc $25 / jr16 $25
+ relocateOne(buf, R_MICROMIPS_PC19_S2, gotPltEntryAddr - pltEntryAddr);
} else {
- write16<E>(Buf, 0x7900); // addiupc $2, (GOTPLT) - .
- write16<E>(Buf + 4, 0xff22); // lw $25, 0($2)
- write16<E>(Buf + 8, 0x4599); // jrc $25 / jr16 $25
- write16<E>(Buf + 10, 0x0f02); // move $24, $2
- relocateOne(Buf, R_MICROMIPS_PC23_S2, GotPltEntryAddr - PltEntryAddr);
+ write16<e>(buf, 0x7900); // addiupc $2, (GOTPLT) - .
+ write16<e>(buf + 4, 0xff22); // lw $25, 0($2)
+ write16<e>(buf + 8, 0x4599); // jrc $25 / jr16 $25
+ write16<e>(buf + 10, 0x0f02); // move $24, $2
+ relocateOne(buf, R_MICROMIPS_PC23_S2, gotPltEntryAddr - pltEntryAddr);
}
return;
}
- uint32_t JrInst = isMipsR6() ? (Config->ZHazardplt ? 0x03200409 : 0x03200009)
- : (Config->ZHazardplt ? 0x03200408 : 0x03200008);
-
- write32<E>(Buf, 0x3c0f0000); // lui $15, %hi(.got.plt entry)
- write32<E>(Buf + 4, 0x8df90000); // l[wd] $25, %lo(.got.plt entry)($15)
- write32<E>(Buf + 8, JrInst); // jr $25 / jr.hb $25
- write32<E>(Buf + 12, 0x25f80000); // addiu $24, $15, %lo(.got.plt entry)
- writeValue<E>(Buf, GotPltEntryAddr + 0x8000, 16, 16);
- writeValue<E>(Buf + 4, GotPltEntryAddr, 16, 0);
- writeValue<E>(Buf + 12, GotPltEntryAddr, 16, 0);
+ uint32_t loadInst = ELFT::Is64Bits ? 0xddf90000 : 0x8df90000;
+ uint32_t jrInst = isMipsR6() ? (config->zHazardplt ? 0x03200409 : 0x03200009)
+ : (config->zHazardplt ? 0x03200408 : 0x03200008);
+ uint32_t addInst = ELFT::Is64Bits ? 0x65f80000 : 0x25f80000;
+
+ write32<e>(buf, 0x3c0f0000); // lui $15, %hi(.got.plt entry)
+ write32<e>(buf + 4, loadInst); // l[wd] $25, %lo(.got.plt entry)($15)
+ write32<e>(buf + 8, jrInst); // jr $25 / jr.hb $25
+ write32<e>(buf + 12, addInst); // [d]addiu $24, $15, %lo(.got.plt entry)
+ writeValue<e>(buf, gotPltEntryAddr + 0x8000, 16, 16);
+ writeValue<e>(buf + 4, gotPltEntryAddr, 16, 0);
+ writeValue<e>(buf + 12, gotPltEntryAddr, 16, 0);
}
template <class ELFT>
-bool MIPS<ELFT>::needsThunk(RelExpr Expr, RelType Type, const InputFile *File,
- uint64_t BranchAddr, const Symbol &S) const {
+bool MIPS<ELFT>::needsThunk(RelExpr expr, RelType type, const InputFile *file,
+ uint64_t branchAddr, const Symbol &s) const {
// Any MIPS PIC code function is invoked with its address in register $t9.
// So if we have a branch instruction from non-PIC code to the PIC one
// we cannot make the jump directly and need to create a small stubs
// to save the target function address.
// See page 3-38 ftp://www.linux-mips.org/pub/linux/mips/doc/ABI/mipsabi.pdf
- if (Type != R_MIPS_26 && Type != R_MICROMIPS_26_S1 &&
- Type != R_MICROMIPS_PC26_S1)
+ if (type != R_MIPS_26 && type != R_MIPS_PC26_S2 &&
+ type != R_MICROMIPS_26_S1 && type != R_MICROMIPS_PC26_S1)
return false;
- auto *F = dyn_cast_or_null<ELFFileBase<ELFT>>(File);
- if (!F)
+ auto *f = dyn_cast_or_null<ObjFile<ELFT>>(file);
+ if (!f)
return false;
// If current file has PIC code, LA25 stub is not required.
- if (F->getObj().getHeader()->e_flags & EF_MIPS_PIC)
+ if (f->getObj().getHeader()->e_flags & EF_MIPS_PIC)
return false;
- auto *D = dyn_cast<Defined>(&S);
+ auto *d = dyn_cast<Defined>(&s);
// LA25 is required if target file has PIC code
// or target symbol is a PIC symbol.
- return D && isMipsPIC<ELFT>(D);
+ return d && isMipsPIC<ELFT>(d);
}
template <class ELFT>
-int64_t MIPS<ELFT>::getImplicitAddend(const uint8_t *Buf, RelType Type) const {
- const endianness E = ELFT::TargetEndianness;
- switch (Type) {
+int64_t MIPS<ELFT>::getImplicitAddend(const uint8_t *buf, RelType type) const {
+ const endianness e = ELFT::TargetEndianness;
+ switch (type) {
case R_MIPS_32:
case R_MIPS_GPREL32:
case R_MIPS_TLS_DTPREL32:
case R_MIPS_TLS_TPREL32:
- return SignExtend64<32>(read32<E>(Buf));
+ return SignExtend64<32>(read32<e>(buf));
case R_MIPS_26:
// FIXME (simon): If the relocation target symbol is not a PLT entry
// we should use another expression for calculation:
// ((A << 2) | (P & 0xf0000000)) >> 2
- return SignExtend64<28>(read32<E>(Buf) << 2);
+ return SignExtend64<28>(read32<e>(buf) << 2);
case R_MIPS_GOT16:
case R_MIPS_HI16:
case R_MIPS_PCHI16:
- return SignExtend64<16>(read32<E>(Buf)) << 16;
+ return SignExtend64<16>(read32<e>(buf)) << 16;
case R_MIPS_GPREL16:
case R_MIPS_LO16:
case R_MIPS_PCLO16:
@@ -386,54 +389,54 @@ int64_t MIPS<ELFT>::getImplicitAddend(const uint8_t *Buf, RelType Type) const {
case R_MIPS_TLS_DTPREL_LO16:
case R_MIPS_TLS_TPREL_HI16:
case R_MIPS_TLS_TPREL_LO16:
- return SignExtend64<16>(read32<E>(Buf));
+ return SignExtend64<16>(read32<e>(buf));
case R_MICROMIPS_GOT16:
case R_MICROMIPS_HI16:
- return SignExtend64<16>(readShuffle<E>(Buf)) << 16;
+ return SignExtend64<16>(readShuffle<e>(buf)) << 16;
case R_MICROMIPS_GPREL16:
case R_MICROMIPS_LO16:
case R_MICROMIPS_TLS_DTPREL_HI16:
case R_MICROMIPS_TLS_DTPREL_LO16:
case R_MICROMIPS_TLS_TPREL_HI16:
case R_MICROMIPS_TLS_TPREL_LO16:
- return SignExtend64<16>(readShuffle<E>(Buf));
+ return SignExtend64<16>(readShuffle<e>(buf));
case R_MICROMIPS_GPREL7_S2:
- return SignExtend64<9>(readShuffle<E>(Buf) << 2);
+ return SignExtend64<9>(readShuffle<e>(buf) << 2);
case R_MIPS_PC16:
- return SignExtend64<18>(read32<E>(Buf) << 2);
+ return SignExtend64<18>(read32<e>(buf) << 2);
case R_MIPS_PC19_S2:
- return SignExtend64<21>(read32<E>(Buf) << 2);
+ return SignExtend64<21>(read32<e>(buf) << 2);
case R_MIPS_PC21_S2:
- return SignExtend64<23>(read32<E>(Buf) << 2);
+ return SignExtend64<23>(read32<e>(buf) << 2);
case R_MIPS_PC26_S2:
- return SignExtend64<28>(read32<E>(Buf) << 2);
+ return SignExtend64<28>(read32<e>(buf) << 2);
case R_MIPS_PC32:
- return SignExtend64<32>(read32<E>(Buf));
+ return SignExtend64<32>(read32<e>(buf));
case R_MICROMIPS_26_S1:
- return SignExtend64<27>(readShuffle<E>(Buf) << 1);
+ return SignExtend64<27>(readShuffle<e>(buf) << 1);
case R_MICROMIPS_PC7_S1:
- return SignExtend64<8>(read16<E>(Buf) << 1);
+ return SignExtend64<8>(read16<e>(buf) << 1);
case R_MICROMIPS_PC10_S1:
- return SignExtend64<11>(read16<E>(Buf) << 1);
+ return SignExtend64<11>(read16<e>(buf) << 1);
case R_MICROMIPS_PC16_S1:
- return SignExtend64<17>(readShuffle<E>(Buf) << 1);
+ return SignExtend64<17>(readShuffle<e>(buf) << 1);
case R_MICROMIPS_PC18_S3:
- return SignExtend64<21>(readShuffle<E>(Buf) << 3);
+ return SignExtend64<21>(readShuffle<e>(buf) << 3);
case R_MICROMIPS_PC19_S2:
- return SignExtend64<21>(readShuffle<E>(Buf) << 2);
+ return SignExtend64<21>(readShuffle<e>(buf) << 2);
case R_MICROMIPS_PC21_S1:
- return SignExtend64<22>(readShuffle<E>(Buf) << 1);
+ return SignExtend64<22>(readShuffle<e>(buf) << 1);
case R_MICROMIPS_PC23_S2:
- return SignExtend64<25>(readShuffle<E>(Buf) << 2);
+ return SignExtend64<25>(readShuffle<e>(buf) << 2);
case R_MICROMIPS_PC26_S1:
- return SignExtend64<27>(readShuffle<E>(Buf) << 1);
+ return SignExtend64<27>(readShuffle<e>(buf) << 1);
default:
return 0;
}
}
static std::pair<uint32_t, uint64_t>
-calculateMipsRelChain(uint8_t *Loc, RelType Type, uint64_t Val) {
+calculateMipsRelChain(uint8_t *loc, RelType type, uint64_t val) {
// MIPS N64 ABI packs multiple relocations into the single relocation
// record. In general, all up to three relocations can have arbitrary
// types. In fact, Clang and GCC uses only a few combinations. For now,
@@ -446,72 +449,134 @@ calculateMipsRelChain(uint8_t *Loc, RelType Type, uint64_t Val) {
// relocations used to modify result of the first one: extend it to
// 64-bit, extract high or low part etc. For details, see part 2.9 Relocation
// at the https://dmz-portal.mips.com/mw/images/8/82/007-4658-001.pdf
- RelType Type2 = (Type >> 8) & 0xff;
- RelType Type3 = (Type >> 16) & 0xff;
- if (Type2 == R_MIPS_NONE && Type3 == R_MIPS_NONE)
- return std::make_pair(Type, Val);
- if (Type2 == R_MIPS_64 && Type3 == R_MIPS_NONE)
- return std::make_pair(Type2, Val);
- if (Type2 == R_MIPS_SUB && (Type3 == R_MIPS_HI16 || Type3 == R_MIPS_LO16))
- return std::make_pair(Type3, -Val);
- error(getErrorLocation(Loc) + "unsupported relocations combination " +
- Twine(Type));
- return std::make_pair(Type & 0xff, Val);
+ RelType type2 = (type >> 8) & 0xff;
+ RelType type3 = (type >> 16) & 0xff;
+ if (type2 == R_MIPS_NONE && type3 == R_MIPS_NONE)
+ return std::make_pair(type, val);
+ if (type2 == R_MIPS_64 && type3 == R_MIPS_NONE)
+ return std::make_pair(type2, val);
+ if (type2 == R_MIPS_SUB && (type3 == R_MIPS_HI16 || type3 == R_MIPS_LO16))
+ return std::make_pair(type3, -val);
+ error(getErrorLocation(loc) + "unsupported relocations combination " +
+ Twine(type));
+ return std::make_pair(type & 0xff, val);
+}
+
+static bool isBranchReloc(RelType type) {
+ return type == R_MIPS_26 || type == R_MIPS_PC26_S2 ||
+ type == R_MIPS_PC21_S2 || type == R_MIPS_PC16;
+}
+
+static bool isMicroBranchReloc(RelType type) {
+ return type == R_MICROMIPS_26_S1 || type == R_MICROMIPS_PC16_S1 ||
+ type == R_MICROMIPS_PC10_S1 || type == R_MICROMIPS_PC7_S1;
}
template <class ELFT>
-void MIPS<ELFT>::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
- const endianness E = ELFT::TargetEndianness;
+static uint64_t fixupCrossModeJump(uint8_t *loc, RelType type, uint64_t val) {
+ // Here we need to detect jump/branch from regular MIPS code
+ // to a microMIPS target and vice versa. In that cases jump
+ // instructions need to be replaced by their "cross-mode"
+ // equivalents.
+ const endianness e = ELFT::TargetEndianness;
+ bool isMicroTgt = val & 0x1;
+ bool isCrossJump = (isMicroTgt && isBranchReloc(type)) ||
+ (!isMicroTgt && isMicroBranchReloc(type));
+ if (!isCrossJump)
+ return val;
+
+ switch (type) {
+ case R_MIPS_26: {
+ uint32_t inst = read32<e>(loc) >> 26;
+ if (inst == 0x3 || inst == 0x1d) { // JAL or JALX
+ writeValue<e>(loc, 0x1d << 26, 32, 0);
+ return val;
+ }
+ break;
+ }
+ case R_MICROMIPS_26_S1: {
+ uint32_t inst = readShuffle<e>(loc) >> 26;
+ if (inst == 0x3d || inst == 0x3c) { // JAL32 or JALX32
+ val >>= 1;
+ writeShuffleValue<e>(loc, 0x3c << 26, 32, 0);
+ return val;
+ }
+ break;
+ }
+ case R_MIPS_PC26_S2:
+ case R_MIPS_PC21_S2:
+ case R_MIPS_PC16:
+ case R_MICROMIPS_PC16_S1:
+ case R_MICROMIPS_PC10_S1:
+ case R_MICROMIPS_PC7_S1:
+ // FIXME (simon): Support valid branch relocations.
+ break;
+ default:
+ llvm_unreachable("unexpected jump/branch relocation");
+ }
+
+ error(getErrorLocation(loc) +
+ "unsupported jump/branch instruction between ISA modes referenced by " +
+ toString(type) + " relocation");
+ return val;
+}
+
+template <class ELFT>
+void MIPS<ELFT>::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
+ const endianness e = ELFT::TargetEndianness;
+
+ if (ELFT::Is64Bits || config->mipsN32Abi)
+ std::tie(type, val) = calculateMipsRelChain(loc, type, val);
- if (ELFT::Is64Bits || Config->MipsN32Abi)
- std::tie(Type, Val) = calculateMipsRelChain(Loc, Type, Val);
+ // Detect cross-mode jump/branch and fix instruction.
+ val = fixupCrossModeJump<ELFT>(loc, type, val);
// Thread pointer and DRP offsets from the start of TLS data area.
// https://www.linux-mips.org/wiki/NPTL
- if (Type == R_MIPS_TLS_DTPREL_HI16 || Type == R_MIPS_TLS_DTPREL_LO16 ||
- Type == R_MIPS_TLS_DTPREL32 || Type == R_MIPS_TLS_DTPREL64 ||
- Type == R_MICROMIPS_TLS_DTPREL_HI16 ||
- Type == R_MICROMIPS_TLS_DTPREL_LO16) {
- Val -= 0x8000;
- } else if (Type == R_MIPS_TLS_TPREL_HI16 || Type == R_MIPS_TLS_TPREL_LO16 ||
- Type == R_MIPS_TLS_TPREL32 || Type == R_MIPS_TLS_TPREL64 ||
- Type == R_MICROMIPS_TLS_TPREL_HI16 ||
- Type == R_MICROMIPS_TLS_TPREL_LO16) {
- Val -= 0x7000;
+ if (type == R_MIPS_TLS_DTPREL_HI16 || type == R_MIPS_TLS_DTPREL_LO16 ||
+ type == R_MIPS_TLS_DTPREL32 || type == R_MIPS_TLS_DTPREL64 ||
+ type == R_MICROMIPS_TLS_DTPREL_HI16 ||
+ type == R_MICROMIPS_TLS_DTPREL_LO16) {
+ val -= 0x8000;
+ } else if (type == R_MIPS_TLS_TPREL_HI16 || type == R_MIPS_TLS_TPREL_LO16 ||
+ type == R_MIPS_TLS_TPREL32 || type == R_MIPS_TLS_TPREL64 ||
+ type == R_MICROMIPS_TLS_TPREL_HI16 ||
+ type == R_MICROMIPS_TLS_TPREL_LO16) {
+ val -= 0x7000;
}
- switch (Type) {
+ switch (type) {
case R_MIPS_32:
case R_MIPS_GPREL32:
case R_MIPS_TLS_DTPREL32:
case R_MIPS_TLS_TPREL32:
- write32<E>(Loc, Val);
+ write32<e>(loc, val);
break;
case R_MIPS_64:
case R_MIPS_TLS_DTPREL64:
case R_MIPS_TLS_TPREL64:
- write64<E>(Loc, Val);
+ write64<e>(loc, val);
break;
case R_MIPS_26:
- writeValue<E>(Loc, Val, 26, 2);
+ writeValue<e>(loc, val, 26, 2);
break;
case R_MIPS_GOT16:
// The R_MIPS_GOT16 relocation's value in "relocatable" linking mode
// is updated addend (not a GOT index). In that case write high 16 bits
// to store a correct addend value.
- if (Config->Relocatable) {
- writeValue<E>(Loc, Val + 0x8000, 16, 16);
+ if (config->relocatable) {
+ writeValue<e>(loc, val + 0x8000, 16, 16);
} else {
- checkInt(Loc, Val, 16, Type);
- writeValue<E>(Loc, Val, 16, 0);
+ checkInt(loc, val, 16, type);
+ writeValue<e>(loc, val, 16, 0);
}
break;
case R_MICROMIPS_GOT16:
- if (Config->Relocatable) {
- writeShuffleValue<E>(Loc, Val + 0x8000, 16, 16);
+ if (config->relocatable) {
+ writeShuffleValue<e>(loc, val + 0x8000, 16, 16);
} else {
- checkInt(Loc, Val, 16, Type);
- writeShuffleValue<E>(Loc, Val, 16, 0);
+ checkInt(loc, val, 16, type);
+ writeShuffleValue<e>(loc, val, 16, 0);
}
break;
case R_MIPS_CALL16:
@@ -521,7 +586,7 @@ void MIPS<ELFT>::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
case R_MIPS_TLS_GD:
case R_MIPS_TLS_GOTTPREL:
case R_MIPS_TLS_LDM:
- checkInt(Loc, Val, 16, Type);
+ checkInt(loc, val, 16, type);
LLVM_FALLTHROUGH;
case R_MIPS_CALL_LO16:
case R_MIPS_GOT_LO16:
@@ -530,13 +595,13 @@ void MIPS<ELFT>::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
case R_MIPS_PCLO16:
case R_MIPS_TLS_DTPREL_LO16:
case R_MIPS_TLS_TPREL_LO16:
- writeValue<E>(Loc, Val, 16, 0);
+ writeValue<e>(loc, val, 16, 0);
break;
case R_MICROMIPS_GPREL16:
case R_MICROMIPS_TLS_GD:
case R_MICROMIPS_TLS_LDM:
- checkInt(Loc, Val, 16, Type);
- writeShuffleValue<E>(Loc, Val, 16, 0);
+ checkInt(loc, val, 16, type);
+ writeShuffleValue<e>(loc, val, 16, 0);
break;
case R_MICROMIPS_CALL16:
case R_MICROMIPS_CALL_LO16:
@@ -544,11 +609,11 @@ void MIPS<ELFT>::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
case R_MICROMIPS_TLS_DTPREL_LO16:
case R_MICROMIPS_TLS_GOTTPREL:
case R_MICROMIPS_TLS_TPREL_LO16:
- writeShuffleValue<E>(Loc, Val, 16, 0);
+ writeShuffleValue<e>(loc, val, 16, 0);
break;
case R_MICROMIPS_GPREL7_S2:
- checkInt(Loc, Val, 7, Type);
- writeShuffleValue<E>(Loc, Val, 7, 2);
+ checkInt(loc, val, 7, type);
+ writeShuffleValue<e>(loc, val, 7, 2);
break;
case R_MIPS_CALL_HI16:
case R_MIPS_GOT_HI16:
@@ -556,113 +621,113 @@ void MIPS<ELFT>::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
case R_MIPS_PCHI16:
case R_MIPS_TLS_DTPREL_HI16:
case R_MIPS_TLS_TPREL_HI16:
- writeValue<E>(Loc, Val + 0x8000, 16, 16);
+ writeValue<e>(loc, val + 0x8000, 16, 16);
break;
case R_MICROMIPS_CALL_HI16:
case R_MICROMIPS_GOT_HI16:
case R_MICROMIPS_HI16:
case R_MICROMIPS_TLS_DTPREL_HI16:
case R_MICROMIPS_TLS_TPREL_HI16:
- writeShuffleValue<E>(Loc, Val + 0x8000, 16, 16);
+ writeShuffleValue<e>(loc, val + 0x8000, 16, 16);
break;
case R_MIPS_HIGHER:
- writeValue<E>(Loc, Val + 0x80008000, 16, 32);
+ writeValue<e>(loc, val + 0x80008000, 16, 32);
break;
case R_MIPS_HIGHEST:
- writeValue<E>(Loc, Val + 0x800080008000, 16, 48);
+ writeValue<e>(loc, val + 0x800080008000, 16, 48);
break;
case R_MIPS_JALR:
case R_MICROMIPS_JALR:
// Ignore this optimization relocation for now
break;
case R_MIPS_PC16:
- checkAlignment(Loc, Val, 4, Type);
- checkInt(Loc, Val, 18, Type);
- writeValue<E>(Loc, Val, 16, 2);
+ checkAlignment(loc, val, 4, type);
+ checkInt(loc, val, 18, type);
+ writeValue<e>(loc, val, 16, 2);
break;
case R_MIPS_PC19_S2:
- checkAlignment(Loc, Val, 4, Type);
- checkInt(Loc, Val, 21, Type);
- writeValue<E>(Loc, Val, 19, 2);
+ checkAlignment(loc, val, 4, type);
+ checkInt(loc, val, 21, type);
+ writeValue<e>(loc, val, 19, 2);
break;
case R_MIPS_PC21_S2:
- checkAlignment(Loc, Val, 4, Type);
- checkInt(Loc, Val, 23, Type);
- writeValue<E>(Loc, Val, 21, 2);
+ checkAlignment(loc, val, 4, type);
+ checkInt(loc, val, 23, type);
+ writeValue<e>(loc, val, 21, 2);
break;
case R_MIPS_PC26_S2:
- checkAlignment(Loc, Val, 4, Type);
- checkInt(Loc, Val, 28, Type);
- writeValue<E>(Loc, Val, 26, 2);
+ checkAlignment(loc, val, 4, type);
+ checkInt(loc, val, 28, type);
+ writeValue<e>(loc, val, 26, 2);
break;
case R_MIPS_PC32:
- writeValue<E>(Loc, Val, 32, 0);
+ writeValue<e>(loc, val, 32, 0);
break;
case R_MICROMIPS_26_S1:
case R_MICROMIPS_PC26_S1:
- checkInt(Loc, Val, 27, Type);
- writeShuffleValue<E>(Loc, Val, 26, 1);
+ checkInt(loc, val, 27, type);
+ writeShuffleValue<e>(loc, val, 26, 1);
break;
case R_MICROMIPS_PC7_S1:
- checkInt(Loc, Val, 8, Type);
- writeMicroRelocation16<E>(Loc, Val, 7, 1);
+ checkInt(loc, val, 8, type);
+ writeMicroRelocation16<e>(loc, val, 7, 1);
break;
case R_MICROMIPS_PC10_S1:
- checkInt(Loc, Val, 11, Type);
- writeMicroRelocation16<E>(Loc, Val, 10, 1);
+ checkInt(loc, val, 11, type);
+ writeMicroRelocation16<e>(loc, val, 10, 1);
break;
case R_MICROMIPS_PC16_S1:
- checkInt(Loc, Val, 17, Type);
- writeShuffleValue<E>(Loc, Val, 16, 1);
+ checkInt(loc, val, 17, type);
+ writeShuffleValue<e>(loc, val, 16, 1);
break;
case R_MICROMIPS_PC18_S3:
- checkInt(Loc, Val, 21, Type);
- writeShuffleValue<E>(Loc, Val, 18, 3);
+ checkInt(loc, val, 21, type);
+ writeShuffleValue<e>(loc, val, 18, 3);
break;
case R_MICROMIPS_PC19_S2:
- checkInt(Loc, Val, 21, Type);
- writeShuffleValue<E>(Loc, Val, 19, 2);
+ checkInt(loc, val, 21, type);
+ writeShuffleValue<e>(loc, val, 19, 2);
break;
case R_MICROMIPS_PC21_S1:
- checkInt(Loc, Val, 22, Type);
- writeShuffleValue<E>(Loc, Val, 21, 1);
+ checkInt(loc, val, 22, type);
+ writeShuffleValue<e>(loc, val, 21, 1);
break;
case R_MICROMIPS_PC23_S2:
- checkInt(Loc, Val, 25, Type);
- writeShuffleValue<E>(Loc, Val, 23, 2);
+ checkInt(loc, val, 25, type);
+ writeShuffleValue<e>(loc, val, 23, 2);
break;
default:
- error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+ llvm_unreachable("unknown relocation");
}
}
-template <class ELFT> bool MIPS<ELFT>::usesOnlyLowPageBits(RelType Type) const {
- return Type == R_MIPS_LO16 || Type == R_MIPS_GOT_OFST ||
- Type == R_MICROMIPS_LO16;
+template <class ELFT> bool MIPS<ELFT>::usesOnlyLowPageBits(RelType type) const {
+ return type == R_MIPS_LO16 || type == R_MIPS_GOT_OFST ||
+ type == R_MICROMIPS_LO16;
}
// Return true if the symbol is a PIC function.
-template <class ELFT> bool elf::isMipsPIC(const Defined *Sym) {
- if (!Sym->isFunc())
+template <class ELFT> bool elf::isMipsPIC(const Defined *sym) {
+ if (!sym->isFunc())
return false;
- if (Sym->StOther & STO_MIPS_PIC)
+ if (sym->stOther & STO_MIPS_PIC)
return true;
- if (!Sym->Section)
+ if (!sym->section)
return false;
- ObjFile<ELFT> *File =
- cast<InputSectionBase>(Sym->Section)->template getFile<ELFT>();
- if (!File)
+ ObjFile<ELFT> *file =
+ cast<InputSectionBase>(sym->section)->template getFile<ELFT>();
+ if (!file)
return false;
- return File->getObj().getHeader()->e_flags & EF_MIPS_PIC;
+ return file->getObj().getHeader()->e_flags & EF_MIPS_PIC;
}
template <class ELFT> TargetInfo *elf::getMipsTargetInfo() {
- static MIPS<ELFT> Target;
- return &Target;
+ static MIPS<ELFT> target;
+ return &target;
}
template TargetInfo *elf::getMipsTargetInfo<ELF32LE>();
diff --git a/contrib/llvm/tools/lld/ELF/Arch/MipsArchTree.cpp b/contrib/llvm/tools/lld/ELF/Arch/MipsArchTree.cpp
index 98ceac3075e0..f64d03756457 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/MipsArchTree.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/MipsArchTree.cpp
@@ -1,9 +1,8 @@
//===- MipsArchTree.cpp --------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===---------------------------------------------------------------------===//
//
@@ -29,18 +28,18 @@ using namespace lld::elf;
namespace {
struct ArchTreeEdge {
- uint32_t Child;
- uint32_t Parent;
+ uint32_t child;
+ uint32_t parent;
};
struct FileFlags {
- InputFile *File;
- uint32_t Flags;
+ InputFile *file;
+ uint32_t flags;
};
} // namespace
-static StringRef getAbiName(uint32_t Flags) {
- switch (Flags) {
+static StringRef getAbiName(uint32_t flags) {
+ switch (flags) {
case 0:
return "n64";
case EF_MIPS_ABI2:
@@ -58,76 +57,76 @@ static StringRef getAbiName(uint32_t Flags) {
}
}
-static StringRef getNanName(bool IsNan2008) {
- return IsNan2008 ? "2008" : "legacy";
+static StringRef getNanName(bool isNan2008) {
+ return isNan2008 ? "2008" : "legacy";
}
-static StringRef getFpName(bool IsFp64) { return IsFp64 ? "64" : "32"; }
+static StringRef getFpName(bool isFp64) { return isFp64 ? "64" : "32"; }
-static void checkFlags(ArrayRef<FileFlags> Files) {
- assert(!Files.empty() && "expected non-empty file list");
+static void checkFlags(ArrayRef<FileFlags> files) {
+ assert(!files.empty() && "expected non-empty file list");
- uint32_t ABI = Files[0].Flags & (EF_MIPS_ABI | EF_MIPS_ABI2);
- bool Nan = Files[0].Flags & EF_MIPS_NAN2008;
- bool Fp = Files[0].Flags & EF_MIPS_FP64;
+ uint32_t abi = files[0].flags & (EF_MIPS_ABI | EF_MIPS_ABI2);
+ bool nan = files[0].flags & EF_MIPS_NAN2008;
+ bool fp = files[0].flags & EF_MIPS_FP64;
- for (const FileFlags &F : Files) {
- if (Config->Is64 && F.Flags & EF_MIPS_MICROMIPS)
- error(toString(F.File) + ": microMIPS 64-bit is not supported");
+ for (const FileFlags &f : files) {
+ if (config->is64 && f.flags & EF_MIPS_MICROMIPS)
+ error(toString(f.file) + ": microMIPS 64-bit is not supported");
- uint32_t ABI2 = F.Flags & (EF_MIPS_ABI | EF_MIPS_ABI2);
- if (ABI != ABI2)
- error(toString(F.File) + ": ABI '" + getAbiName(ABI2) +
- "' is incompatible with target ABI '" + getAbiName(ABI) + "'");
+ uint32_t abi2 = f.flags & (EF_MIPS_ABI | EF_MIPS_ABI2);
+ if (abi != abi2)
+ error(toString(f.file) + ": ABI '" + getAbiName(abi2) +
+ "' is incompatible with target ABI '" + getAbiName(abi) + "'");
- bool Nan2 = F.Flags & EF_MIPS_NAN2008;
- if (Nan != Nan2)
- error(toString(F.File) + ": -mnan=" + getNanName(Nan2) +
- " is incompatible with target -mnan=" + getNanName(Nan));
+ bool nan2 = f.flags & EF_MIPS_NAN2008;
+ if (nan != nan2)
+ error(toString(f.file) + ": -mnan=" + getNanName(nan2) +
+ " is incompatible with target -mnan=" + getNanName(nan));
- bool Fp2 = F.Flags & EF_MIPS_FP64;
- if (Fp != Fp2)
- error(toString(F.File) + ": -mfp" + getFpName(Fp2) +
- " is incompatible with target -mfp" + getFpName(Fp));
+ bool fp2 = f.flags & EF_MIPS_FP64;
+ if (fp != fp2)
+ error(toString(f.file) + ": -mfp" + getFpName(fp2) +
+ " is incompatible with target -mfp" + getFpName(fp));
}
}
-static uint32_t getMiscFlags(ArrayRef<FileFlags> Files) {
- uint32_t Ret = 0;
- for (const FileFlags &F : Files)
- Ret |= F.Flags &
+static uint32_t getMiscFlags(ArrayRef<FileFlags> files) {
+ uint32_t ret = 0;
+ for (const FileFlags &f : files)
+ ret |= f.flags &
(EF_MIPS_ABI | EF_MIPS_ABI2 | EF_MIPS_ARCH_ASE | EF_MIPS_NOREORDER |
EF_MIPS_MICROMIPS | EF_MIPS_NAN2008 | EF_MIPS_32BITMODE);
- return Ret;
+ return ret;
}
-static uint32_t getPicFlags(ArrayRef<FileFlags> Files) {
+static uint32_t getPicFlags(ArrayRef<FileFlags> files) {
// Check PIC/non-PIC compatibility.
- bool IsPic = Files[0].Flags & (EF_MIPS_PIC | EF_MIPS_CPIC);
- for (const FileFlags &F : Files.slice(1)) {
- bool IsPic2 = F.Flags & (EF_MIPS_PIC | EF_MIPS_CPIC);
- if (IsPic && !IsPic2)
- warn(toString(F.File) +
+ bool isPic = files[0].flags & (EF_MIPS_PIC | EF_MIPS_CPIC);
+ for (const FileFlags &f : files.slice(1)) {
+ bool isPic2 = f.flags & (EF_MIPS_PIC | EF_MIPS_CPIC);
+ if (isPic && !isPic2)
+ warn(toString(f.file) +
": linking non-abicalls code with abicalls code " +
- toString(Files[0].File));
- if (!IsPic && IsPic2)
- warn(toString(F.File) +
+ toString(files[0].file));
+ if (!isPic && isPic2)
+ warn(toString(f.file) +
": linking abicalls code with non-abicalls code " +
- toString(Files[0].File));
+ toString(files[0].file));
}
// Compute the result PIC/non-PIC flag.
- uint32_t Ret = Files[0].Flags & (EF_MIPS_PIC | EF_MIPS_CPIC);
- for (const FileFlags &F : Files.slice(1))
- Ret &= F.Flags & (EF_MIPS_PIC | EF_MIPS_CPIC);
+ uint32_t ret = files[0].flags & (EF_MIPS_PIC | EF_MIPS_CPIC);
+ for (const FileFlags &f : files.slice(1))
+ ret &= f.flags & (EF_MIPS_PIC | EF_MIPS_CPIC);
// PIC code is inherently CPIC and may not set CPIC flag explicitly.
- if (Ret & EF_MIPS_PIC)
- Ret |= EF_MIPS_CPIC;
- return Ret;
+ if (ret & EF_MIPS_PIC)
+ ret |= EF_MIPS_CPIC;
+ return ret;
}
-static ArchTreeEdge ArchTree[] = {
+static ArchTreeEdge archTree[] = {
// MIPS32R6 and MIPS64R6 are not compatible with other extensions
// MIPS64R2 extensions.
{EF_MIPS_ARCH_64R2 | EF_MIPS_MACH_OCTEON3, EF_MIPS_ARCH_64R2},
@@ -167,25 +166,25 @@ static ArchTreeEdge ArchTree[] = {
{EF_MIPS_ARCH_2, EF_MIPS_ARCH_1},
};
-static bool isArchMatched(uint32_t New, uint32_t Res) {
- if (New == Res)
+static bool isArchMatched(uint32_t New, uint32_t res) {
+ if (New == res)
return true;
- if (New == EF_MIPS_ARCH_32 && isArchMatched(EF_MIPS_ARCH_64, Res))
+ if (New == EF_MIPS_ARCH_32 && isArchMatched(EF_MIPS_ARCH_64, res))
return true;
- if (New == EF_MIPS_ARCH_32R2 && isArchMatched(EF_MIPS_ARCH_64R2, Res))
+ if (New == EF_MIPS_ARCH_32R2 && isArchMatched(EF_MIPS_ARCH_64R2, res))
return true;
- for (const auto &Edge : ArchTree) {
- if (Res == Edge.Child) {
- Res = Edge.Parent;
- if (Res == New)
+ for (const auto &edge : archTree) {
+ if (res == edge.child) {
+ res = edge.parent;
+ if (res == New)
return true;
}
}
return false;
}
-static StringRef getMachName(uint32_t Flags) {
- switch (Flags & EF_MIPS_MACH) {
+static StringRef getMachName(uint32_t flags) {
+ switch (flags & EF_MIPS_MACH) {
case EF_MIPS_MACH_NONE:
return "";
case EF_MIPS_MACH_3900:
@@ -229,8 +228,8 @@ static StringRef getMachName(uint32_t Flags) {
}
}
-static StringRef getArchName(uint32_t Flags) {
- switch (Flags & EF_MIPS_ARCH) {
+static StringRef getArchName(uint32_t flags) {
+ switch (flags & EF_MIPS_ARCH) {
case EF_MIPS_ARCH_1:
return "mips1";
case EF_MIPS_ARCH_2:
@@ -258,12 +257,12 @@ static StringRef getArchName(uint32_t Flags) {
}
}
-static std::string getFullArchName(uint32_t Flags) {
- StringRef Arch = getArchName(Flags);
- StringRef Mach = getMachName(Flags);
- if (Mach.empty())
- return Arch.str();
- return (Arch + " (" + Mach + ")").str();
+static std::string getFullArchName(uint32_t flags) {
+ StringRef arch = getArchName(flags);
+ StringRef mach = getMachName(flags);
+ if (mach.empty())
+ return arch.str();
+ return (arch + " (" + mach + ")").str();
}
// There are (arguably too) many MIPS ISAs out there. Their relationships
@@ -275,55 +274,55 @@ static std::string getFullArchName(uint32_t Flags) {
// Output file gets EF_MIPS_ARCH_2 flag. From the other side mips3 and mips32
// are incompatible because nor mips3 is a parent for misp32, nor mips32
// is a parent for mips3.
-static uint32_t getArchFlags(ArrayRef<FileFlags> Files) {
- uint32_t Ret = Files[0].Flags & (EF_MIPS_ARCH | EF_MIPS_MACH);
+static uint32_t getArchFlags(ArrayRef<FileFlags> files) {
+ uint32_t ret = files[0].flags & (EF_MIPS_ARCH | EF_MIPS_MACH);
- for (const FileFlags &F : Files.slice(1)) {
- uint32_t New = F.Flags & (EF_MIPS_ARCH | EF_MIPS_MACH);
+ for (const FileFlags &f : files.slice(1)) {
+ uint32_t New = f.flags & (EF_MIPS_ARCH | EF_MIPS_MACH);
// Check ISA compatibility.
- if (isArchMatched(New, Ret))
+ if (isArchMatched(New, ret))
continue;
- if (!isArchMatched(Ret, New)) {
- error("incompatible target ISA:\n>>> " + toString(Files[0].File) + ": " +
- getFullArchName(Ret) + "\n>>> " + toString(F.File) + ": " +
+ if (!isArchMatched(ret, New)) {
+ error("incompatible target ISA:\n>>> " + toString(files[0].file) + ": " +
+ getFullArchName(ret) + "\n>>> " + toString(f.file) + ": " +
getFullArchName(New));
return 0;
}
- Ret = New;
+ ret = New;
}
- return Ret;
+ return ret;
}
template <class ELFT> uint32_t elf::calcMipsEFlags() {
- std::vector<FileFlags> V;
- for (InputFile *F : ObjectFiles)
- V.push_back({F, cast<ObjFile<ELFT>>(F)->getObj().getHeader()->e_flags});
- if (V.empty())
+ std::vector<FileFlags> v;
+ for (InputFile *f : objectFiles)
+ v.push_back({f, cast<ObjFile<ELFT>>(f)->getObj().getHeader()->e_flags});
+ if (v.empty())
return 0;
- checkFlags(V);
- return getMiscFlags(V) | getPicFlags(V) | getArchFlags(V);
+ checkFlags(v);
+ return getMiscFlags(v) | getPicFlags(v) | getArchFlags(v);
}
-static int compareMipsFpAbi(uint8_t FpA, uint8_t FpB) {
- if (FpA == FpB)
+static int compareMipsFpAbi(uint8_t fpA, uint8_t fpB) {
+ if (fpA == fpB)
return 0;
- if (FpB == Mips::Val_GNU_MIPS_ABI_FP_ANY)
+ if (fpB == Mips::Val_GNU_MIPS_ABI_FP_ANY)
return 1;
- if (FpB == Mips::Val_GNU_MIPS_ABI_FP_64A &&
- FpA == Mips::Val_GNU_MIPS_ABI_FP_64)
+ if (fpB == Mips::Val_GNU_MIPS_ABI_FP_64A &&
+ fpA == Mips::Val_GNU_MIPS_ABI_FP_64)
return 1;
- if (FpB != Mips::Val_GNU_MIPS_ABI_FP_XX)
+ if (fpB != Mips::Val_GNU_MIPS_ABI_FP_XX)
return -1;
- if (FpA == Mips::Val_GNU_MIPS_ABI_FP_DOUBLE ||
- FpA == Mips::Val_GNU_MIPS_ABI_FP_64 ||
- FpA == Mips::Val_GNU_MIPS_ABI_FP_64A)
+ if (fpA == Mips::Val_GNU_MIPS_ABI_FP_DOUBLE ||
+ fpA == Mips::Val_GNU_MIPS_ABI_FP_64 ||
+ fpA == Mips::Val_GNU_MIPS_ABI_FP_64A)
return 1;
return -1;
}
-static StringRef getMipsFpAbiName(uint8_t FpAbi) {
- switch (FpAbi) {
+static StringRef getMipsFpAbiName(uint8_t fpAbi) {
+ switch (fpAbi) {
case Mips::Val_GNU_MIPS_ABI_FP_ANY:
return "any";
case Mips::Val_GNU_MIPS_ABI_FP_DOUBLE:
@@ -345,43 +344,43 @@ static StringRef getMipsFpAbiName(uint8_t FpAbi) {
}
}
-uint8_t elf::getMipsFpAbiFlag(uint8_t OldFlag, uint8_t NewFlag,
- StringRef FileName) {
- if (compareMipsFpAbi(NewFlag, OldFlag) >= 0)
- return NewFlag;
- if (compareMipsFpAbi(OldFlag, NewFlag) < 0)
- error(FileName + ": floating point ABI '" + getMipsFpAbiName(NewFlag) +
+uint8_t elf::getMipsFpAbiFlag(uint8_t oldFlag, uint8_t newFlag,
+ StringRef fileName) {
+ if (compareMipsFpAbi(newFlag, oldFlag) >= 0)
+ return newFlag;
+ if (compareMipsFpAbi(oldFlag, newFlag) < 0)
+ error(fileName + ": floating point ABI '" + getMipsFpAbiName(newFlag) +
"' is incompatible with target floating point ABI '" +
- getMipsFpAbiName(OldFlag) + "'");
- return OldFlag;
+ getMipsFpAbiName(oldFlag) + "'");
+ return oldFlag;
}
-template <class ELFT> static bool isN32Abi(const InputFile *F) {
- if (auto *EF = dyn_cast<ELFFileBase<ELFT>>(F))
- return EF->getObj().getHeader()->e_flags & EF_MIPS_ABI2;
+template <class ELFT> static bool isN32Abi(const InputFile *f) {
+ if (auto *ef = dyn_cast<ELFFileBase>(f))
+ return ef->template getObj<ELFT>().getHeader()->e_flags & EF_MIPS_ABI2;
return false;
}
-bool elf::isMipsN32Abi(const InputFile *F) {
- switch (Config->EKind) {
+bool elf::isMipsN32Abi(const InputFile *f) {
+ switch (config->ekind) {
case ELF32LEKind:
- return isN32Abi<ELF32LE>(F);
+ return isN32Abi<ELF32LE>(f);
case ELF32BEKind:
- return isN32Abi<ELF32BE>(F);
+ return isN32Abi<ELF32BE>(f);
case ELF64LEKind:
- return isN32Abi<ELF64LE>(F);
+ return isN32Abi<ELF64LE>(f);
case ELF64BEKind:
- return isN32Abi<ELF64BE>(F);
+ return isN32Abi<ELF64BE>(f);
default:
llvm_unreachable("unknown Config->EKind");
}
}
-bool elf::isMicroMips() { return Config->EFlags & EF_MIPS_MICROMIPS; }
+bool elf::isMicroMips() { return config->eflags & EF_MIPS_MICROMIPS; }
bool elf::isMipsR6() {
- uint32_t Arch = Config->EFlags & EF_MIPS_ARCH;
- return Arch == EF_MIPS_ARCH_32R6 || Arch == EF_MIPS_ARCH_64R6;
+ uint32_t arch = config->eflags & EF_MIPS_ARCH;
+ return arch == EF_MIPS_ARCH_32R6 || arch == EF_MIPS_ARCH_64R6;
}
template uint32_t elf::calcMipsEFlags<ELF32LE>();
diff --git a/contrib/llvm/tools/lld/ELF/Arch/PPC.cpp b/contrib/llvm/tools/lld/ELF/Arch/PPC.cpp
index 767378067341..46c5891e4f8a 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/PPC.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/PPC.cpp
@@ -1,13 +1,14 @@
//===- PPC.cpp ------------------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
+#include "OutputSections.h"
#include "Symbols.h"
+#include "SyntheticSections.h"
#include "Target.h"
#include "lld/Common/ErrorHandler.h"
#include "llvm/Support/Endian.h"
@@ -22,60 +23,410 @@ namespace {
class PPC final : public TargetInfo {
public:
PPC();
- void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- RelExpr getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ RelType getDynRel(RelType type) const override;
+ void writeGotHeader(uint8_t *buf) const override;
+ void writePltHeader(uint8_t *buf) const override {
+ llvm_unreachable("should call writePPC32GlinkSection() instead");
+ }
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override {
+ llvm_unreachable("should call writePPC32GlinkSection() instead");
+ }
+ void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
+ bool needsThunk(RelExpr expr, RelType relocType, const InputFile *file,
+ uint64_t branchAddr, const Symbol &s) const override;
+ uint32_t getThunkSectionSpacing() const override;
+ bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
+ void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
+ RelExpr adjustRelaxExpr(RelType type, const uint8_t *data,
+ RelExpr expr) const override;
+ int getTlsGdRelaxSkip(RelType type) const override;
+ void relaxTlsGdToIe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsGdToLe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsLdToLe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsIeToLe(uint8_t *loc, RelType type, uint64_t val) const override;
};
} // namespace
+static uint16_t lo(uint32_t v) { return v; }
+static uint16_t ha(uint32_t v) { return (v + 0x8000) >> 16; }
+
+static uint32_t readFromHalf16(const uint8_t *loc) {
+ return read32(config->isLE ? loc : loc - 2);
+}
+
+static void writeFromHalf16(uint8_t *loc, uint32_t insn) {
+ write32(config->isLE ? loc : loc - 2, insn);
+}
+
+void elf::writePPC32GlinkSection(uint8_t *buf, size_t numEntries) {
+ // On PPC Secure PLT ABI, bl foo@plt jumps to a call stub, which loads an
+ // absolute address from a specific .plt slot (usually called .got.plt on
+ // other targets) and jumps there.
+ //
+ // a) With immediate binding (BIND_NOW), the .plt entry is resolved at load
+ // time. The .glink section is not used.
+ // b) With lazy binding, the .plt entry points to a `b PLTresolve`
+ // instruction in .glink, filled in by PPC::writeGotPlt().
+
+ // Write N `b PLTresolve` first.
+ for (size_t i = 0; i != numEntries; ++i)
+ write32(buf + 4 * i, 0x48000000 | 4 * (numEntries - i));
+ buf += 4 * numEntries;
+
+ // Then write PLTresolve(), which has two forms: PIC and non-PIC. PLTresolve()
+ // computes the PLT index (by computing the distance from the landing b to
+ // itself) and calls _dl_runtime_resolve() (in glibc).
+ uint32_t got = in.got->getVA();
+ uint32_t glink = in.plt->getVA(); // VA of .glink
+ const uint8_t *end = buf + 64;
+ if (config->isPic) {
+ uint32_t afterBcl = in.plt->getSize() - target->pltHeaderSize + 12;
+ uint32_t gotBcl = got + 4 - (glink + afterBcl);
+ write32(buf + 0, 0x3d6b0000 | ha(afterBcl)); // addis r11,r11,1f-glink@ha
+ write32(buf + 4, 0x7c0802a6); // mflr r0
+ write32(buf + 8, 0x429f0005); // bcl 20,30,.+4
+ write32(buf + 12, 0x396b0000 | lo(afterBcl)); // 1: addi r11,r11,1b-.glink@l
+ write32(buf + 16, 0x7d8802a6); // mflr r12
+ write32(buf + 20, 0x7c0803a6); // mtlr r0
+ write32(buf + 24, 0x7d6c5850); // sub r11,r11,r12
+ write32(buf + 28, 0x3d8c0000 | ha(gotBcl)); // addis 12,12,GOT+4-1b@ha
+ if (ha(gotBcl) == ha(gotBcl + 4)) {
+ write32(buf + 32, 0x800c0000 | lo(gotBcl)); // lwz r0,r12,GOT+4-1b@l(r12)
+ write32(buf + 36,
+ 0x818c0000 | lo(gotBcl + 4)); // lwz r12,r12,GOT+8-1b@l(r12)
+ } else {
+ write32(buf + 32, 0x840c0000 | lo(gotBcl)); // lwzu r0,r12,GOT+4-1b@l(r12)
+ write32(buf + 36, 0x818c0000 | 4); // lwz r12,r12,4(r12)
+ }
+ write32(buf + 40, 0x7c0903a6); // mtctr 0
+ write32(buf + 44, 0x7c0b5a14); // add r0,11,11
+ write32(buf + 48, 0x7d605a14); // add r11,0,11
+ write32(buf + 52, 0x4e800420); // bctr
+ buf += 56;
+ } else {
+ write32(buf + 0, 0x3d800000 | ha(got + 4)); // lis r12,GOT+4@ha
+ write32(buf + 4, 0x3d6b0000 | ha(-glink)); // addis r11,r11,-Glink@ha
+ if (ha(got + 4) == ha(got + 8))
+ write32(buf + 8, 0x800c0000 | lo(got + 4)); // lwz r0,GOT+4@l(r12)
+ else
+ write32(buf + 8, 0x840c0000 | lo(got + 4)); // lwzu r0,GOT+4@l(r12)
+ write32(buf + 12, 0x396b0000 | lo(-glink)); // addi r11,r11,-Glink@l
+ write32(buf + 16, 0x7c0903a6); // mtctr r0
+ write32(buf + 20, 0x7c0b5a14); // add r0,r11,r11
+ if (ha(got + 4) == ha(got + 8))
+ write32(buf + 24, 0x818c0000 | lo(got + 8)); // lwz r12,GOT+8@ha(r12)
+ else
+ write32(buf + 24, 0x818c0000 | 4); // lwz r12,4(r12)
+ write32(buf + 28, 0x7d605a14); // add r11,r0,r11
+ write32(buf + 32, 0x4e800420); // bctr
+ buf += 36;
+ }
+
+ // Pad with nop. They should not be executed.
+ for (; buf < end; buf += 4)
+ write32(buf, 0x60000000);
+}
+
PPC::PPC() {
- NoneRel = R_PPC_NONE;
- GotBaseSymOff = 0x8000;
- GotBaseSymInGotPlt = false;
+ gotRel = R_PPC_GLOB_DAT;
+ noneRel = R_PPC_NONE;
+ pltRel = R_PPC_JMP_SLOT;
+ relativeRel = R_PPC_RELATIVE;
+ iRelativeRel = R_PPC_IRELATIVE;
+ symbolicRel = R_PPC_ADDR32;
+ gotBaseSymInGotPlt = false;
+ gotHeaderEntriesNum = 3;
+ gotPltHeaderEntriesNum = 0;
+ pltHeaderSize = 64; // size of PLTresolve in .glink
+ pltEntrySize = 4;
+
+ needsThunks = true;
+
+ tlsModuleIndexRel = R_PPC_DTPMOD32;
+ tlsOffsetRel = R_PPC_DTPREL32;
+ tlsGotRel = R_PPC_TPREL32;
+
+ defaultMaxPageSize = 65536;
+ defaultImageBase = 0x10000000;
+
+ write32(trapInstr.data(), 0x7fe00008);
+}
+
+void PPC::writeGotHeader(uint8_t *buf) const {
+ // _GLOBAL_OFFSET_TABLE_[0] = _DYNAMIC
+ // glibc stores _dl_runtime_resolve in _GLOBAL_OFFSET_TABLE_[1],
+ // link_map in _GLOBAL_OFFSET_TABLE_[2].
+ write32(buf, mainPart->dynamic->getVA());
+}
+
+void PPC::writeGotPlt(uint8_t *buf, const Symbol &s) const {
+ // Address of the symbol resolver stub in .glink .
+ write32(buf, in.plt->getVA() + 4 * s.pltIndex);
+}
+
+bool PPC::needsThunk(RelExpr expr, RelType type, const InputFile *file,
+ uint64_t branchAddr, const Symbol &s) const {
+ if (type != R_PPC_REL24 && type != R_PPC_PLTREL24)
+ return false;
+ if (s.isInPlt())
+ return true;
+ if (s.isUndefWeak())
+ return false;
+ return !(expr == R_PC && PPC::inBranchRange(type, branchAddr, s.getVA()));
}
-RelExpr PPC::getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const {
- switch (Type) {
+uint32_t PPC::getThunkSectionSpacing() const { return 0x2000000; }
+
+bool PPC::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
+ uint64_t offset = dst - src;
+ if (type == R_PPC_REL24 || type == R_PPC_PLTREL24)
+ return isInt<26>(offset);
+ llvm_unreachable("unsupported relocation type used in branch");
+}
+
+RelExpr PPC::getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const {
+ switch (type) {
+ case R_PPC_DTPREL16:
+ case R_PPC_DTPREL16_HA:
+ case R_PPC_DTPREL16_HI:
+ case R_PPC_DTPREL16_LO:
+ case R_PPC_DTPREL32:
+ return R_DTPREL;
case R_PPC_REL14:
- case R_PPC_REL24:
case R_PPC_REL32:
+ case R_PPC_LOCAL24PC:
+ case R_PPC_REL16_LO:
+ case R_PPC_REL16_HI:
+ case R_PPC_REL16_HA:
return R_PC;
- case R_PPC_PLTREL24:
+ case R_PPC_GOT16:
+ return R_GOT_OFF;
+ case R_PPC_REL24:
return R_PLT_PC;
+ case R_PPC_PLTREL24:
+ return R_PPC32_PLTREL;
+ case R_PPC_GOT_TLSGD16:
+ return R_TLSGD_GOT;
+ case R_PPC_GOT_TLSLD16:
+ return R_TLSLD_GOT;
+ case R_PPC_GOT_TPREL16:
+ return R_GOT_OFF;
+ case R_PPC_TLS:
+ return R_TLSIE_HINT;
+ case R_PPC_TLSGD:
+ return R_TLSDESC_CALL;
+ case R_PPC_TLSLD:
+ return R_TLSLD_HINT;
+ case R_PPC_TPREL16:
+ case R_PPC_TPREL16_HA:
+ case R_PPC_TPREL16_LO:
+ case R_PPC_TPREL16_HI:
+ return R_TLS;
default:
return R_ABS;
}
}
-void PPC::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
- switch (Type) {
+RelType PPC::getDynRel(RelType type) const {
+ if (type == R_PPC_ADDR32)
+ return type;
+ return R_PPC_NONE;
+}
+
+static std::pair<RelType, uint64_t> fromDTPREL(RelType type, uint64_t val) {
+ uint64_t dtpBiasedVal = val - 0x8000;
+ switch (type) {
+ case R_PPC_DTPREL16:
+ return {R_PPC64_ADDR16, dtpBiasedVal};
+ case R_PPC_DTPREL16_HA:
+ return {R_PPC_ADDR16_HA, dtpBiasedVal};
+ case R_PPC_DTPREL16_HI:
+ return {R_PPC_ADDR16_HI, dtpBiasedVal};
+ case R_PPC_DTPREL16_LO:
+ return {R_PPC_ADDR16_LO, dtpBiasedVal};
+ case R_PPC_DTPREL32:
+ return {R_PPC_ADDR32, dtpBiasedVal};
+ default:
+ return {type, val};
+ }
+}
+
+void PPC::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
+ RelType newType;
+ std::tie(newType, val) = fromDTPREL(type, val);
+ switch (newType) {
+ case R_PPC_ADDR16:
+ checkIntUInt(loc, val, 16, type);
+ write16(loc, val);
+ break;
+ case R_PPC_GOT16:
+ case R_PPC_GOT_TLSGD16:
+ case R_PPC_GOT_TLSLD16:
+ case R_PPC_GOT_TPREL16:
+ case R_PPC_TPREL16:
+ checkInt(loc, val, 16, type);
+ write16(loc, val);
+ break;
case R_PPC_ADDR16_HA:
- write16be(Loc, (Val + 0x8000) >> 16);
+ case R_PPC_DTPREL16_HA:
+ case R_PPC_GOT_TLSGD16_HA:
+ case R_PPC_GOT_TLSLD16_HA:
+ case R_PPC_GOT_TPREL16_HA:
+ case R_PPC_REL16_HA:
+ case R_PPC_TPREL16_HA:
+ write16(loc, ha(val));
break;
case R_PPC_ADDR16_HI:
- write16be(Loc, Val >> 16);
+ case R_PPC_DTPREL16_HI:
+ case R_PPC_GOT_TLSGD16_HI:
+ case R_PPC_GOT_TLSLD16_HI:
+ case R_PPC_GOT_TPREL16_HI:
+ case R_PPC_REL16_HI:
+ case R_PPC_TPREL16_HI:
+ write16(loc, val >> 16);
break;
case R_PPC_ADDR16_LO:
- write16be(Loc, Val);
+ case R_PPC_DTPREL16_LO:
+ case R_PPC_GOT_TLSGD16_LO:
+ case R_PPC_GOT_TLSLD16_LO:
+ case R_PPC_GOT_TPREL16_LO:
+ case R_PPC_REL16_LO:
+ case R_PPC_TPREL16_LO:
+ write16(loc, val);
break;
case R_PPC_ADDR32:
case R_PPC_REL32:
- write32be(Loc, Val);
+ write32(loc, val);
break;
- case R_PPC_REL14:
- write32be(Loc, read32be(Loc) | (Val & 0xFFFC));
+ case R_PPC_REL14: {
+ uint32_t mask = 0x0000FFFC;
+ checkInt(loc, val, 16, type);
+ checkAlignment(loc, val, 4, type);
+ write32(loc, (read32(loc) & ~mask) | (val & mask));
break;
- case R_PPC_PLTREL24:
+ }
case R_PPC_REL24:
- write32be(Loc, read32be(Loc) | (Val & 0x3FFFFFC));
+ case R_PPC_LOCAL24PC:
+ case R_PPC_PLTREL24: {
+ uint32_t mask = 0x03FFFFFC;
+ checkInt(loc, val, 26, type);
+ checkAlignment(loc, val, 4, type);
+ write32(loc, (read32(loc) & ~mask) | (val & mask));
break;
+ }
+ default:
+ error(getErrorLocation(loc) + "unrecognized relocation " + toString(type));
+ }
+}
+
+RelExpr PPC::adjustRelaxExpr(RelType type, const uint8_t *data,
+ RelExpr expr) const {
+ if (expr == R_RELAX_TLS_GD_TO_IE)
+ return R_RELAX_TLS_GD_TO_IE_GOT_OFF;
+ if (expr == R_RELAX_TLS_LD_TO_LE)
+ return R_RELAX_TLS_LD_TO_LE_ABS;
+ return expr;
+}
+
+int PPC::getTlsGdRelaxSkip(RelType type) const {
+ // A __tls_get_addr call instruction is marked with 2 relocations:
+ //
+ // R_PPC_TLSGD / R_PPC_TLSLD: marker relocation
+ // R_PPC_REL24: __tls_get_addr
+ //
+ // After the relaxation we no longer call __tls_get_addr and should skip both
+ // relocations to not create a false dependence on __tls_get_addr being
+ // defined.
+ if (type == R_PPC_TLSGD || type == R_PPC_TLSLD)
+ return 2;
+ return 1;
+}
+
+void PPC::relaxTlsGdToIe(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
+ case R_PPC_GOT_TLSGD16: {
+ // addi rT, rA, x@got@tlsgd --> lwz rT, x@got@tprel(rA)
+ uint32_t insn = readFromHalf16(loc);
+ writeFromHalf16(loc, 0x80000000 | (insn & 0x03ff0000));
+ relocateOne(loc, R_PPC_GOT_TPREL16, val);
+ break;
+ }
+ case R_PPC_TLSGD:
+    // bl __tls_get_addr(x@tlsgd) --> add r3, r3, r2
+ write32(loc, 0x7c631214);
+ break;
+ default:
+ llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
+ }
+}
+
+void PPC::relaxTlsGdToLe(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
+ case R_PPC_GOT_TLSGD16:
+ // addi r3, r31, x@got@tlsgd --> addis r3, r2, x@tprel@ha
+ writeFromHalf16(loc, 0x3c620000 | ha(val));
+ break;
+ case R_PPC_TLSGD:
+    // bl __tls_get_addr(x@tlsgd) --> addi r3, r3, x@tprel@l
+ write32(loc, 0x38630000 | lo(val));
+ break;
+ default:
+ llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
+ }
+}
+
+void PPC::relaxTlsLdToLe(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
+ case R_PPC_GOT_TLSLD16:
+    // addi r3, rA, x@got@tlsld --> addis r3, r2, 0
+ writeFromHalf16(loc, 0x3c620000);
+ break;
+ case R_PPC_TLSLD:
+ // r3+x@dtprel computes r3+x-0x8000, while we want it to compute r3+x@tprel
+ // = r3+x-0x7000, so add 4096 to r3.
+ // bl __tls_get_addr(x@tlsld) --> addi r3, r3, 4096
+ write32(loc, 0x38631000);
+ break;
+ case R_PPC_DTPREL16:
+ case R_PPC_DTPREL16_HA:
+ case R_PPC_DTPREL16_HI:
+ case R_PPC_DTPREL16_LO:
+ relocateOne(loc, type, val);
+ break;
+ default:
+ llvm_unreachable("unsupported relocation for TLS LD to LE relaxation");
+ }
+}
+
+void PPC::relaxTlsIeToLe(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
+ case R_PPC_GOT_TPREL16: {
+ // lwz rT, x@got@tprel(rA) --> addis rT, r2, x@tprel@ha
+ uint32_t rt = readFromHalf16(loc) & 0x03e00000;
+ writeFromHalf16(loc, 0x3c020000 | rt | ha(val));
+ break;
+ }
+ case R_PPC_TLS: {
+ uint32_t insn = read32(loc);
+ if (insn >> 26 != 31)
+ error("unrecognized instruction for IE to LE R_PPC_TLS");
+ // addi rT, rT, x@tls --> addi rT, rT, x@tprel@l
+ uint32_t dFormOp = getPPCDFormOp((read32(loc) & 0x000007fe) >> 1);
+ if (dFormOp == 0)
+ error("unrecognized instruction for IE to LE R_PPC_TLS");
+ write32(loc, (dFormOp << 26) | (insn & 0x03ff0000) | lo(val));
+ break;
+ }
default:
- error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+ llvm_unreachable("unsupported relocation for TLS IE to LE relaxation");
}
}
TargetInfo *elf::getPPCTargetInfo() {
- static PPC Target;
- return &Target;
+ static PPC target;
+ return &target;
}
diff --git a/contrib/llvm/tools/lld/ELF/Arch/PPC64.cpp b/contrib/llvm/tools/lld/ELF/Arch/PPC64.cpp
index f02e818daee5..70d284cfad71 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/PPC64.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/PPC64.cpp
@@ -1,9 +1,8 @@
//===- PPC64.cpp ----------------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -20,8 +19,8 @@ using namespace llvm::ELF;
using namespace lld;
using namespace lld::elf;
-static uint64_t PPC64TocOffset = 0x8000;
-static uint64_t DynamicThreadPointerOffset = 0x8000;
+static uint64_t ppc64TocOffset = 0x8000;
+static uint64_t dynamicThreadPointerOffset = 0x8000;
// The instruction encoding of bits 21-30 from the ISA for the Xform and Dform
// instructions that can be used as part of the initial exec TLS sequence.
@@ -65,16 +64,16 @@ uint64_t elf::getPPC64TocBase() {
// TOC starts where the first of these sections starts. We always create a
// .got when we see a relocation that uses it, so for us the start is always
// the .got.
- uint64_t TocVA = In.Got->getVA();
+ uint64_t tocVA = in.got->getVA();
// Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000
// thus permitting a full 64 Kbytes segment. Note that the glibc startup
// code (crt1.o) assumes that you can get from the TOC base to the
// start of the .toc section with only a single (signed) 16-bit relocation.
- return TocVA + PPC64TocOffset;
+ return tocVA + ppc64TocOffset;
}
-unsigned elf::getPPC64GlobalEntryToLocalEntryOffset(uint8_t StOther) {
+unsigned elf::getPPC64GlobalEntryToLocalEntryOffset(uint8_t stOther) {
// The offset is encoded into the 3 most significant bits of the st_other
// field, with some special values described in section 3.4.1 of the ABI:
// 0 --> Zero offset between the GEP and LEP, and the function does NOT use
@@ -86,44 +85,134 @@ unsigned elf::getPPC64GlobalEntryToLocalEntryOffset(uint8_t StOther) {
// 2 --> 2^2 = 4 bytes --> 1 instruction.
// 6 --> 2^6 = 64 bytes --> 16 instructions.
// 7 --> Reserved.
- uint8_t GepToLep = (StOther >> 5) & 7;
- if (GepToLep < 2)
+ uint8_t gepToLep = (stOther >> 5) & 7;
+ if (gepToLep < 2)
return 0;
// The value encoded in the st_other bits is the
// log-base-2(offset).
- if (GepToLep < 7)
- return 1 << GepToLep;
+ if (gepToLep < 7)
+ return 1 << gepToLep;
error("reserved value of 7 in the 3 most-significant-bits of st_other");
return 0;
}
+bool elf::isPPC64SmallCodeModelTocReloc(RelType type) {
+ // The only small code model relocations that access the .toc section.
+ return type == R_PPC64_TOC16 || type == R_PPC64_TOC16_DS;
+}
+
+// Find the R_PPC64_ADDR64 in .rela.toc with matching offset.
+template <typename ELFT>
+static std::pair<Defined *, int64_t>
+getRelaTocSymAndAddend(InputSectionBase *tocSec, uint64_t offset) {
+ if (tocSec->numRelocations == 0)
+ return {};
+
+ // .rela.toc contains exclusively R_PPC64_ADDR64 relocations sorted by
+ // r_offset: 0, 8, 16, etc. For a given Offset, Offset / 8 gives us the
+ // relocation index in most cases.
+ //
+ // In rare cases a TOC entry may store a constant that doesn't need an
+ // R_PPC64_ADDR64, the corresponding r_offset is therefore missing. Offset / 8
+ // points to a relocation with larger r_offset. Do a linear probe then.
+ // Constants are extremely uncommon in .toc and the extra number of array
+ // accesses can be seen as a small constant.
+ ArrayRef<typename ELFT::Rela> relas = tocSec->template relas<ELFT>();
+ uint64_t index = std::min<uint64_t>(offset / 8, relas.size() - 1);
+ for (;;) {
+ if (relas[index].r_offset == offset) {
+ Symbol &sym = tocSec->getFile<ELFT>()->getRelocTargetSym(relas[index]);
+ return {dyn_cast<Defined>(&sym), getAddend<ELFT>(relas[index])};
+ }
+ if (relas[index].r_offset < offset || index == 0)
+ break;
+ --index;
+ }
+ return {};
+}
+
+// When accessing a symbol defined in another translation unit, compilers
+// reserve a .toc entry, allocate a local label and generate toc-indirect
+// instructions:
+//
+// addis 3, 2, .LC0@toc@ha # R_PPC64_TOC16_HA
+// ld 3, .LC0@toc@l(3) # R_PPC64_TOC16_LO_DS, load the address from a .toc entry
+// ld/lwa 3, 0(3) # load the value from the address
+//
+// .section .toc,"aw",@progbits
+// .LC0: .tc var[TC],var
+//
+// If var is defined, non-preemptable and addressable with a 32-bit signed
+// offset from the toc base, the address of var can be computed by adding an
+// offset to the toc base, saving a load.
+//
+// addis 3,2,var@toc@ha # this may be relaxed to a nop,
+// addi 3,3,var@toc@l # then this becomes addi 3,2,var@toc
+// ld/lwa 3, 0(3) # load the value from the address
+//
+// Returns true if the relaxation is performed.
+bool elf::tryRelaxPPC64TocIndirection(RelType type, const Relocation &rel,
+ uint8_t *bufLoc) {
+ assert(config->tocOptimize);
+ if (rel.addend < 0)
+ return false;
+
+ // If the symbol is not the .toc section, this isn't a toc-indirection.
+ Defined *defSym = dyn_cast<Defined>(rel.sym);
+ if (!defSym || !defSym->isSection() || defSym->section->name != ".toc")
+ return false;
+
+ Defined *d;
+ int64_t addend;
+ auto *tocISB = cast<InputSectionBase>(defSym->section);
+ std::tie(d, addend) =
+ config->isLE ? getRelaTocSymAndAddend<ELF64LE>(tocISB, rel.addend)
+ : getRelaTocSymAndAddend<ELF64BE>(tocISB, rel.addend);
+
+ // Only non-preemptable defined symbols can be relaxed.
+ if (!d || d->isPreemptible)
+ return false;
+
+ // Two instructions can materialize a 32-bit signed offset from the toc base.
+ uint64_t tocRelative = d->getVA(addend) - getPPC64TocBase();
+ if (!isInt<32>(tocRelative))
+ return false;
+
+ // Add PPC64TocOffset that will be subtracted by relocateOne().
+ target->relaxGot(bufLoc, type, tocRelative + ppc64TocOffset);
+ return true;
+}
+
namespace {
class PPC64 final : public TargetInfo {
public:
PPC64();
+ int getTlsGdRelaxSkip(RelType type) const override;
uint32_t calcEFlags() const override;
- RelExpr getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const override;
- void writePltHeader(uint8_t *Buf) const override;
- void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
- int32_t Index, unsigned RelOff) const override;
- void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- void writeGotHeader(uint8_t *Buf) const override;
- bool needsThunk(RelExpr Expr, RelType Type, const InputFile *File,
- uint64_t BranchAddr, const Symbol &S) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ RelType getDynRel(RelType type) const override;
+ void writePltHeader(uint8_t *buf) const override;
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
+ void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
+ void writeGotHeader(uint8_t *buf) const override;
+ bool needsThunk(RelExpr expr, RelType type, const InputFile *file,
+ uint64_t branchAddr, const Symbol &s) const override;
uint32_t getThunkSectionSpacing() const override;
- bool inBranchRange(RelType Type, uint64_t Src, uint64_t Dst) const override;
- RelExpr adjustRelaxExpr(RelType Type, const uint8_t *Data,
- RelExpr Expr) const override;
- void relaxTlsGdToIe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- void relaxTlsGdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- void relaxTlsLdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- void relaxTlsIeToLe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
-
- bool adjustPrologueForCrossSplitStack(uint8_t *Loc, uint8_t *End,
- uint8_t StOther) const override;
+ bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override;
+ RelExpr adjustRelaxExpr(RelType type, const uint8_t *data,
+ RelExpr expr) const override;
+ void relaxGot(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsGdToIe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsGdToLe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsLdToLe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsIeToLe(uint8_t *loc, RelType type, uint64_t val) const override;
+
+ bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
+ uint8_t stOther) const override;
};
} // namespace
@@ -131,19 +220,19 @@ public:
// #higher(value), #highera(value), #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document.
-static uint16_t lo(uint64_t V) { return V; }
-static uint16_t hi(uint64_t V) { return V >> 16; }
-static uint16_t ha(uint64_t V) { return (V + 0x8000) >> 16; }
-static uint16_t higher(uint64_t V) { return V >> 32; }
-static uint16_t highera(uint64_t V) { return (V + 0x8000) >> 32; }
-static uint16_t highest(uint64_t V) { return V >> 48; }
-static uint16_t highesta(uint64_t V) { return (V + 0x8000) >> 48; }
+static uint16_t lo(uint64_t v) { return v; }
+static uint16_t hi(uint64_t v) { return v >> 16; }
+static uint16_t ha(uint64_t v) { return (v + 0x8000) >> 16; }
+static uint16_t higher(uint64_t v) { return v >> 32; }
+static uint16_t highera(uint64_t v) { return (v + 0x8000) >> 32; }
+static uint16_t highest(uint64_t v) { return v >> 48; }
+static uint16_t highesta(uint64_t v) { return (v + 0x8000) >> 48; }
// Extracts the 'PO' field of an instruction encoding.
-static uint8_t getPrimaryOpCode(uint32_t Encoding) { return (Encoding >> 26); }
+static uint8_t getPrimaryOpCode(uint32_t encoding) { return (encoding >> 26); }
-static bool isDQFormInstruction(uint32_t Encoding) {
- switch (getPrimaryOpCode(Encoding)) {
+static bool isDQFormInstruction(uint32_t encoding) {
+ switch (getPrimaryOpCode(encoding)) {
default:
return false;
case 56:
@@ -153,12 +242,12 @@ static bool isDQFormInstruction(uint32_t Encoding) {
// There are both DS and DQ instruction forms with this primary opcode.
// Namely `lxv` and `stxv` are the DQ-forms that use it.
// The DS 'XO' bits being set to 01 is restricted to DQ form.
- return (Encoding & 3) == 0x1;
+ return (encoding & 3) == 0x1;
}
}
-static bool isInstructionUpdateForm(uint32_t Encoding) {
- switch (getPrimaryOpCode(Encoding)) {
+static bool isInstructionUpdateForm(uint32_t encoding) {
+ switch (getPrimaryOpCode(encoding)) {
default:
return false;
case LBZU:
@@ -177,7 +266,7 @@ static bool isInstructionUpdateForm(uint32_t Encoding) {
// between LD/LDU/LWA
case LD:
case STD:
- return (Encoding & 3) == 1;
+ return (encoding & 3) == 1;
}
}
@@ -186,40 +275,38 @@ static bool isInstructionUpdateForm(uint32_t Encoding) {
// pointer is pointing into the middle of the word we want to extract, and on
// little-endian it is pointing to the start of the word. These 2 helpers are to
// simplify reading and writing in that context.
-static void writeInstrFromHalf16(uint8_t *Loc, uint32_t Instr) {
- write32(Loc - (Config->EKind == ELF64BEKind ? 2 : 0), Instr);
+static void writeFromHalf16(uint8_t *loc, uint32_t insn) {
+ write32(config->isLE ? loc : loc - 2, insn);
}
-static uint32_t readInstrFromHalf16(const uint8_t *Loc) {
- return read32(Loc - (Config->EKind == ELF64BEKind ? 2 : 0));
+static uint32_t readFromHalf16(const uint8_t *loc) {
+ return read32(config->isLE ? loc : loc - 2);
}
PPC64::PPC64() {
- GotRel = R_PPC64_GLOB_DAT;
- NoneRel = R_PPC64_NONE;
- PltRel = R_PPC64_JMP_SLOT;
- RelativeRel = R_PPC64_RELATIVE;
- IRelativeRel = R_PPC64_IRELATIVE;
- GotEntrySize = 8;
- PltEntrySize = 4;
- GotPltEntrySize = 8;
- GotBaseSymInGotPlt = false;
- GotBaseSymOff = 0x8000;
- GotHeaderEntriesNum = 1;
- GotPltHeaderEntriesNum = 2;
- PltHeaderSize = 60;
- NeedsThunks = true;
-
- TlsModuleIndexRel = R_PPC64_DTPMOD64;
- TlsOffsetRel = R_PPC64_DTPREL64;
-
- TlsGotRel = R_PPC64_TPREL64;
-
- NeedsMoreStackNonSplit = false;
+ gotRel = R_PPC64_GLOB_DAT;
+ noneRel = R_PPC64_NONE;
+ pltRel = R_PPC64_JMP_SLOT;
+ relativeRel = R_PPC64_RELATIVE;
+ iRelativeRel = R_PPC64_IRELATIVE;
+ symbolicRel = R_PPC64_ADDR64;
+ pltEntrySize = 4;
+ gotBaseSymInGotPlt = false;
+ gotHeaderEntriesNum = 1;
+ gotPltHeaderEntriesNum = 2;
+ pltHeaderSize = 60;
+ needsThunks = true;
+
+ tlsModuleIndexRel = R_PPC64_DTPMOD64;
+ tlsOffsetRel = R_PPC64_DTPREL64;
+
+ tlsGotRel = R_PPC64_TPREL64;
+
+ needsMoreStackNonSplit = false;
// We need 64K pages (at least under glibc/Linux, the loader won't
// set different permissions on a finer granularity than that).
- DefaultMaxPageSize = 65536;
+ defaultMaxPageSize = 65536;
// The PPC64 ELF ABI v1 spec, says:
//
@@ -229,31 +316,66 @@ PPC64::PPC64() {
//
// And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
// use 0x10000000 as the starting address.
- DefaultImageBase = 0x10000000;
+ defaultImageBase = 0x10000000;
- write32(TrapInstr.data(), 0x7fe00008);
+ write32(trapInstr.data(), 0x7fe00008);
}
-static uint32_t getEFlags(InputFile *File) {
- if (Config->EKind == ELF64BEKind)
- return cast<ObjFile<ELF64BE>>(File)->getObj().getHeader()->e_flags;
- return cast<ObjFile<ELF64LE>>(File)->getObj().getHeader()->e_flags;
+int PPC64::getTlsGdRelaxSkip(RelType type) const {
+ // A __tls_get_addr call instruction is marked with 2 relocations:
+ //
+ // R_PPC64_TLSGD / R_PPC64_TLSLD: marker relocation
+ // R_PPC64_REL24: __tls_get_addr
+ //
+ // After the relaxation we no longer call __tls_get_addr and should skip both
+ // relocations to not create a false dependence on __tls_get_addr being
+ // defined.
+ if (type == R_PPC64_TLSGD || type == R_PPC64_TLSLD)
+ return 2;
+ return 1;
+}
+
+static uint32_t getEFlags(InputFile *file) {
+ if (config->ekind == ELF64BEKind)
+ return cast<ObjFile<ELF64BE>>(file)->getObj().getHeader()->e_flags;
+ return cast<ObjFile<ELF64LE>>(file)->getObj().getHeader()->e_flags;
}
// This file implements v2 ABI. This function makes sure that all
// object files have v2 or an unspecified version as an ABI version.
uint32_t PPC64::calcEFlags() const {
- for (InputFile *F : ObjectFiles) {
- uint32_t Flag = getEFlags(F);
- if (Flag == 1)
- error(toString(F) + ": ABI version 1 is not supported");
- else if (Flag > 2)
- error(toString(F) + ": unrecognized e_flags: " + Twine(Flag));
+ for (InputFile *f : objectFiles) {
+ uint32_t flag = getEFlags(f);
+ if (flag == 1)
+ error(toString(f) + ": ABI version 1 is not supported");
+ else if (flag > 2)
+ error(toString(f) + ": unrecognized e_flags: " + Twine(flag));
}
return 2;
}
-void PPC64::relaxTlsGdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
+void PPC64::relaxGot(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
+ case R_PPC64_TOC16_HA:
+ // Convert "addis reg, 2, .LC0@toc@h" to "addis reg, 2, var@toc@h" or "nop".
+ relocateOne(loc, type, val);
+ break;
+ case R_PPC64_TOC16_LO_DS: {
+ // Convert "ld reg, .LC0@toc@l(reg)" to "addi reg, reg, var@toc@l" or
+ // "addi reg, 2, var@toc".
+ uint32_t insn = readFromHalf16(loc);
+ if (getPrimaryOpCode(insn) != LD)
+ error("expected a 'ld' for got-indirect to toc-relative relaxing");
+ writeFromHalf16(loc, (insn & 0x03ffffff) | 0x38000000);
+ relocateOne(loc, R_PPC64_TOC16_LO, val);
+ break;
+ }
+ default:
+ llvm_unreachable("unexpected relocation type");
+ }
+}
+
+void PPC64::relaxTlsGdToLe(uint8_t *loc, RelType type, uint64_t val) const {
// Reference: 3.7.4.2 of the 64-bit ELF V2 abi supplement.
// The general dynamic code sequence for a global `x` will look like:
// Instruction Relocation Symbol
@@ -269,30 +391,30 @@ void PPC64::relaxTlsGdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
// bl __tls_get_addr(x@tlsgd) into nop
// nop into addi r3, r3, x@tprel@l
- switch (Type) {
+ switch (type) {
case R_PPC64_GOT_TLSGD16_HA:
- writeInstrFromHalf16(Loc, 0x60000000); // nop
+ writeFromHalf16(loc, 0x60000000); // nop
break;
case R_PPC64_GOT_TLSGD16:
case R_PPC64_GOT_TLSGD16_LO:
- writeInstrFromHalf16(Loc, 0x3c6d0000); // addis r3, r13
- relocateOne(Loc, R_PPC64_TPREL16_HA, Val);
+ writeFromHalf16(loc, 0x3c6d0000); // addis r3, r13
+ relocateOne(loc, R_PPC64_TPREL16_HA, val);
break;
case R_PPC64_TLSGD:
- write32(Loc, 0x60000000); // nop
- write32(Loc + 4, 0x38630000); // addi r3, r3
+ write32(loc, 0x60000000); // nop
+ write32(loc + 4, 0x38630000); // addi r3, r3
// Since we are relocating a half16 type relocation and Loc + 4 points to
// the start of an instruction we need to advance the buffer by an extra
// 2 bytes on BE.
- relocateOne(Loc + 4 + (Config->EKind == ELF64BEKind ? 2 : 0),
- R_PPC64_TPREL16_LO, Val);
+ relocateOne(loc + 4 + (config->ekind == ELF64BEKind ? 2 : 0),
+ R_PPC64_TPREL16_LO, val);
break;
default:
llvm_unreachable("unsupported relocation for TLS GD to LE relaxation");
}
}
-void PPC64::relaxTlsLdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
+void PPC64::relaxTlsLdToLe(uint8_t *loc, RelType type, uint64_t val) const {
// Reference: 3.7.4.3 of the 64-bit ELF V2 abi supplement.
// The local dynamic code sequence for a global `x` will look like:
// Instruction Relocation Symbol
@@ -308,16 +430,16 @@ void PPC64::relaxTlsLdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
// bl __tls_get_addr(x@tlsgd) into nop
// nop into addi r3, r3, 4096
- switch (Type) {
+ switch (type) {
case R_PPC64_GOT_TLSLD16_HA:
- writeInstrFromHalf16(Loc, 0x60000000); // nop
+ writeFromHalf16(loc, 0x60000000); // nop
break;
case R_PPC64_GOT_TLSLD16_LO:
- writeInstrFromHalf16(Loc, 0x3c6d0000); // addis r3, r13, 0
+ writeFromHalf16(loc, 0x3c6d0000); // addis r3, r13, 0
break;
case R_PPC64_TLSLD:
- write32(Loc, 0x60000000); // nop
- write32(Loc + 4, 0x38631000); // addi r3, r3, 4096
+ write32(loc, 0x60000000); // nop
+ write32(loc + 4, 0x38631000); // addi r3, r3, 4096
break;
case R_PPC64_DTPREL16:
case R_PPC64_DTPREL16_HA:
@@ -325,19 +447,15 @@ void PPC64::relaxTlsLdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
case R_PPC64_DTPREL16_DS:
case R_PPC64_DTPREL16_LO:
case R_PPC64_DTPREL16_LO_DS:
- case R_PPC64_GOT_DTPREL16_HA:
- case R_PPC64_GOT_DTPREL16_LO_DS:
- case R_PPC64_GOT_DTPREL16_DS:
- case R_PPC64_GOT_DTPREL16_HI:
- relocateOne(Loc, Type, Val);
+ relocateOne(loc, type, val);
break;
default:
llvm_unreachable("unsupported relocation for TLS LD to LE relaxation");
}
}
-static unsigned getDFormOp(unsigned SecondaryOp) {
- switch (SecondaryOp) {
+unsigned elf::getPPCDFormOp(unsigned secondaryOp) {
+ switch (secondaryOp) {
case LBZX:
return LBZ;
case LHZX:
@@ -357,12 +475,11 @@ static unsigned getDFormOp(unsigned SecondaryOp) {
case ADD:
return ADDI;
default:
- error("unrecognized instruction for IE to LE R_PPC64_TLS");
return 0;
}
}
-void PPC64::relaxTlsIeToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
+void PPC64::relaxTlsIeToLe(uint8_t *loc, RelType type, uint64_t val) const {
// The initial exec code sequence for a global `x` will look like:
// Instruction Relocation Symbol
// addis r9, r2, x@got@tprel@ha R_PPC64_GOT_TPREL16_HA x
@@ -382,26 +499,28 @@ void PPC64::relaxTlsIeToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
// instruction, if we are accessing memory it will use any of the X-form
// indexed load or store instructions.
- unsigned Offset = (Config->EKind == ELF64BEKind) ? 2 : 0;
- switch (Type) {
+ unsigned offset = (config->ekind == ELF64BEKind) ? 2 : 0;
+ switch (type) {
case R_PPC64_GOT_TPREL16_HA:
- write32(Loc - Offset, 0x60000000); // nop
+ write32(loc - offset, 0x60000000); // nop
break;
case R_PPC64_GOT_TPREL16_LO_DS:
case R_PPC64_GOT_TPREL16_DS: {
- uint32_t RegNo = read32(Loc - Offset) & 0x03E00000; // bits 6-10
- write32(Loc - Offset, 0x3C0D0000 | RegNo); // addis RegNo, r13
- relocateOne(Loc, R_PPC64_TPREL16_HA, Val);
+ uint32_t regNo = read32(loc - offset) & 0x03E00000; // bits 6-10
+ write32(loc - offset, 0x3C0D0000 | regNo); // addis RegNo, r13
+ relocateOne(loc, R_PPC64_TPREL16_HA, val);
break;
}
case R_PPC64_TLS: {
- uint32_t PrimaryOp = getPrimaryOpCode(read32(Loc));
- if (PrimaryOp != 31)
+ uint32_t primaryOp = getPrimaryOpCode(read32(loc));
+ if (primaryOp != 31)
+ error("unrecognized instruction for IE to LE R_PPC64_TLS");
+ uint32_t secondaryOp = (read32(loc) & 0x000007FE) >> 1; // bits 21-30
+ uint32_t dFormOp = getPPCDFormOp(secondaryOp);
+ if (dFormOp == 0)
error("unrecognized instruction for IE to LE R_PPC64_TLS");
- uint32_t SecondaryOp = (read32(Loc) & 0x000007FE) >> 1; // bits 21-30
- uint32_t DFormOp = getDFormOp(SecondaryOp);
- write32(Loc, ((DFormOp << 26) | (read32(Loc) & 0x03FFFFFF)));
- relocateOne(Loc + Offset, R_PPC64_TPREL16_LO, Val);
+ write32(loc, ((dFormOp << 26) | (read32(loc) & 0x03FFFFFF)));
+ relocateOne(loc + offset, R_PPC64_TPREL16_LO, val);
break;
}
default:
@@ -410,9 +529,9 @@ void PPC64::relaxTlsIeToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
}
}
-RelExpr PPC64::getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const {
- switch (Type) {
+RelExpr PPC64::getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const {
+ switch (type) {
case R_PPC64_GOT16:
case R_PPC64_GOT16_DS:
case R_PPC64_GOT16_HA:
@@ -422,16 +541,17 @@ RelExpr PPC64::getRelExpr(RelType Type, const Symbol &S,
return R_GOT_OFF;
case R_PPC64_TOC16:
case R_PPC64_TOC16_DS:
- case R_PPC64_TOC16_HA:
case R_PPC64_TOC16_HI:
case R_PPC64_TOC16_LO:
- case R_PPC64_TOC16_LO_DS:
return R_GOTREL;
+ case R_PPC64_TOC16_HA:
+ case R_PPC64_TOC16_LO_DS:
+ return config->tocOptimize ? R_PPC64_RELAX_TOC : R_GOTREL;
case R_PPC64_TOC:
- return R_PPC_TOC;
+ return R_PPC64_TOCBASE;
case R_PPC64_REL14:
case R_PPC64_REL24:
- return R_PPC_CALL_PLT;
+ return R_PPC64_CALL_PLT;
case R_PPC64_REL16_LO:
case R_PPC64_REL16_HA:
case R_PPC64_REL32:
@@ -479,7 +599,7 @@ RelExpr PPC64::getRelExpr(RelType Type, const Symbol &S,
case R_PPC64_DTPREL16_LO:
case R_PPC64_DTPREL16_LO_DS:
case R_PPC64_DTPREL64:
- return R_ABS;
+ return R_DTPREL;
case R_PPC64_TLSGD:
return R_TLSDESC_CALL;
case R_PPC64_TLSLD:
@@ -491,115 +611,121 @@ RelExpr PPC64::getRelExpr(RelType Type, const Symbol &S,
}
}
-void PPC64::writeGotHeader(uint8_t *Buf) const {
- write64(Buf, getPPC64TocBase());
+RelType PPC64::getDynRel(RelType type) const {
+ if (type == R_PPC64_ADDR64 || type == R_PPC64_TOC)
+ return R_PPC64_ADDR64;
+ return R_PPC64_NONE;
+}
+
+void PPC64::writeGotHeader(uint8_t *buf) const {
+ write64(buf, getPPC64TocBase());
}
-void PPC64::writePltHeader(uint8_t *Buf) const {
+void PPC64::writePltHeader(uint8_t *buf) const {
// The generic resolver stub goes first.
- write32(Buf + 0, 0x7c0802a6); // mflr r0
- write32(Buf + 4, 0x429f0005); // bcl 20,4*cr7+so,8 <_glink+0x8>
- write32(Buf + 8, 0x7d6802a6); // mflr r11
- write32(Buf + 12, 0x7c0803a6); // mtlr r0
- write32(Buf + 16, 0x7d8b6050); // subf r12, r11, r12
- write32(Buf + 20, 0x380cffcc); // subi r0,r12,52
- write32(Buf + 24, 0x7800f082); // srdi r0,r0,62,2
- write32(Buf + 28, 0xe98b002c); // ld r12,44(r11)
- write32(Buf + 32, 0x7d6c5a14); // add r11,r12,r11
- write32(Buf + 36, 0xe98b0000); // ld r12,0(r11)
- write32(Buf + 40, 0xe96b0008); // ld r11,8(r11)
- write32(Buf + 44, 0x7d8903a6); // mtctr r12
- write32(Buf + 48, 0x4e800420); // bctr
+ write32(buf + 0, 0x7c0802a6); // mflr r0
+ write32(buf + 4, 0x429f0005); // bcl 20,4*cr7+so,8 <_glink+0x8>
+ write32(buf + 8, 0x7d6802a6); // mflr r11
+ write32(buf + 12, 0x7c0803a6); // mtlr r0
+ write32(buf + 16, 0x7d8b6050); // subf r12, r11, r12
+ write32(buf + 20, 0x380cffcc); // subi r0,r12,52
+ write32(buf + 24, 0x7800f082); // srdi r0,r0,62,2
+ write32(buf + 28, 0xe98b002c); // ld r12,44(r11)
+ write32(buf + 32, 0x7d6c5a14); // add r11,r12,r11
+ write32(buf + 36, 0xe98b0000); // ld r12,0(r11)
+ write32(buf + 40, 0xe96b0008); // ld r11,8(r11)
+ write32(buf + 44, 0x7d8903a6); // mtctr r12
+ write32(buf + 48, 0x4e800420); // bctr
// The 'bcl' instruction will set the link register to the address of the
// following instruction ('mflr r11'). Here we store the offset from that
// instruction to the first entry in the GotPlt section.
- int64_t GotPltOffset = In.GotPlt->getVA() - (In.Plt->getVA() + 8);
- write64(Buf + 52, GotPltOffset);
+ int64_t gotPltOffset = in.gotPlt->getVA() - (in.plt->getVA() + 8);
+ write64(buf + 52, gotPltOffset);
}
-void PPC64::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
- uint64_t PltEntryAddr, int32_t Index,
- unsigned RelOff) const {
- int32_t Offset = PltHeaderSize + Index * PltEntrySize;
+void PPC64::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
+ int32_t offset = pltHeaderSize + index * pltEntrySize;
// bl __glink_PLTresolve
- write32(Buf, 0x48000000 | ((-Offset) & 0x03FFFFFc));
+ write32(buf, 0x48000000 | ((-offset) & 0x03FFFFFc));
}
-static std::pair<RelType, uint64_t> toAddr16Rel(RelType Type, uint64_t Val) {
+static std::pair<RelType, uint64_t> toAddr16Rel(RelType type, uint64_t val) {
// Relocations relative to the toc-base need to be adjusted by the Toc offset.
- uint64_t TocBiasedVal = Val - PPC64TocOffset;
+ uint64_t tocBiasedVal = val - ppc64TocOffset;
// Relocations relative to dtv[dtpmod] need to be adjusted by the DTP offset.
- uint64_t DTPBiasedVal = Val - DynamicThreadPointerOffset;
+ uint64_t dtpBiasedVal = val - dynamicThreadPointerOffset;
- switch (Type) {
+ switch (type) {
// TOC biased relocation.
case R_PPC64_GOT16:
case R_PPC64_GOT_TLSGD16:
case R_PPC64_GOT_TLSLD16:
case R_PPC64_TOC16:
- return {R_PPC64_ADDR16, TocBiasedVal};
+ return {R_PPC64_ADDR16, tocBiasedVal};
case R_PPC64_GOT16_DS:
case R_PPC64_TOC16_DS:
case R_PPC64_GOT_TPREL16_DS:
case R_PPC64_GOT_DTPREL16_DS:
- return {R_PPC64_ADDR16_DS, TocBiasedVal};
+ return {R_PPC64_ADDR16_DS, tocBiasedVal};
case R_PPC64_GOT16_HA:
case R_PPC64_GOT_TLSGD16_HA:
case R_PPC64_GOT_TLSLD16_HA:
case R_PPC64_GOT_TPREL16_HA:
case R_PPC64_GOT_DTPREL16_HA:
case R_PPC64_TOC16_HA:
- return {R_PPC64_ADDR16_HA, TocBiasedVal};
+ return {R_PPC64_ADDR16_HA, tocBiasedVal};
case R_PPC64_GOT16_HI:
case R_PPC64_GOT_TLSGD16_HI:
case R_PPC64_GOT_TLSLD16_HI:
case R_PPC64_GOT_TPREL16_HI:
case R_PPC64_GOT_DTPREL16_HI:
case R_PPC64_TOC16_HI:
- return {R_PPC64_ADDR16_HI, TocBiasedVal};
+ return {R_PPC64_ADDR16_HI, tocBiasedVal};
case R_PPC64_GOT16_LO:
case R_PPC64_GOT_TLSGD16_LO:
case R_PPC64_GOT_TLSLD16_LO:
case R_PPC64_TOC16_LO:
- return {R_PPC64_ADDR16_LO, TocBiasedVal};
+ return {R_PPC64_ADDR16_LO, tocBiasedVal};
case R_PPC64_GOT16_LO_DS:
case R_PPC64_TOC16_LO_DS:
case R_PPC64_GOT_TPREL16_LO_DS:
case R_PPC64_GOT_DTPREL16_LO_DS:
- return {R_PPC64_ADDR16_LO_DS, TocBiasedVal};
+ return {R_PPC64_ADDR16_LO_DS, tocBiasedVal};
// Dynamic Thread pointer biased relocation types.
case R_PPC64_DTPREL16:
- return {R_PPC64_ADDR16, DTPBiasedVal};
+ return {R_PPC64_ADDR16, dtpBiasedVal};
case R_PPC64_DTPREL16_DS:
- return {R_PPC64_ADDR16_DS, DTPBiasedVal};
+ return {R_PPC64_ADDR16_DS, dtpBiasedVal};
case R_PPC64_DTPREL16_HA:
- return {R_PPC64_ADDR16_HA, DTPBiasedVal};
+ return {R_PPC64_ADDR16_HA, dtpBiasedVal};
case R_PPC64_DTPREL16_HI:
- return {R_PPC64_ADDR16_HI, DTPBiasedVal};
+ return {R_PPC64_ADDR16_HI, dtpBiasedVal};
case R_PPC64_DTPREL16_HIGHER:
- return {R_PPC64_ADDR16_HIGHER, DTPBiasedVal};
+ return {R_PPC64_ADDR16_HIGHER, dtpBiasedVal};
case R_PPC64_DTPREL16_HIGHERA:
- return {R_PPC64_ADDR16_HIGHERA, DTPBiasedVal};
+ return {R_PPC64_ADDR16_HIGHERA, dtpBiasedVal};
case R_PPC64_DTPREL16_HIGHEST:
- return {R_PPC64_ADDR16_HIGHEST, DTPBiasedVal};
+ return {R_PPC64_ADDR16_HIGHEST, dtpBiasedVal};
case R_PPC64_DTPREL16_HIGHESTA:
- return {R_PPC64_ADDR16_HIGHESTA, DTPBiasedVal};
+ return {R_PPC64_ADDR16_HIGHESTA, dtpBiasedVal};
case R_PPC64_DTPREL16_LO:
- return {R_PPC64_ADDR16_LO, DTPBiasedVal};
+ return {R_PPC64_ADDR16_LO, dtpBiasedVal};
case R_PPC64_DTPREL16_LO_DS:
- return {R_PPC64_ADDR16_LO_DS, DTPBiasedVal};
+ return {R_PPC64_ADDR16_LO_DS, dtpBiasedVal};
case R_PPC64_DTPREL64:
- return {R_PPC64_ADDR64, DTPBiasedVal};
+ return {R_PPC64_ADDR64, dtpBiasedVal};
default:
- return {Type, Val};
+ return {type, val};
}
}
-static bool isTocOptType(RelType Type) {
- switch (Type) {
+static bool isTocOptType(RelType type) {
+ switch (type) {
case R_PPC64_GOT16_HA:
case R_PPC64_GOT16_LO_DS:
case R_PPC64_TOC16_HA:
@@ -611,66 +737,69 @@ static bool isTocOptType(RelType Type) {
}
}
-void PPC64::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
+void PPC64::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
// We need to save the original relocation type to use in diagnostics, and
// use the original type to determine if we should toc-optimize the
// instructions being relocated.
- RelType OriginalType = Type;
- bool ShouldTocOptimize = isTocOptType(Type);
+ RelType originalType = type;
+ bool shouldTocOptimize = isTocOptType(type);
// For dynamic thread pointer relative, toc-relative, and got-indirect
// relocations, proceed in terms of the corresponding ADDR16 relocation type.
- std::tie(Type, Val) = toAddr16Rel(Type, Val);
+ std::tie(type, val) = toAddr16Rel(type, val);
- switch (Type) {
+ switch (type) {
case R_PPC64_ADDR14: {
- checkAlignment(Loc, Val, 4, Type);
+ checkAlignment(loc, val, 4, type);
// Preserve the AA/LK bits in the branch instruction
- uint8_t AALK = Loc[3];
- write16(Loc + 2, (AALK & 3) | (Val & 0xfffc));
+ uint8_t aalk = loc[3];
+ write16(loc + 2, (aalk & 3) | (val & 0xfffc));
break;
}
case R_PPC64_ADDR16:
- case R_PPC64_TPREL16:
- checkInt(Loc, Val, 16, OriginalType);
- write16(Loc, Val);
+ checkIntUInt(loc, val, 16, originalType);
+ write16(loc, val);
+ break;
+ case R_PPC64_ADDR32:
+ checkIntUInt(loc, val, 32, originalType);
+ write32(loc, val);
break;
case R_PPC64_ADDR16_DS:
case R_PPC64_TPREL16_DS: {
- checkInt(Loc, Val, 16, OriginalType);
+ checkInt(loc, val, 16, originalType);
// DQ-form instructions use bits 28-31 as part of the instruction encoding
// DS-form instructions only use bits 30-31.
- uint16_t Mask = isDQFormInstruction(readInstrFromHalf16(Loc)) ? 0xF : 0x3;
- checkAlignment(Loc, lo(Val), Mask + 1, OriginalType);
- write16(Loc, (read16(Loc) & Mask) | lo(Val));
+ uint16_t mask = isDQFormInstruction(readFromHalf16(loc)) ? 0xf : 0x3;
+ checkAlignment(loc, lo(val), mask + 1, originalType);
+ write16(loc, (read16(loc) & mask) | lo(val));
} break;
case R_PPC64_ADDR16_HA:
case R_PPC64_REL16_HA:
case R_PPC64_TPREL16_HA:
- if (Config->TocOptimize && ShouldTocOptimize && ha(Val) == 0)
- writeInstrFromHalf16(Loc, 0x60000000);
+ if (config->tocOptimize && shouldTocOptimize && ha(val) == 0)
+ writeFromHalf16(loc, 0x60000000);
else
- write16(Loc, ha(Val));
+ write16(loc, ha(val));
break;
case R_PPC64_ADDR16_HI:
case R_PPC64_REL16_HI:
case R_PPC64_TPREL16_HI:
- write16(Loc, hi(Val));
+ write16(loc, hi(val));
break;
case R_PPC64_ADDR16_HIGHER:
case R_PPC64_TPREL16_HIGHER:
- write16(Loc, higher(Val));
+ write16(loc, higher(val));
break;
case R_PPC64_ADDR16_HIGHERA:
case R_PPC64_TPREL16_HIGHERA:
- write16(Loc, highera(Val));
+ write16(loc, highera(val));
break;
case R_PPC64_ADDR16_HIGHEST:
case R_PPC64_TPREL16_HIGHEST:
- write16(Loc, highest(Val));
+ write16(loc, highest(val));
break;
case R_PPC64_ADDR16_HIGHESTA:
case R_PPC64_TPREL16_HIGHESTA:
- write16(Loc, highesta(Val));
+ write16(loc, highesta(val));
break;
case R_PPC64_ADDR16_LO:
case R_PPC64_REL16_LO:
@@ -678,89 +807,93 @@ void PPC64::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
// When the high-adjusted part of a toc relocation evalutes to 0, it is
// changed into a nop. The lo part then needs to be updated to use the
// toc-pointer register r2, as the base register.
- if (Config->TocOptimize && ShouldTocOptimize && ha(Val) == 0) {
- uint32_t Instr = readInstrFromHalf16(Loc);
- if (isInstructionUpdateForm(Instr))
- error(getErrorLocation(Loc) +
+ if (config->tocOptimize && shouldTocOptimize && ha(val) == 0) {
+ uint32_t insn = readFromHalf16(loc);
+ if (isInstructionUpdateForm(insn))
+ error(getErrorLocation(loc) +
"can't toc-optimize an update instruction: 0x" +
- utohexstr(Instr));
- Instr = (Instr & 0xFFE00000) | 0x00020000;
- writeInstrFromHalf16(Loc, Instr);
+ utohexstr(insn));
+ writeFromHalf16(loc, (insn & 0xffe00000) | 0x00020000 | lo(val));
+ } else {
+ write16(loc, lo(val));
}
- write16(Loc, lo(Val));
break;
case R_PPC64_ADDR16_LO_DS:
case R_PPC64_TPREL16_LO_DS: {
// DQ-form instructions use bits 28-31 as part of the instruction encoding
// DS-form instructions only use bits 30-31.
- uint32_t Inst = readInstrFromHalf16(Loc);
- uint16_t Mask = isDQFormInstruction(Inst) ? 0xF : 0x3;
- checkAlignment(Loc, lo(Val), Mask + 1, OriginalType);
- if (Config->TocOptimize && ShouldTocOptimize && ha(Val) == 0) {
+ uint32_t insn = readFromHalf16(loc);
+ uint16_t mask = isDQFormInstruction(insn) ? 0xf : 0x3;
+ checkAlignment(loc, lo(val), mask + 1, originalType);
+ if (config->tocOptimize && shouldTocOptimize && ha(val) == 0) {
// When the high-adjusted part of a toc relocation evalutes to 0, it is
// changed into a nop. The lo part then needs to be updated to use the toc
// pointer register r2, as the base register.
- if (isInstructionUpdateForm(Inst))
- error(getErrorLocation(Loc) +
+ if (isInstructionUpdateForm(insn))
+ error(getErrorLocation(loc) +
"Can't toc-optimize an update instruction: 0x" +
- Twine::utohexstr(Inst));
- Inst = (Inst & 0xFFE0000F) | 0x00020000;
- writeInstrFromHalf16(Loc, Inst);
+ Twine::utohexstr(insn));
+ insn &= 0xffe00000 | mask;
+ writeFromHalf16(loc, insn | 0x00020000 | lo(val));
+ } else {
+ write16(loc, (read16(loc) & mask) | lo(val));
}
- write16(Loc, (read16(Loc) & Mask) | lo(Val));
} break;
- case R_PPC64_ADDR32:
+ case R_PPC64_TPREL16:
+ checkInt(loc, val, 16, originalType);
+ write16(loc, val);
+ break;
case R_PPC64_REL32:
- checkInt(Loc, Val, 32, Type);
- write32(Loc, Val);
+ checkInt(loc, val, 32, type);
+ write32(loc, val);
break;
case R_PPC64_ADDR64:
case R_PPC64_REL64:
case R_PPC64_TOC:
- write64(Loc, Val);
+ write64(loc, val);
break;
case R_PPC64_REL14: {
- uint32_t Mask = 0x0000FFFC;
- checkInt(Loc, Val, 16, Type);
- checkAlignment(Loc, Val, 4, Type);
- write32(Loc, (read32(Loc) & ~Mask) | (Val & Mask));
+ uint32_t mask = 0x0000FFFC;
+ checkInt(loc, val, 16, type);
+ checkAlignment(loc, val, 4, type);
+ write32(loc, (read32(loc) & ~mask) | (val & mask));
break;
}
case R_PPC64_REL24: {
- uint32_t Mask = 0x03FFFFFC;
- checkInt(Loc, Val, 26, Type);
- checkAlignment(Loc, Val, 4, Type);
- write32(Loc, (read32(Loc) & ~Mask) | (Val & Mask));
+ uint32_t mask = 0x03FFFFFC;
+ checkInt(loc, val, 26, type);
+ checkAlignment(loc, val, 4, type);
+ write32(loc, (read32(loc) & ~mask) | (val & mask));
break;
}
case R_PPC64_DTPREL64:
- write64(Loc, Val - DynamicThreadPointerOffset);
+ write64(loc, val - dynamicThreadPointerOffset);
break;
default:
- error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+ error(getErrorLocation(loc) + "unrecognized relocation " + toString(type));
}
}
-bool PPC64::needsThunk(RelExpr Expr, RelType Type, const InputFile *File,
- uint64_t BranchAddr, const Symbol &S) const {
- if (Type != R_PPC64_REL14 && Type != R_PPC64_REL24)
+bool PPC64::needsThunk(RelExpr expr, RelType type, const InputFile *file,
+ uint64_t branchAddr, const Symbol &s) const {
+ if (type != R_PPC64_REL14 && type != R_PPC64_REL24)
return false;
// If a function is in the Plt it needs to be called with a call-stub.
- if (S.isInPlt())
+ if (s.isInPlt())
return true;
// If a symbol is a weak undefined and we are compiling an executable
// it doesn't need a range-extending thunk since it can't be called.
- if (S.isUndefWeak() && !Config->Shared)
+ if (s.isUndefWeak() && !config->shared)
return false;
// If the offset exceeds the range of the branch type then it will need
// a range-extending thunk.
// See the comment in getRelocTargetVA() about R_PPC64_CALL.
- return !inBranchRange(Type, BranchAddr,
- S.getVA() +
- getPPC64GlobalEntryToLocalEntryOffset(S.StOther));
+ return !inBranchRange(type, branchAddr,
+ s.getVA() +
+ getPPC64GlobalEntryToLocalEntryOffset(s.stOther));
}
uint32_t PPC64::getThunkSectionSpacing() const {
@@ -771,22 +904,22 @@ uint32_t PPC64::getThunkSectionSpacing() const {
return 0x2000000;
}
-bool PPC64::inBranchRange(RelType Type, uint64_t Src, uint64_t Dst) const {
- int64_t Offset = Dst - Src;
- if (Type == R_PPC64_REL14)
- return isInt<16>(Offset);
- if (Type == R_PPC64_REL24)
- return isInt<26>(Offset);
+bool PPC64::inBranchRange(RelType type, uint64_t src, uint64_t dst) const {
+ int64_t offset = dst - src;
+ if (type == R_PPC64_REL14)
+ return isInt<16>(offset);
+ if (type == R_PPC64_REL24)
+ return isInt<26>(offset);
llvm_unreachable("unsupported relocation type used in branch");
}
-RelExpr PPC64::adjustRelaxExpr(RelType Type, const uint8_t *Data,
- RelExpr Expr) const {
- if (Expr == R_RELAX_TLS_GD_TO_IE)
+RelExpr PPC64::adjustRelaxExpr(RelType type, const uint8_t *data,
+ RelExpr expr) const {
+ if (expr == R_RELAX_TLS_GD_TO_IE)
return R_RELAX_TLS_GD_TO_IE_GOT_OFF;
- if (Expr == R_RELAX_TLS_LD_TO_LE)
+ if (expr == R_RELAX_TLS_LD_TO_LE)
return R_RELAX_TLS_LD_TO_LE_ABS;
- return Expr;
+ return expr;
}
// Reference: 3.7.4.1 of the 64-bit ELF V2 abi supplement.
@@ -806,24 +939,25 @@ RelExpr PPC64::adjustRelaxExpr(RelType Type, const uint8_t *Data,
// thread pointer.
// Since the nop must directly follow the call, the R_PPC64_TLSGD relocation is
// used as the relaxation hint for both steps 2 and 3.
-void PPC64::relaxTlsGdToIe(uint8_t *Loc, RelType Type, uint64_t Val) const {
- switch (Type) {
+void PPC64::relaxTlsGdToIe(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
case R_PPC64_GOT_TLSGD16_HA:
// This is relaxed from addis rT, r2, sym@got@tlsgd@ha to
// addis rT, r2, sym@got@tprel@ha.
- relocateOne(Loc, R_PPC64_GOT_TPREL16_HA, Val);
+ relocateOne(loc, R_PPC64_GOT_TPREL16_HA, val);
return;
+ case R_PPC64_GOT_TLSGD16:
case R_PPC64_GOT_TLSGD16_LO: {
// Relax from addi r3, rA, sym@got@tlsgd@l to
// ld r3, sym@got@tprel@l(rA)
- uint32_t InputRegister = (readInstrFromHalf16(Loc) & (0x1f << 16));
- writeInstrFromHalf16(Loc, 0xE8600000 | InputRegister);
- relocateOne(Loc, R_PPC64_GOT_TPREL16_LO_DS, Val);
+ uint32_t ra = (readFromHalf16(loc) & (0x1f << 16));
+ writeFromHalf16(loc, 0xe8600000 | ra);
+ relocateOne(loc, R_PPC64_GOT_TPREL16_LO_DS, val);
return;
}
case R_PPC64_TLSGD:
- write32(Loc, 0x60000000); // bl __tls_get_addr(sym@tlsgd) --> nop
- write32(Loc + 4, 0x7c636A14); // nop --> add r3, r3, r13
+ write32(loc, 0x60000000); // bl __tls_get_addr(sym@tlsgd) --> nop
+ write32(loc + 4, 0x7c636A14); // nop --> add r3, r3, r13
return;
default:
llvm_unreachable("unsupported relocation for TLS GD to IE relaxation");
@@ -858,86 +992,86 @@ void PPC64::relaxTlsGdToIe(uint8_t *Loc, RelType Type, uint64_t Val) const {
// pair by split-stack-size-adjust.
// addis r12, r1, ha(-stack-frame size - split-stack-adjust-size)
// addi r12, r12, l(-stack-frame size - split-stack-adjust-size)
-bool PPC64::adjustPrologueForCrossSplitStack(uint8_t *Loc, uint8_t *End,
- uint8_t StOther) const {
+bool PPC64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
+ uint8_t stOther) const {
// If the caller has a global entry point adjust the buffer past it. The start
// of the split-stack prologue will be at the local entry point.
- Loc += getPPC64GlobalEntryToLocalEntryOffset(StOther);
+ loc += getPPC64GlobalEntryToLocalEntryOffset(stOther);
// At the very least we expect to see a load of some split-stack data from the
// tcb, and 2 instructions that calculate the ending stack address this
// function will require. If there is not enough room for at least 3
// instructions it can't be a split-stack prologue.
- if (Loc + 12 >= End)
+ if (loc + 12 >= end)
return false;
// First instruction must be `ld r0, -0x7000-64(r13)`
- if (read32(Loc) != 0xe80d8fc0)
+ if (read32(loc) != 0xe80d8fc0)
return false;
- int16_t HiImm = 0;
- int16_t LoImm = 0;
+ int16_t hiImm = 0;
+ int16_t loImm = 0;
// First instruction can be either an addis if the frame size is larger then
// 32K, or an addi if the size is less then 32K.
- int32_t FirstInstr = read32(Loc + 4);
- if (getPrimaryOpCode(FirstInstr) == 15) {
- HiImm = FirstInstr & 0xFFFF;
- } else if (getPrimaryOpCode(FirstInstr) == 14) {
- LoImm = FirstInstr & 0xFFFF;
+ int32_t firstInstr = read32(loc + 4);
+ if (getPrimaryOpCode(firstInstr) == 15) {
+ hiImm = firstInstr & 0xFFFF;
+ } else if (getPrimaryOpCode(firstInstr) == 14) {
+ loImm = firstInstr & 0xFFFF;
} else {
return false;
}
// Second instruction is either an addi or a nop. If the first instruction was
// an addi then LoImm is set and the second instruction must be a nop.
- uint32_t SecondInstr = read32(Loc + 8);
- if (!LoImm && getPrimaryOpCode(SecondInstr) == 14) {
- LoImm = SecondInstr & 0xFFFF;
- } else if (SecondInstr != 0x60000000) {
+ uint32_t secondInstr = read32(loc + 8);
+ if (!loImm && getPrimaryOpCode(secondInstr) == 14) {
+ loImm = secondInstr & 0xFFFF;
+ } else if (secondInstr != 0x60000000) {
return false;
}
// The register operands of the first instruction should be the stack-pointer
// (r1) as the input (RA) and r12 as the output (RT). If the second
// instruction is not a nop, then it should use r12 as both input and output.
- auto CheckRegOperands = [](uint32_t Instr, uint8_t ExpectedRT,
- uint8_t ExpectedRA) {
- return ((Instr & 0x3E00000) >> 21 == ExpectedRT) &&
- ((Instr & 0x1F0000) >> 16 == ExpectedRA);
+ auto checkRegOperands = [](uint32_t instr, uint8_t expectedRT,
+ uint8_t expectedRA) {
+ return ((instr & 0x3E00000) >> 21 == expectedRT) &&
+ ((instr & 0x1F0000) >> 16 == expectedRA);
};
- if (!CheckRegOperands(FirstInstr, 12, 1))
+ if (!checkRegOperands(firstInstr, 12, 1))
return false;
- if (SecondInstr != 0x60000000 && !CheckRegOperands(SecondInstr, 12, 12))
+ if (secondInstr != 0x60000000 && !checkRegOperands(secondInstr, 12, 12))
return false;
- int32_t StackFrameSize = (HiImm * 65536) + LoImm;
+ int32_t stackFrameSize = (hiImm * 65536) + loImm;
// Check that the adjusted size doesn't overflow what we can represent with 2
// instructions.
- if (StackFrameSize < Config->SplitStackAdjustSize + INT32_MIN) {
- error(getErrorLocation(Loc) + "split-stack prologue adjustment overflows");
+ if (stackFrameSize < config->splitStackAdjustSize + INT32_MIN) {
+ error(getErrorLocation(loc) + "split-stack prologue adjustment overflows");
return false;
}
- int32_t AdjustedStackFrameSize =
- StackFrameSize - Config->SplitStackAdjustSize;
+ int32_t adjustedStackFrameSize =
+ stackFrameSize - config->splitStackAdjustSize;
- LoImm = AdjustedStackFrameSize & 0xFFFF;
- HiImm = (AdjustedStackFrameSize + 0x8000) >> 16;
- if (HiImm) {
- write32(Loc + 4, 0x3D810000 | (uint16_t)HiImm);
+ loImm = adjustedStackFrameSize & 0xFFFF;
+ hiImm = (adjustedStackFrameSize + 0x8000) >> 16;
+ if (hiImm) {
+ write32(loc + 4, 0x3D810000 | (uint16_t)hiImm);
// If the low immediate is zero the second instruction will be a nop.
- SecondInstr = LoImm ? 0x398C0000 | (uint16_t)LoImm : 0x60000000;
- write32(Loc + 8, SecondInstr);
+ secondInstr = loImm ? 0x398C0000 | (uint16_t)loImm : 0x60000000;
+ write32(loc + 8, secondInstr);
} else {
// addi r12, r1, imm
- write32(Loc + 4, (0x39810000) | (uint16_t)LoImm);
- write32(Loc + 8, 0x60000000);
+ write32(loc + 4, (0x39810000) | (uint16_t)loImm);
+ write32(loc + 8, 0x60000000);
}
return true;
}
TargetInfo *elf::getPPC64TargetInfo() {
- static PPC64 Target;
- return &Target;
+ static PPC64 target;
+ return &target;
}
diff --git a/contrib/llvm/tools/lld/ELF/Arch/RISCV.cpp b/contrib/llvm/tools/lld/ELF/Arch/RISCV.cpp
index 461e8d35c3e6..6f16ade57177 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/RISCV.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/RISCV.cpp
@@ -1,13 +1,13 @@
//===- RISCV.cpp ----------------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "InputFiles.h"
+#include "SyntheticSections.h"
#include "Target.h"
using namespace llvm;
@@ -23,59 +23,207 @@ class RISCV final : public TargetInfo {
public:
RISCV();
uint32_t calcEFlags() const override;
- RelExpr getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const override;
- void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
+ void writeGotHeader(uint8_t *buf) const override;
+ void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writePltHeader(uint8_t *buf) const override;
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
+ RelType getDynRel(RelType type) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
};
} // end anonymous namespace
-RISCV::RISCV() { NoneRel = R_RISCV_NONE; }
+const uint64_t dtpOffset = 0x800;
+
+enum Op {
+ ADDI = 0x13,
+ AUIPC = 0x17,
+ JALR = 0x67,
+ LD = 0x3003,
+ LW = 0x2003,
+ SRLI = 0x5013,
+ SUB = 0x40000033,
+};
+
+enum Reg {
+ X_RA = 1,
+ X_T0 = 5,
+ X_T1 = 6,
+ X_T2 = 7,
+ X_T3 = 28,
+};
+
+static uint32_t hi20(uint32_t val) { return (val + 0x800) >> 12; }
+static uint32_t lo12(uint32_t val) { return val & 4095; }
+
+static uint32_t itype(uint32_t op, uint32_t rd, uint32_t rs1, uint32_t imm) {
+ return op | (rd << 7) | (rs1 << 15) | (imm << 20);
+}
+static uint32_t rtype(uint32_t op, uint32_t rd, uint32_t rs1, uint32_t rs2) {
+ return op | (rd << 7) | (rs1 << 15) | (rs2 << 20);
+}
+static uint32_t utype(uint32_t op, uint32_t rd, uint32_t imm) {
+ return op | (rd << 7) | (imm << 12);
+}
+
+RISCV::RISCV() {
+ copyRel = R_RISCV_COPY;
+ noneRel = R_RISCV_NONE;
+ pltRel = R_RISCV_JUMP_SLOT;
+ relativeRel = R_RISCV_RELATIVE;
+ if (config->is64) {
+ symbolicRel = R_RISCV_64;
+ tlsModuleIndexRel = R_RISCV_TLS_DTPMOD64;
+ tlsOffsetRel = R_RISCV_TLS_DTPREL64;
+ tlsGotRel = R_RISCV_TLS_TPREL64;
+ } else {
+ symbolicRel = R_RISCV_32;
+ tlsModuleIndexRel = R_RISCV_TLS_DTPMOD32;
+ tlsOffsetRel = R_RISCV_TLS_DTPREL32;
+ tlsGotRel = R_RISCV_TLS_TPREL32;
+ }
+ gotRel = symbolicRel;
+
+ // .got[0] = _DYNAMIC
+ gotBaseSymInGotPlt = false;
+ gotHeaderEntriesNum = 1;
+
+ // .got.plt[0] = _dl_runtime_resolve, .got.plt[1] = link_map
+ gotPltHeaderEntriesNum = 2;
-static uint32_t getEFlags(InputFile *F) {
- if (Config->Is64)
- return cast<ObjFile<ELF64LE>>(F)->getObj().getHeader()->e_flags;
- return cast<ObjFile<ELF32LE>>(F)->getObj().getHeader()->e_flags;
+ pltEntrySize = 16;
+ pltHeaderSize = 32;
+}
+
+static uint32_t getEFlags(InputFile *f) {
+ if (config->is64)
+ return cast<ObjFile<ELF64LE>>(f)->getObj().getHeader()->e_flags;
+ return cast<ObjFile<ELF32LE>>(f)->getObj().getHeader()->e_flags;
}
uint32_t RISCV::calcEFlags() const {
- assert(!ObjectFiles.empty());
+ assert(!objectFiles.empty());
- uint32_t Target = getEFlags(ObjectFiles.front());
+ uint32_t target = getEFlags(objectFiles.front());
- for (InputFile *F : ObjectFiles) {
- uint32_t EFlags = getEFlags(F);
- if (EFlags & EF_RISCV_RVC)
- Target |= EF_RISCV_RVC;
+ for (InputFile *f : objectFiles) {
+ uint32_t eflags = getEFlags(f);
+ if (eflags & EF_RISCV_RVC)
+ target |= EF_RISCV_RVC;
- if ((EFlags & EF_RISCV_FLOAT_ABI) != (Target & EF_RISCV_FLOAT_ABI))
- error(toString(F) +
+ if ((eflags & EF_RISCV_FLOAT_ABI) != (target & EF_RISCV_FLOAT_ABI))
+ error(toString(f) +
": cannot link object files with different floating-point ABI");
- if ((EFlags & EF_RISCV_RVE) != (Target & EF_RISCV_RVE))
- error(toString(F) +
+ if ((eflags & EF_RISCV_RVE) != (target & EF_RISCV_RVE))
+ error(toString(f) +
": cannot link object files with different EF_RISCV_RVE");
}
- return Target;
+ return target;
+}
+
+void RISCV::writeGotHeader(uint8_t *buf) const {
+ if (config->is64)
+ write64le(buf, mainPart->dynamic->getVA());
+ else
+ write32le(buf, mainPart->dynamic->getVA());
+}
+
+void RISCV::writeGotPlt(uint8_t *buf, const Symbol &s) const {
+ if (config->is64)
+ write64le(buf, in.plt->getVA());
+ else
+ write32le(buf, in.plt->getVA());
+}
+
+void RISCV::writePltHeader(uint8_t *buf) const {
+ // 1: auipc t2, %pcrel_hi(.got.plt)
+ // sub t1, t1, t3
+ // l[wd] t3, %pcrel_lo(1b)(t2); t3 = _dl_runtime_resolve
+ // addi t1, t1, -pltHeaderSize-12; t1 = &.plt[i] - &.plt[0]
+ // addi t0, t2, %pcrel_lo(1b)
+ // srli t1, t1, (rv64?1:2); t1 = &.got.plt[i] - &.got.plt[0]
+ // l[wd] t0, Wordsize(t0); t0 = link_map
+ // jr t3
+ uint32_t offset = in.gotPlt->getVA() - in.plt->getVA();
+ uint32_t load = config->is64 ? LD : LW;
+ write32le(buf + 0, utype(AUIPC, X_T2, hi20(offset)));
+ write32le(buf + 4, rtype(SUB, X_T1, X_T1, X_T3));
+ write32le(buf + 8, itype(load, X_T3, X_T2, lo12(offset)));
+ write32le(buf + 12, itype(ADDI, X_T1, X_T1, -target->pltHeaderSize - 12));
+ write32le(buf + 16, itype(ADDI, X_T0, X_T2, lo12(offset)));
+ write32le(buf + 20, itype(SRLI, X_T1, X_T1, config->is64 ? 1 : 2));
+ write32le(buf + 24, itype(load, X_T0, X_T0, config->wordsize));
+ write32le(buf + 28, itype(JALR, 0, X_T3, 0));
}
-RelExpr RISCV::getRelExpr(const RelType Type, const Symbol &S,
- const uint8_t *Loc) const {
- switch (Type) {
+void RISCV::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
+ // 1: auipc t3, %pcrel_hi(f@.got.plt)
+ // l[wd] t3, %pcrel_lo(1b)(t3)
+ // jalr t1, t3
+ // nop
+ uint32_t offset = gotPltEntryAddr - pltEntryAddr;
+ write32le(buf + 0, utype(AUIPC, X_T3, hi20(offset)));
+ write32le(buf + 4, itype(config->is64 ? LD : LW, X_T3, X_T3, lo12(offset)));
+ write32le(buf + 8, itype(JALR, X_T1, X_T3, 0));
+ write32le(buf + 12, itype(ADDI, 0, 0, 0));
+}
+
+RelType RISCV::getDynRel(RelType type) const {
+ return type == target->symbolicRel ? type
+ : static_cast<RelType>(R_RISCV_NONE);
+}
+
+RelExpr RISCV::getRelExpr(const RelType type, const Symbol &s,
+ const uint8_t *loc) const {
+ switch (type) {
+ case R_RISCV_ADD8:
+ case R_RISCV_ADD16:
+ case R_RISCV_ADD32:
+ case R_RISCV_ADD64:
+ case R_RISCV_SET6:
+ case R_RISCV_SET8:
+ case R_RISCV_SET16:
+ case R_RISCV_SET32:
+ case R_RISCV_SUB6:
+ case R_RISCV_SUB8:
+ case R_RISCV_SUB16:
+ case R_RISCV_SUB32:
+ case R_RISCV_SUB64:
+ return R_RISCV_ADD;
case R_RISCV_JAL:
case R_RISCV_BRANCH:
- case R_RISCV_CALL:
case R_RISCV_PCREL_HI20:
case R_RISCV_RVC_BRANCH:
case R_RISCV_RVC_JUMP:
case R_RISCV_32_PCREL:
return R_PC;
+ case R_RISCV_CALL:
+ case R_RISCV_CALL_PLT:
+ return R_PLT_PC;
+ case R_RISCV_GOT_HI20:
+ return R_GOT_PC;
case R_RISCV_PCREL_LO12_I:
case R_RISCV_PCREL_LO12_S:
return R_RISCV_PC_INDIRECT;
+ case R_RISCV_TLS_GD_HI20:
+ return R_TLSGD_PC;
+ case R_RISCV_TLS_GOT_HI20:
+ config->hasStaticTlsModel = true;
+ return R_GOT_PC;
+ case R_RISCV_TPREL_HI20:
+ case R_RISCV_TPREL_LO12_I:
+ case R_RISCV_TPREL_LO12_S:
+ return R_TLS;
case R_RISCV_RELAX:
case R_RISCV_ALIGN:
+ case R_RISCV_TPREL_ADD:
return R_HINT;
default:
return R_ABS;
@@ -83,175 +231,190 @@ RelExpr RISCV::getRelExpr(const RelType Type, const Symbol &S,
}
// Extract bits V[Begin:End], where range is inclusive, and Begin must be < 63.
-static uint32_t extractBits(uint64_t V, uint32_t Begin, uint32_t End) {
- return (V & ((1ULL << (Begin + 1)) - 1)) >> End;
+static uint32_t extractBits(uint64_t v, uint32_t begin, uint32_t end) {
+ return (v & ((1ULL << (begin + 1)) - 1)) >> end;
}
-void RISCV::relocateOne(uint8_t *Loc, const RelType Type,
- const uint64_t Val) const {
- switch (Type) {
+void RISCV::relocateOne(uint8_t *loc, const RelType type,
+ const uint64_t val) const {
+ const unsigned bits = config->wordsize * 8;
+
+ switch (type) {
case R_RISCV_32:
- write32le(Loc, Val);
+ write32le(loc, val);
return;
case R_RISCV_64:
- write64le(Loc, Val);
+ write64le(loc, val);
return;
case R_RISCV_RVC_BRANCH: {
- checkInt(Loc, static_cast<int64_t>(Val) >> 1, 8, Type);
- checkAlignment(Loc, Val, 2, Type);
- uint16_t Insn = read16le(Loc) & 0xE383;
- uint16_t Imm8 = extractBits(Val, 8, 8) << 12;
- uint16_t Imm4_3 = extractBits(Val, 4, 3) << 10;
- uint16_t Imm7_6 = extractBits(Val, 7, 6) << 5;
- uint16_t Imm2_1 = extractBits(Val, 2, 1) << 3;
- uint16_t Imm5 = extractBits(Val, 5, 5) << 2;
- Insn |= Imm8 | Imm4_3 | Imm7_6 | Imm2_1 | Imm5;
-
- write16le(Loc, Insn);
+ checkInt(loc, static_cast<int64_t>(val) >> 1, 8, type);
+ checkAlignment(loc, val, 2, type);
+ uint16_t insn = read16le(loc) & 0xE383;
+ uint16_t imm8 = extractBits(val, 8, 8) << 12;
+ uint16_t imm4_3 = extractBits(val, 4, 3) << 10;
+ uint16_t imm7_6 = extractBits(val, 7, 6) << 5;
+ uint16_t imm2_1 = extractBits(val, 2, 1) << 3;
+ uint16_t imm5 = extractBits(val, 5, 5) << 2;
+ insn |= imm8 | imm4_3 | imm7_6 | imm2_1 | imm5;
+
+ write16le(loc, insn);
return;
}
case R_RISCV_RVC_JUMP: {
- checkInt(Loc, static_cast<int64_t>(Val) >> 1, 11, Type);
- checkAlignment(Loc, Val, 2, Type);
- uint16_t Insn = read16le(Loc) & 0xE003;
- uint16_t Imm11 = extractBits(Val, 11, 11) << 12;
- uint16_t Imm4 = extractBits(Val, 4, 4) << 11;
- uint16_t Imm9_8 = extractBits(Val, 9, 8) << 9;
- uint16_t Imm10 = extractBits(Val, 10, 10) << 8;
- uint16_t Imm6 = extractBits(Val, 6, 6) << 7;
- uint16_t Imm7 = extractBits(Val, 7, 7) << 6;
- uint16_t Imm3_1 = extractBits(Val, 3, 1) << 3;
- uint16_t Imm5 = extractBits(Val, 5, 5) << 2;
- Insn |= Imm11 | Imm4 | Imm9_8 | Imm10 | Imm6 | Imm7 | Imm3_1 | Imm5;
-
- write16le(Loc, Insn);
+ checkInt(loc, static_cast<int64_t>(val) >> 1, 11, type);
+ checkAlignment(loc, val, 2, type);
+ uint16_t insn = read16le(loc) & 0xE003;
+ uint16_t imm11 = extractBits(val, 11, 11) << 12;
+ uint16_t imm4 = extractBits(val, 4, 4) << 11;
+ uint16_t imm9_8 = extractBits(val, 9, 8) << 9;
+ uint16_t imm10 = extractBits(val, 10, 10) << 8;
+ uint16_t imm6 = extractBits(val, 6, 6) << 7;
+ uint16_t imm7 = extractBits(val, 7, 7) << 6;
+ uint16_t imm3_1 = extractBits(val, 3, 1) << 3;
+ uint16_t imm5 = extractBits(val, 5, 5) << 2;
+ insn |= imm11 | imm4 | imm9_8 | imm10 | imm6 | imm7 | imm3_1 | imm5;
+
+ write16le(loc, insn);
return;
}
case R_RISCV_RVC_LUI: {
- int32_t Imm = ((Val + 0x800) >> 12);
- checkUInt(Loc, Imm, 6, Type);
- if (Imm == 0) { // `c.lui rd, 0` is illegal, convert to `c.li rd, 0`
- write16le(Loc, (read16le(Loc) & 0x0F83) | 0x4000);
+ int64_t imm = SignExtend64(val + 0x800, bits) >> 12;
+ checkInt(loc, imm, 6, type);
+ if (imm == 0) { // `c.lui rd, 0` is illegal, convert to `c.li rd, 0`
+ write16le(loc, (read16le(loc) & 0x0F83) | 0x4000);
} else {
- uint16_t Imm17 = extractBits(Val + 0x800, 17, 17) << 12;
- uint16_t Imm16_12 = extractBits(Val + 0x800, 16, 12) << 2;
- write16le(Loc, (read16le(Loc) & 0xEF83) | Imm17 | Imm16_12);
+ uint16_t imm17 = extractBits(val + 0x800, 17, 17) << 12;
+ uint16_t imm16_12 = extractBits(val + 0x800, 16, 12) << 2;
+ write16le(loc, (read16le(loc) & 0xEF83) | imm17 | imm16_12);
}
return;
}
case R_RISCV_JAL: {
- checkInt(Loc, static_cast<int64_t>(Val) >> 1, 20, Type);
- checkAlignment(Loc, Val, 2, Type);
+ checkInt(loc, static_cast<int64_t>(val) >> 1, 20, type);
+ checkAlignment(loc, val, 2, type);
- uint32_t Insn = read32le(Loc) & 0xFFF;
- uint32_t Imm20 = extractBits(Val, 20, 20) << 31;
- uint32_t Imm10_1 = extractBits(Val, 10, 1) << 21;
- uint32_t Imm11 = extractBits(Val, 11, 11) << 20;
- uint32_t Imm19_12 = extractBits(Val, 19, 12) << 12;
- Insn |= Imm20 | Imm10_1 | Imm11 | Imm19_12;
+ uint32_t insn = read32le(loc) & 0xFFF;
+ uint32_t imm20 = extractBits(val, 20, 20) << 31;
+ uint32_t imm10_1 = extractBits(val, 10, 1) << 21;
+ uint32_t imm11 = extractBits(val, 11, 11) << 20;
+ uint32_t imm19_12 = extractBits(val, 19, 12) << 12;
+ insn |= imm20 | imm10_1 | imm11 | imm19_12;
- write32le(Loc, Insn);
+ write32le(loc, insn);
return;
}
case R_RISCV_BRANCH: {
- checkInt(Loc, static_cast<int64_t>(Val) >> 1, 12, Type);
- checkAlignment(Loc, Val, 2, Type);
+ checkInt(loc, static_cast<int64_t>(val) >> 1, 12, type);
+ checkAlignment(loc, val, 2, type);
- uint32_t Insn = read32le(Loc) & 0x1FFF07F;
- uint32_t Imm12 = extractBits(Val, 12, 12) << 31;
- uint32_t Imm10_5 = extractBits(Val, 10, 5) << 25;
- uint32_t Imm4_1 = extractBits(Val, 4, 1) << 8;
- uint32_t Imm11 = extractBits(Val, 11, 11) << 7;
- Insn |= Imm12 | Imm10_5 | Imm4_1 | Imm11;
+ uint32_t insn = read32le(loc) & 0x1FFF07F;
+ uint32_t imm12 = extractBits(val, 12, 12) << 31;
+ uint32_t imm10_5 = extractBits(val, 10, 5) << 25;
+ uint32_t imm4_1 = extractBits(val, 4, 1) << 8;
+ uint32_t imm11 = extractBits(val, 11, 11) << 7;
+ insn |= imm12 | imm10_5 | imm4_1 | imm11;
- write32le(Loc, Insn);
+ write32le(loc, insn);
return;
}
// auipc + jalr pair
- case R_RISCV_CALL: {
- checkInt(Loc, Val, 32, Type);
- if (isInt<32>(Val)) {
- relocateOne(Loc, R_RISCV_PCREL_HI20, Val);
- relocateOne(Loc + 4, R_RISCV_PCREL_LO12_I, Val);
+ case R_RISCV_CALL:
+ case R_RISCV_CALL_PLT: {
+ int64_t hi = SignExtend64(val + 0x800, bits) >> 12;
+ checkInt(loc, hi, 20, type);
+ if (isInt<20>(hi)) {
+ relocateOne(loc, R_RISCV_PCREL_HI20, val);
+ relocateOne(loc + 4, R_RISCV_PCREL_LO12_I, val);
}
return;
}
+ case R_RISCV_GOT_HI20:
case R_RISCV_PCREL_HI20:
+ case R_RISCV_TLS_GD_HI20:
+ case R_RISCV_TLS_GOT_HI20:
+ case R_RISCV_TPREL_HI20:
case R_RISCV_HI20: {
- checkInt(Loc, Val, 32, Type);
- uint32_t Hi = Val + 0x800;
- write32le(Loc, (read32le(Loc) & 0xFFF) | (Hi & 0xFFFFF000));
+ uint64_t hi = val + 0x800;
+ checkInt(loc, SignExtend64(hi, bits) >> 12, 20, type);
+ write32le(loc, (read32le(loc) & 0xFFF) | (hi & 0xFFFFF000));
return;
}
case R_RISCV_PCREL_LO12_I:
+ case R_RISCV_TPREL_LO12_I:
case R_RISCV_LO12_I: {
- checkInt(Loc, Val, 32, Type);
- uint32_t Hi = Val + 0x800;
- uint32_t Lo = Val - (Hi & 0xFFFFF000);
- write32le(Loc, (read32le(Loc) & 0xFFFFF) | ((Lo & 0xFFF) << 20));
+ uint64_t hi = (val + 0x800) >> 12;
+ uint64_t lo = val - (hi << 12);
+ write32le(loc, (read32le(loc) & 0xFFFFF) | ((lo & 0xFFF) << 20));
return;
}
case R_RISCV_PCREL_LO12_S:
+ case R_RISCV_TPREL_LO12_S:
case R_RISCV_LO12_S: {
- checkInt(Loc, Val, 32, Type);
- uint32_t Hi = Val + 0x800;
- uint32_t Lo = Val - (Hi & 0xFFFFF000);
- uint32_t Imm11_5 = extractBits(Lo, 11, 5) << 25;
- uint32_t Imm4_0 = extractBits(Lo, 4, 0) << 7;
- write32le(Loc, (read32le(Loc) & 0x1FFF07F) | Imm11_5 | Imm4_0);
+ uint64_t hi = (val + 0x800) >> 12;
+ uint64_t lo = val - (hi << 12);
+ uint32_t imm11_5 = extractBits(lo, 11, 5) << 25;
+ uint32_t imm4_0 = extractBits(lo, 4, 0) << 7;
+ write32le(loc, (read32le(loc) & 0x1FFF07F) | imm11_5 | imm4_0);
return;
}
case R_RISCV_ADD8:
- *Loc += Val;
+ *loc += val;
return;
case R_RISCV_ADD16:
- write16le(Loc, read16le(Loc) + Val);
+ write16le(loc, read16le(loc) + val);
return;
case R_RISCV_ADD32:
- write32le(Loc, read32le(Loc) + Val);
+ write32le(loc, read32le(loc) + val);
return;
case R_RISCV_ADD64:
- write64le(Loc, read64le(Loc) + Val);
+ write64le(loc, read64le(loc) + val);
return;
case R_RISCV_SUB6:
- *Loc = (*Loc & 0xc0) | (((*Loc & 0x3f) - Val) & 0x3f);
+ *loc = (*loc & 0xc0) | (((*loc & 0x3f) - val) & 0x3f);
return;
case R_RISCV_SUB8:
- *Loc -= Val;
+ *loc -= val;
return;
case R_RISCV_SUB16:
- write16le(Loc, read16le(Loc) - Val);
+ write16le(loc, read16le(loc) - val);
return;
case R_RISCV_SUB32:
- write32le(Loc, read32le(Loc) - Val);
+ write32le(loc, read32le(loc) - val);
return;
case R_RISCV_SUB64:
- write64le(Loc, read64le(Loc) - Val);
+ write64le(loc, read64le(loc) - val);
return;
case R_RISCV_SET6:
- *Loc = (*Loc & 0xc0) | (Val & 0x3f);
+ *loc = (*loc & 0xc0) | (val & 0x3f);
return;
case R_RISCV_SET8:
- *Loc = Val;
+ *loc = val;
return;
case R_RISCV_SET16:
- write16le(Loc, Val);
+ write16le(loc, val);
return;
case R_RISCV_SET32:
case R_RISCV_32_PCREL:
- write32le(Loc, Val);
+ write32le(loc, val);
return;
+ case R_RISCV_TLS_DTPREL32:
+ write32le(loc, val - dtpOffset);
+ break;
+ case R_RISCV_TLS_DTPREL64:
+ write64le(loc, val - dtpOffset);
+ break;
+
case R_RISCV_ALIGN:
case R_RISCV_RELAX:
return; // Ignored (for now)
@@ -267,13 +430,13 @@ void RISCV::relocateOne(uint8_t *Loc, const RelType Type,
case R_RISCV_GPREL_I:
case R_RISCV_GPREL_S:
default:
- error(getErrorLocation(Loc) +
- "unimplemented relocation: " + toString(Type));
+ error(getErrorLocation(loc) +
+ "unimplemented relocation: " + toString(type));
return;
}
}
TargetInfo *elf::getRISCVTargetInfo() {
- static RISCV Target;
- return &Target;
+ static RISCV target;
+ return &target;
}
diff --git a/contrib/llvm/tools/lld/ELF/Arch/SPARCV9.cpp b/contrib/llvm/tools/lld/ELF/Arch/SPARCV9.cpp
index 831aa2028e7f..5299206dd919 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/SPARCV9.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/SPARCV9.cpp
@@ -1,9 +1,8 @@
//===- SPARCV9.cpp --------------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -24,32 +23,32 @@ namespace {
class SPARCV9 final : public TargetInfo {
public:
SPARCV9();
- RelExpr getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const override;
- void writePlt(uint8_t *Buf, uint64_t GotEntryAddr, uint64_t PltEntryAddr,
- int32_t Index, unsigned RelOff) const override;
- void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ void writePlt(uint8_t *buf, uint64_t gotEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
+ void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
};
} // namespace
SPARCV9::SPARCV9() {
- CopyRel = R_SPARC_COPY;
- GotRel = R_SPARC_GLOB_DAT;
- NoneRel = R_SPARC_NONE;
- PltRel = R_SPARC_JMP_SLOT;
- RelativeRel = R_SPARC_RELATIVE;
- GotEntrySize = 8;
- PltEntrySize = 32;
- PltHeaderSize = 4 * PltEntrySize;
+ copyRel = R_SPARC_COPY;
+ gotRel = R_SPARC_GLOB_DAT;
+ noneRel = R_SPARC_NONE;
+ pltRel = R_SPARC_JMP_SLOT;
+ relativeRel = R_SPARC_RELATIVE;
+ symbolicRel = R_SPARC_64;
+ pltEntrySize = 32;
+ pltHeaderSize = 4 * pltEntrySize;
- PageSize = 8192;
- DefaultMaxPageSize = 0x100000;
- DefaultImageBase = 0x100000;
+ defaultCommonPageSize = 8192;
+ defaultMaxPageSize = 0x100000;
+ defaultImageBase = 0x100000;
}
-RelExpr SPARCV9::getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const {
- switch (Type) {
+RelExpr SPARCV9::getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const {
+ switch (type) {
case R_SPARC_32:
case R_SPARC_UA32:
case R_SPARC_64:
@@ -69,64 +68,65 @@ RelExpr SPARCV9::getRelExpr(RelType Type, const Symbol &S,
case R_SPARC_NONE:
return R_NONE;
default:
- return R_INVALID;
+ error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
+ ") against symbol " + toString(s));
+ return R_NONE;
}
}
-void SPARCV9::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
- switch (Type) {
+void SPARCV9::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
case R_SPARC_32:
case R_SPARC_UA32:
// V-word32
- checkUInt(Loc, Val, 32, Type);
- write32be(Loc, Val);
+ checkUInt(loc, val, 32, type);
+ write32be(loc, val);
break;
case R_SPARC_DISP32:
// V-disp32
- checkInt(Loc, Val, 32, Type);
- write32be(Loc, Val);
+ checkInt(loc, val, 32, type);
+ write32be(loc, val);
break;
case R_SPARC_WDISP30:
case R_SPARC_WPLT30:
// V-disp30
- checkInt(Loc, Val, 32, Type);
- write32be(Loc, (read32be(Loc) & ~0x3fffffff) | ((Val >> 2) & 0x3fffffff));
+ checkInt(loc, val, 32, type);
+ write32be(loc, (read32be(loc) & ~0x3fffffff) | ((val >> 2) & 0x3fffffff));
break;
case R_SPARC_22:
// V-imm22
- checkUInt(Loc, Val, 22, Type);
- write32be(Loc, (read32be(Loc) & ~0x003fffff) | (Val & 0x003fffff));
+ checkUInt(loc, val, 22, type);
+ write32be(loc, (read32be(loc) & ~0x003fffff) | (val & 0x003fffff));
break;
case R_SPARC_GOT22:
case R_SPARC_PC22:
// T-imm22
- write32be(Loc, (read32be(Loc) & ~0x003fffff) | ((Val >> 10) & 0x003fffff));
+ write32be(loc, (read32be(loc) & ~0x003fffff) | ((val >> 10) & 0x003fffff));
break;
case R_SPARC_WDISP19:
// V-disp19
- checkInt(Loc, Val, 21, Type);
- write32be(Loc, (read32be(Loc) & ~0x0007ffff) | ((Val >> 2) & 0x0007ffff));
+ checkInt(loc, val, 21, type);
+ write32be(loc, (read32be(loc) & ~0x0007ffff) | ((val >> 2) & 0x0007ffff));
break;
case R_SPARC_GOT10:
case R_SPARC_PC10:
// T-simm10
- write32be(Loc, (read32be(Loc) & ~0x000003ff) | (Val & 0x000003ff));
+ write32be(loc, (read32be(loc) & ~0x000003ff) | (val & 0x000003ff));
break;
case R_SPARC_64:
case R_SPARC_UA64:
- case R_SPARC_GLOB_DAT:
// V-xword64
- write64be(Loc, Val);
+ write64be(loc, val);
break;
default:
- error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+ llvm_unreachable("unknown relocation");
}
}
-void SPARCV9::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
- uint64_t PltEntryAddr, int32_t Index,
- unsigned RelOff) const {
- const uint8_t PltData[] = {
+void SPARCV9::writePlt(uint8_t *buf, uint64_t gotEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
+ const uint8_t pltData[] = {
0x03, 0x00, 0x00, 0x00, // sethi (. - .PLT0), %g1
0x30, 0x68, 0x00, 0x00, // ba,a %xcc, .PLT1
0x01, 0x00, 0x00, 0x00, // nop
@@ -136,14 +136,14 @@ void SPARCV9::writePlt(uint8_t *Buf, uint64_t GotEntryAddr,
0x01, 0x00, 0x00, 0x00, // nop
0x01, 0x00, 0x00, 0x00 // nop
};
- memcpy(Buf, PltData, sizeof(PltData));
+ memcpy(buf, pltData, sizeof(pltData));
- uint64_t Off = getPltEntryOffset(Index);
- relocateOne(Buf, R_SPARC_22, Off);
- relocateOne(Buf + 4, R_SPARC_WDISP19, -(Off + 4 - PltEntrySize));
+ uint64_t off = pltHeaderSize + pltEntrySize * index;
+ relocateOne(buf, R_SPARC_22, off);
+ relocateOne(buf + 4, R_SPARC_WDISP19, -(off + 4 - pltEntrySize));
}
TargetInfo *elf::getSPARCV9TargetInfo() {
- static SPARCV9 Target;
- return &Target;
+ static SPARCV9 target;
+ return &target;
}
diff --git a/contrib/llvm/tools/lld/ELF/Arch/X86.cpp b/contrib/llvm/tools/lld/ELF/Arch/X86.cpp
index b9a697277cc1..e1dd231e8e8d 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/X86.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/X86.cpp
@@ -1,9 +1,8 @@
//===- X86.cpp ------------------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -24,71 +23,73 @@ namespace {
class X86 : public TargetInfo {
public:
X86();
- RelExpr getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const override;
- int64_t getImplicitAddend(const uint8_t *Buf, RelType Type) const override;
- void writeGotPltHeader(uint8_t *Buf) const override;
- RelType getDynRel(RelType Type) const override;
- void writeGotPlt(uint8_t *Buf, const Symbol &S) const override;
- void writeIgotPlt(uint8_t *Buf, const Symbol &S) const override;
- void writePltHeader(uint8_t *Buf) const override;
- void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
- int32_t Index, unsigned RelOff) const override;
- void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
-
- RelExpr adjustRelaxExpr(RelType Type, const uint8_t *Data,
- RelExpr Expr) const override;
- void relaxTlsGdToIe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- void relaxTlsGdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- void relaxTlsIeToLe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- void relaxTlsLdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
+ int getTlsGdRelaxSkip(RelType type) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override;
+ void writeGotPltHeader(uint8_t *buf) const override;
+ RelType getDynRel(RelType type) const override;
+ void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writeIgotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writePltHeader(uint8_t *buf) const override;
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
+ void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
+
+ RelExpr adjustRelaxExpr(RelType type, const uint8_t *data,
+ RelExpr expr) const override;
+ void relaxTlsGdToIe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsGdToLe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsIeToLe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsLdToLe(uint8_t *loc, RelType type, uint64_t val) const override;
};
} // namespace
X86::X86() {
- CopyRel = R_386_COPY;
- GotRel = R_386_GLOB_DAT;
- NoneRel = R_386_NONE;
- PltRel = R_386_JUMP_SLOT;
- IRelativeRel = R_386_IRELATIVE;
- RelativeRel = R_386_RELATIVE;
- TlsGotRel = R_386_TLS_TPOFF;
- TlsModuleIndexRel = R_386_TLS_DTPMOD32;
- TlsOffsetRel = R_386_TLS_DTPOFF32;
- GotEntrySize = 4;
- GotPltEntrySize = 4;
- PltEntrySize = 16;
- PltHeaderSize = 16;
- TlsGdRelaxSkip = 2;
- TrapInstr = {0xcc, 0xcc, 0xcc, 0xcc}; // 0xcc = INT3
+ copyRel = R_386_COPY;
+ gotRel = R_386_GLOB_DAT;
+ noneRel = R_386_NONE;
+ pltRel = R_386_JUMP_SLOT;
+ iRelativeRel = R_386_IRELATIVE;
+ relativeRel = R_386_RELATIVE;
+ symbolicRel = R_386_32;
+ tlsGotRel = R_386_TLS_TPOFF;
+ tlsModuleIndexRel = R_386_TLS_DTPMOD32;
+ tlsOffsetRel = R_386_TLS_DTPOFF32;
+ pltEntrySize = 16;
+ pltHeaderSize = 16;
+ trapInstr = {0xcc, 0xcc, 0xcc, 0xcc}; // 0xcc = INT3
// Align to the non-PAE large page size (known as a superpage or huge page).
// FreeBSD automatically promotes large, superpage-aligned allocations.
- DefaultImageBase = 0x400000;
+ defaultImageBase = 0x400000;
}
-static bool hasBaseReg(uint8_t ModRM) { return (ModRM & 0xc7) != 0x5; }
+int X86::getTlsGdRelaxSkip(RelType type) const {
+ return 2;
+}
-RelExpr X86::getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const {
+RelExpr X86::getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const {
// There are 4 different TLS variable models with varying degrees of
// flexibility and performance. LocalExec and InitialExec models are fast but
// less-flexible models. If they are in use, we set DF_STATIC_TLS flag in the
// dynamic section to let runtime know about that.
- if (Type == R_386_TLS_LE || Type == R_386_TLS_LE_32 || Type == R_386_TLS_IE ||
- Type == R_386_TLS_GOTIE)
- Config->HasStaticTlsModel = true;
+ if (type == R_386_TLS_LE || type == R_386_TLS_LE_32 || type == R_386_TLS_IE ||
+ type == R_386_TLS_GOTIE)
+ config->hasStaticTlsModel = true;
- switch (Type) {
+ switch (type) {
case R_386_8:
case R_386_16:
case R_386_32:
- case R_386_TLS_LDO_32:
return R_ABS;
+ case R_386_TLS_LDO_32:
+ return R_DTPREL;
case R_386_TLS_GD:
- return R_TLSGD_GOT_FROM_END;
+ return R_TLSGD_GOTPLT;
case R_386_TLS_LDM:
- return R_TLSLD_GOT_FROM_END;
+ return R_TLSLD_GOTPLT;
case R_386_PLT32:
return R_PLT_PC;
case R_386_PC8:
@@ -96,7 +97,7 @@ RelExpr X86::getRelExpr(RelType Type, const Symbol &S,
case R_386_PC32:
return R_PC;
case R_386_GOTPC:
- return R_GOTONLY_PC_FROM_END;
+ return R_GOTPLTONLY_PC;
case R_386_TLS_IE:
return R_GOT;
case R_386_GOT32:
@@ -116,14 +117,14 @@ RelExpr X86::getRelExpr(RelType Type, const Symbol &S,
// load an GOT address to a register, which is usually %ebx.
//
// So, there are two ways to refer to symbol foo's GOT entry: foo@GOT or
- // foo@GOT(%reg).
+ // foo@GOT(%ebx).
//
// foo@GOT is not usable in PIC. If we are creating a PIC output and if we
// find such relocation, we should report an error. foo@GOT is resolved to
// an *absolute* address of foo's GOT entry, because both GOT address and
// foo's offset are known. In other words, it's G + A.
//
- // foo@GOT(%reg) needs to be resolved to a *relative* offset from a GOT to
+ // foo@GOT(%ebx) needs to be resolved to a *relative* offset from a GOT to
// foo's GOT entry in the table, because GOT address is not known but foo's
// offset in the table is known. It's G + A - GOT.
//
@@ -131,16 +132,16 @@ RelExpr X86::getRelExpr(RelType Type, const Symbol &S,
// different use cases. In order to distinguish them, we have to read a
// machine instruction.
//
- // The following code implements it. We assume that Loc[0] is the first
- // byte of a displacement or an immediate field of a valid machine
+ // The following code implements it. We assume that Loc[0] is the first byte
+ // of a displacement or an immediate field of a valid machine
// instruction. That means a ModRM byte is at Loc[-1]. By taking a look at
- // the byte, we can determine whether the instruction is register-relative
- // (i.e. it was generated for foo@GOT(%reg)) or absolute (i.e. foo@GOT).
- return hasBaseReg(Loc[-1]) ? R_GOT_FROM_END : R_GOT;
+ // the byte, we can determine whether the instruction uses the operand as an
+ // absolute address (R_GOT) or a register-relative address (R_GOTPLT).
+ return (loc[-1] & 0xc7) == 0x5 ? R_GOT : R_GOTPLT;
case R_386_TLS_GOTIE:
- return R_GOT_FROM_END;
+ return R_GOTPLT;
case R_386_GOTOFF:
- return R_GOTREL_FROM_END;
+ return R_GOTPLTREL;
case R_386_TLS_LE:
return R_TLS;
case R_386_TLS_LE_32:
@@ -148,105 +149,102 @@ RelExpr X86::getRelExpr(RelType Type, const Symbol &S,
case R_386_NONE:
return R_NONE;
default:
- return R_INVALID;
+ error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
+ ") against symbol " + toString(s));
+ return R_NONE;
}
}
-RelExpr X86::adjustRelaxExpr(RelType Type, const uint8_t *Data,
- RelExpr Expr) const {
- switch (Expr) {
+RelExpr X86::adjustRelaxExpr(RelType type, const uint8_t *data,
+ RelExpr expr) const {
+ switch (expr) {
default:
- return Expr;
+ return expr;
case R_RELAX_TLS_GD_TO_IE:
- return R_RELAX_TLS_GD_TO_IE_END;
+ return R_RELAX_TLS_GD_TO_IE_GOTPLT;
case R_RELAX_TLS_GD_TO_LE:
return R_RELAX_TLS_GD_TO_LE_NEG;
}
}
-void X86::writeGotPltHeader(uint8_t *Buf) const {
- write32le(Buf, In.Dynamic->getVA());
+void X86::writeGotPltHeader(uint8_t *buf) const {
+ write32le(buf, mainPart->dynamic->getVA());
}
-void X86::writeGotPlt(uint8_t *Buf, const Symbol &S) const {
+void X86::writeGotPlt(uint8_t *buf, const Symbol &s) const {
// Entries in .got.plt initially points back to the corresponding
// PLT entries with a fixed offset to skip the first instruction.
- write32le(Buf, S.getPltVA() + 6);
+ write32le(buf, s.getPltVA() + 6);
}
-void X86::writeIgotPlt(uint8_t *Buf, const Symbol &S) const {
+void X86::writeIgotPlt(uint8_t *buf, const Symbol &s) const {
// An x86 entry is the address of the ifunc resolver function.
- write32le(Buf, S.getVA());
+ write32le(buf, s.getVA());
}
-RelType X86::getDynRel(RelType Type) const {
- if (Type == R_386_TLS_LE)
+RelType X86::getDynRel(RelType type) const {
+ if (type == R_386_TLS_LE)
return R_386_TLS_TPOFF;
- if (Type == R_386_TLS_LE_32)
+ if (type == R_386_TLS_LE_32)
return R_386_TLS_TPOFF32;
- return Type;
+ return type;
}
-void X86::writePltHeader(uint8_t *Buf) const {
- if (Config->Pic) {
- const uint8_t V[] = {
- 0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl GOTPLT+4(%ebx)
- 0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp *GOTPLT+8(%ebx)
+void X86::writePltHeader(uint8_t *buf) const {
+ if (config->isPic) {
+ const uint8_t v[] = {
+ 0xff, 0xb3, 0x04, 0x00, 0x00, 0x00, // pushl 4(%ebx)
+ 0xff, 0xa3, 0x08, 0x00, 0x00, 0x00, // jmp *8(%ebx)
0x90, 0x90, 0x90, 0x90 // nop
};
- memcpy(Buf, V, sizeof(V));
-
- uint32_t Ebx = In.Got->getVA() + In.Got->getSize();
- uint32_t GotPlt = In.GotPlt->getVA() - Ebx;
- write32le(Buf + 2, GotPlt + 4);
- write32le(Buf + 8, GotPlt + 8);
+ memcpy(buf, v, sizeof(v));
return;
}
- const uint8_t PltData[] = {
+ const uint8_t pltData[] = {
0xff, 0x35, 0, 0, 0, 0, // pushl (GOTPLT+4)
0xff, 0x25, 0, 0, 0, 0, // jmp *(GOTPLT+8)
0x90, 0x90, 0x90, 0x90, // nop
};
- memcpy(Buf, PltData, sizeof(PltData));
- uint32_t GotPlt = In.GotPlt->getVA();
- write32le(Buf + 2, GotPlt + 4);
- write32le(Buf + 8, GotPlt + 8);
+ memcpy(buf, pltData, sizeof(pltData));
+ uint32_t gotPlt = in.gotPlt->getVA();
+ write32le(buf + 2, gotPlt + 4);
+ write32le(buf + 8, gotPlt + 8);
}
-void X86::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
- uint64_t PltEntryAddr, int32_t Index,
- unsigned RelOff) const {
- const uint8_t Inst[] = {
- 0xff, 0x00, 0, 0, 0, 0, // jmp *foo_in_GOT or jmp *foo@GOT(%ebx)
- 0x68, 0, 0, 0, 0, // pushl $reloc_offset
- 0xe9, 0, 0, 0, 0, // jmp .PLT0@PC
- };
- memcpy(Buf, Inst, sizeof(Inst));
-
- if (Config->Pic) {
- // jmp *foo@GOT(%ebx)
- uint32_t Ebx = In.Got->getVA() + In.Got->getSize();
- Buf[1] = 0xa3;
- write32le(Buf + 2, GotPltEntryAddr - Ebx);
+void X86::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
+ if (config->isPic) {
+ const uint8_t inst[] = {
+ 0xff, 0xa3, 0, 0, 0, 0, // jmp *foo@GOT(%ebx)
+ 0x68, 0, 0, 0, 0, // pushl $reloc_offset
+ 0xe9, 0, 0, 0, 0, // jmp .PLT0@PC
+ };
+ memcpy(buf, inst, sizeof(inst));
+ write32le(buf + 2, gotPltEntryAddr - in.gotPlt->getVA());
} else {
- // jmp *foo_in_GOT
- Buf[1] = 0x25;
- write32le(Buf + 2, GotPltEntryAddr);
+ const uint8_t inst[] = {
+ 0xff, 0x25, 0, 0, 0, 0, // jmp *foo@GOT
+ 0x68, 0, 0, 0, 0, // pushl $reloc_offset
+ 0xe9, 0, 0, 0, 0, // jmp .PLT0@PC
+ };
+ memcpy(buf, inst, sizeof(inst));
+ write32le(buf + 2, gotPltEntryAddr);
}
- write32le(Buf + 7, RelOff);
- write32le(Buf + 12, -getPltEntryOffset(Index) - 16);
+ write32le(buf + 7, relOff);
+ write32le(buf + 12, -pltHeaderSize - pltEntrySize * index - 16);
}
-int64_t X86::getImplicitAddend(const uint8_t *Buf, RelType Type) const {
- switch (Type) {
+int64_t X86::getImplicitAddend(const uint8_t *buf, RelType type) const {
+ switch (type) {
case R_386_8:
case R_386_PC8:
- return SignExtend64<8>(*Buf);
+ return SignExtend64<8>(*buf);
case R_386_16:
case R_386_PC16:
- return SignExtend64<16>(read16le(Buf));
+ return SignExtend64<16>(read16le(buf));
case R_386_32:
case R_386_GOT32:
case R_386_GOT32X:
@@ -256,28 +254,28 @@ int64_t X86::getImplicitAddend(const uint8_t *Buf, RelType Type) const {
case R_386_PLT32:
case R_386_TLS_LDO_32:
case R_386_TLS_LE:
- return SignExtend64<32>(read32le(Buf));
+ return SignExtend64<32>(read32le(buf));
default:
return 0;
}
}
-void X86::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
- switch (Type) {
+void X86::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
case R_386_8:
// R_386_{PC,}{8,16} are not part of the i386 psABI, but they are
// being used for some 16-bit programs such as boot loaders, so
// we want to support them.
- checkIntUInt(Loc, Val, 8, Type);
- *Loc = Val;
+ checkIntUInt(loc, val, 8, type);
+ *loc = val;
break;
case R_386_PC8:
- checkInt(Loc, Val, 8, Type);
- *Loc = Val;
+ checkInt(loc, val, 8, type);
+ *loc = val;
break;
case R_386_16:
- checkIntUInt(Loc, Val, 16, Type);
- write16le(Loc, Val);
+ checkIntUInt(loc, val, 16, type);
+ write16le(loc, val);
break;
case R_386_PC16:
// R_386_PC16 is normally used with 16 bit code. In that situation
@@ -290,11 +288,10 @@ void X86::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
// current location subtracted from it.
// We just check that Val fits in 17 bits. This misses some cases, but
// should have no false positives.
- checkInt(Loc, Val, 17, Type);
- write16le(Loc, Val);
+ checkInt(loc, val, 17, type);
+ write16le(loc, val);
break;
case R_386_32:
- case R_386_GLOB_DAT:
case R_386_GOT32:
case R_386_GOT32X:
case R_386_GOTOFF:
@@ -313,86 +310,86 @@ void X86::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
case R_386_TLS_LE_32:
case R_386_TLS_TPOFF:
case R_386_TLS_TPOFF32:
- checkInt(Loc, Val, 32, Type);
- write32le(Loc, Val);
+ checkInt(loc, val, 32, type);
+ write32le(loc, val);
break;
default:
- error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+ llvm_unreachable("unknown relocation");
}
}
-void X86::relaxTlsGdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
+void X86::relaxTlsGdToLe(uint8_t *loc, RelType type, uint64_t val) const {
// Convert
// leal x@tlsgd(, %ebx, 1),
// call __tls_get_addr@plt
// to
// movl %gs:0,%eax
// subl $x@ntpoff,%eax
- const uint8_t Inst[] = {
+ const uint8_t inst[] = {
0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
0x81, 0xe8, 0, 0, 0, 0, // subl Val(%ebx), %eax
};
- memcpy(Loc - 3, Inst, sizeof(Inst));
- write32le(Loc + 5, Val);
+ memcpy(loc - 3, inst, sizeof(inst));
+ write32le(loc + 5, val);
}
-void X86::relaxTlsGdToIe(uint8_t *Loc, RelType Type, uint64_t Val) const {
+void X86::relaxTlsGdToIe(uint8_t *loc, RelType type, uint64_t val) const {
// Convert
// leal x@tlsgd(, %ebx, 1),
// call __tls_get_addr@plt
// to
// movl %gs:0, %eax
// addl x@gotntpoff(%ebx), %eax
- const uint8_t Inst[] = {
+ const uint8_t inst[] = {
0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0, %eax
0x03, 0x83, 0, 0, 0, 0, // addl Val(%ebx), %eax
};
- memcpy(Loc - 3, Inst, sizeof(Inst));
- write32le(Loc + 5, Val);
+ memcpy(loc - 3, inst, sizeof(inst));
+ write32le(loc + 5, val);
}
// In some conditions, relocations can be optimized to avoid using GOT.
// This function does that for Initial Exec to Local Exec case.
-void X86::relaxTlsIeToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
+void X86::relaxTlsIeToLe(uint8_t *loc, RelType type, uint64_t val) const {
// Ulrich's document section 6.2 says that @gotntpoff can
// be used with MOVL or ADDL instructions.
// @indntpoff is similar to @gotntpoff, but for use in
// position dependent code.
- uint8_t Reg = (Loc[-1] >> 3) & 7;
+ uint8_t reg = (loc[-1] >> 3) & 7;
- if (Type == R_386_TLS_IE) {
- if (Loc[-1] == 0xa1) {
+ if (type == R_386_TLS_IE) {
+ if (loc[-1] == 0xa1) {
// "movl foo@indntpoff,%eax" -> "movl $foo,%eax"
// This case is different from the generic case below because
// this is a 5 byte instruction while below is 6 bytes.
- Loc[-1] = 0xb8;
- } else if (Loc[-2] == 0x8b) {
+ loc[-1] = 0xb8;
+ } else if (loc[-2] == 0x8b) {
// "movl foo@indntpoff,%reg" -> "movl $foo,%reg"
- Loc[-2] = 0xc7;
- Loc[-1] = 0xc0 | Reg;
+ loc[-2] = 0xc7;
+ loc[-1] = 0xc0 | reg;
} else {
// "addl foo@indntpoff,%reg" -> "addl $foo,%reg"
- Loc[-2] = 0x81;
- Loc[-1] = 0xc0 | Reg;
+ loc[-2] = 0x81;
+ loc[-1] = 0xc0 | reg;
}
} else {
- assert(Type == R_386_TLS_GOTIE);
- if (Loc[-2] == 0x8b) {
+ assert(type == R_386_TLS_GOTIE);
+ if (loc[-2] == 0x8b) {
// "movl foo@gottpoff(%rip),%reg" -> "movl $foo,%reg"
- Loc[-2] = 0xc7;
- Loc[-1] = 0xc0 | Reg;
+ loc[-2] = 0xc7;
+ loc[-1] = 0xc0 | reg;
} else {
// "addl foo@gotntpoff(%rip),%reg" -> "leal foo(%reg),%reg"
- Loc[-2] = 0x8d;
- Loc[-1] = 0x80 | (Reg << 3) | Reg;
+ loc[-2] = 0x8d;
+ loc[-1] = 0x80 | (reg << 3) | reg;
}
}
- write32le(Loc, Val);
+ write32le(loc, val);
}
-void X86::relaxTlsLdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
- if (Type == R_386_TLS_LDO_32) {
- write32le(Loc, Val);
+void X86::relaxTlsLdToLe(uint8_t *loc, RelType type, uint64_t val) const {
+ if (type == R_386_TLS_LDO_32) {
+ write32le(loc, val);
return;
}
@@ -403,48 +400,48 @@ void X86::relaxTlsLdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const {
// movl %gs:0,%eax
// nop
// leal 0(%esi,1),%esi
- const uint8_t Inst[] = {
+ const uint8_t inst[] = {
0x65, 0xa1, 0x00, 0x00, 0x00, 0x00, // movl %gs:0,%eax
0x90, // nop
0x8d, 0x74, 0x26, 0x00, // leal 0(%esi,1),%esi
};
- memcpy(Loc - 2, Inst, sizeof(Inst));
+ memcpy(loc - 2, inst, sizeof(inst));
}
namespace {
class RetpolinePic : public X86 {
public:
RetpolinePic();
- void writeGotPlt(uint8_t *Buf, const Symbol &S) const override;
- void writePltHeader(uint8_t *Buf) const override;
- void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
- int32_t Index, unsigned RelOff) const override;
+ void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writePltHeader(uint8_t *buf) const override;
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
};
class RetpolineNoPic : public X86 {
public:
RetpolineNoPic();
- void writeGotPlt(uint8_t *Buf, const Symbol &S) const override;
- void writePltHeader(uint8_t *Buf) const override;
- void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
- int32_t Index, unsigned RelOff) const override;
+ void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writePltHeader(uint8_t *buf) const override;
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
};
} // namespace
RetpolinePic::RetpolinePic() {
- PltHeaderSize = 48;
- PltEntrySize = 32;
+ pltHeaderSize = 48;
+ pltEntrySize = 32;
}
-void RetpolinePic::writeGotPlt(uint8_t *Buf, const Symbol &S) const {
- write32le(Buf, S.getPltVA() + 17);
+void RetpolinePic::writeGotPlt(uint8_t *buf, const Symbol &s) const {
+ write32le(buf, s.getPltVA() + 17);
}
-void RetpolinePic::writePltHeader(uint8_t *Buf) const {
- const uint8_t Insn[] = {
- 0xff, 0xb3, 0, 0, 0, 0, // 0: pushl GOTPLT+4(%ebx)
+void RetpolinePic::writePltHeader(uint8_t *buf) const {
+ const uint8_t insn[] = {
+ 0xff, 0xb3, 4, 0, 0, 0, // 0: pushl 4(%ebx)
0x50, // 6: pushl %eax
- 0x8b, 0x83, 0, 0, 0, 0, // 7: mov GOTPLT+8(%ebx), %eax
+ 0x8b, 0x83, 8, 0, 0, 0, // 7: mov 8(%ebx), %eax
0xe8, 0x0e, 0x00, 0x00, 0x00, // d: call next
0xf3, 0x90, // 12: loop: pause
0x0f, 0xae, 0xe8, // 14: lfence
@@ -458,18 +455,13 @@ void RetpolinePic::writePltHeader(uint8_t *Buf) const {
0xc3, // 2e: ret
0xcc, // 2f: int3; padding
};
- memcpy(Buf, Insn, sizeof(Insn));
-
- uint32_t Ebx = In.Got->getVA() + In.Got->getSize();
- uint32_t GotPlt = In.GotPlt->getVA() - Ebx;
- write32le(Buf + 2, GotPlt + 4);
- write32le(Buf + 9, GotPlt + 8);
+ memcpy(buf, insn, sizeof(insn));
}
-void RetpolinePic::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
- uint64_t PltEntryAddr, int32_t Index,
- unsigned RelOff) const {
- const uint8_t Insn[] = {
+void RetpolinePic::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
+ const uint8_t insn[] = {
0x50, // pushl %eax
0x8b, 0x83, 0, 0, 0, 0, // mov foo@GOT(%ebx), %eax
0xe8, 0, 0, 0, 0, // call plt+0x20
@@ -478,28 +470,28 @@ void RetpolinePic::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
0xe9, 0, 0, 0, 0, // jmp plt+0
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // int3; padding
};
- memcpy(Buf, Insn, sizeof(Insn));
-
- uint32_t Ebx = In.Got->getVA() + In.Got->getSize();
- unsigned Off = getPltEntryOffset(Index);
- write32le(Buf + 3, GotPltEntryAddr - Ebx);
- write32le(Buf + 8, -Off - 12 + 32);
- write32le(Buf + 13, -Off - 17 + 18);
- write32le(Buf + 18, RelOff);
- write32le(Buf + 23, -Off - 27);
+ memcpy(buf, insn, sizeof(insn));
+
+ uint32_t ebx = in.gotPlt->getVA();
+ unsigned off = pltHeaderSize + pltEntrySize * index;
+ write32le(buf + 3, gotPltEntryAddr - ebx);
+ write32le(buf + 8, -off - 12 + 32);
+ write32le(buf + 13, -off - 17 + 18);
+ write32le(buf + 18, relOff);
+ write32le(buf + 23, -off - 27);
}
RetpolineNoPic::RetpolineNoPic() {
- PltHeaderSize = 48;
- PltEntrySize = 32;
+ pltHeaderSize = 48;
+ pltEntrySize = 32;
}
-void RetpolineNoPic::writeGotPlt(uint8_t *Buf, const Symbol &S) const {
- write32le(Buf, S.getPltVA() + 16);
+void RetpolineNoPic::writeGotPlt(uint8_t *buf, const Symbol &s) const {
+ write32le(buf, s.getPltVA() + 16);
}
-void RetpolineNoPic::writePltHeader(uint8_t *Buf) const {
- const uint8_t Insn[] = {
+void RetpolineNoPic::writePltHeader(uint8_t *buf) const {
+ const uint8_t insn[] = {
0xff, 0x35, 0, 0, 0, 0, // 0: pushl GOTPLT+4
0x50, // 6: pushl %eax
0xa1, 0, 0, 0, 0, // 7: mov GOTPLT+8, %eax
@@ -517,17 +509,17 @@ void RetpolineNoPic::writePltHeader(uint8_t *Buf) const {
0xc3, // 2e: ret
0xcc, // 2f: int3; padding
};
- memcpy(Buf, Insn, sizeof(Insn));
+ memcpy(buf, insn, sizeof(insn));
- uint32_t GotPlt = In.GotPlt->getVA();
- write32le(Buf + 2, GotPlt + 4);
- write32le(Buf + 8, GotPlt + 8);
+ uint32_t gotPlt = in.gotPlt->getVA();
+ write32le(buf + 2, gotPlt + 4);
+ write32le(buf + 8, gotPlt + 8);
}
-void RetpolineNoPic::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
- uint64_t PltEntryAddr, int32_t Index,
- unsigned RelOff) const {
- const uint8_t Insn[] = {
+void RetpolineNoPic::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
+ const uint8_t insn[] = {
0x50, // 0: pushl %eax
0xa1, 0, 0, 0, 0, // 1: mov foo_in_GOT, %eax
0xe8, 0, 0, 0, 0, // 6: call plt+0x20
@@ -537,26 +529,26 @@ void RetpolineNoPic::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 1a: int3; padding
0xcc, // 1f: int3; padding
};
- memcpy(Buf, Insn, sizeof(Insn));
-
- unsigned Off = getPltEntryOffset(Index);
- write32le(Buf + 2, GotPltEntryAddr);
- write32le(Buf + 7, -Off - 11 + 32);
- write32le(Buf + 12, -Off - 16 + 17);
- write32le(Buf + 17, RelOff);
- write32le(Buf + 22, -Off - 26);
+ memcpy(buf, insn, sizeof(insn));
+
+ unsigned off = pltHeaderSize + pltEntrySize * index;
+ write32le(buf + 2, gotPltEntryAddr);
+ write32le(buf + 7, -off - 11 + 32);
+ write32le(buf + 12, -off - 16 + 17);
+ write32le(buf + 17, relOff);
+ write32le(buf + 22, -off - 26);
}
TargetInfo *elf::getX86TargetInfo() {
- if (Config->ZRetpolineplt) {
- if (Config->Pic) {
- static RetpolinePic T;
- return &T;
+ if (config->zRetpolineplt) {
+ if (config->isPic) {
+ static RetpolinePic t;
+ return &t;
}
- static RetpolineNoPic T;
- return &T;
+ static RetpolineNoPic t;
+ return &t;
}
- static X86 T;
- return &T;
+ static X86 t;
+ return &t;
}
diff --git a/contrib/llvm/tools/lld/ELF/Arch/X86_64.cpp b/contrib/llvm/tools/lld/ELF/Arch/X86_64.cpp
index 078128c8ad6d..de67aa5c33dc 100644
--- a/contrib/llvm/tools/lld/ELF/Arch/X86_64.cpp
+++ b/contrib/llvm/tools/lld/ELF/Arch/X86_64.cpp
@@ -1,9 +1,8 @@
//===- X86_64.cpp ---------------------------------------------------------===//
//
-// The LLVM Linker
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
@@ -23,74 +22,74 @@ using namespace lld;
using namespace lld::elf;
namespace {
-template <class ELFT> class X86_64 : public TargetInfo {
+class X86_64 : public TargetInfo {
public:
X86_64();
- RelExpr getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const override;
- RelType getDynRel(RelType Type) const override;
- void writeGotPltHeader(uint8_t *Buf) const override;
- void writeGotPlt(uint8_t *Buf, const Symbol &S) const override;
- void writePltHeader(uint8_t *Buf) const override;
- void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
- int32_t Index, unsigned RelOff) const override;
- void relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const override;
-
- RelExpr adjustRelaxExpr(RelType Type, const uint8_t *Data,
- RelExpr Expr) const override;
- void relaxGot(uint8_t *Loc, uint64_t Val) const override;
- void relaxTlsGdToIe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- void relaxTlsGdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- void relaxTlsIeToLe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- void relaxTlsLdToLe(uint8_t *Loc, RelType Type, uint64_t Val) const override;
- bool adjustPrologueForCrossSplitStack(uint8_t *Loc, uint8_t *End,
- uint8_t StOther) const override;
-
-private:
- void relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
- uint8_t ModRm) const;
+ int getTlsGdRelaxSkip(RelType type) const override;
+ RelExpr getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const override;
+ RelType getDynRel(RelType type) const override;
+ void writeGotPltHeader(uint8_t *buf) const override;
+ void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writePltHeader(uint8_t *buf) const override;
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
+ void relocateOne(uint8_t *loc, RelType type, uint64_t val) const override;
+
+ RelExpr adjustRelaxExpr(RelType type, const uint8_t *data,
+ RelExpr expr) const override;
+ void relaxGot(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsGdToIe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsGdToLe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsIeToLe(uint8_t *loc, RelType type, uint64_t val) const override;
+ void relaxTlsLdToLe(uint8_t *loc, RelType type, uint64_t val) const override;
+ bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
+ uint8_t stOther) const override;
};
} // namespace
-template <class ELFT> X86_64<ELFT>::X86_64() {
- CopyRel = R_X86_64_COPY;
- GotRel = R_X86_64_GLOB_DAT;
- NoneRel = R_X86_64_NONE;
- PltRel = R_X86_64_JUMP_SLOT;
- RelativeRel = R_X86_64_RELATIVE;
- IRelativeRel = R_X86_64_IRELATIVE;
- TlsGotRel = R_X86_64_TPOFF64;
- TlsModuleIndexRel = R_X86_64_DTPMOD64;
- TlsOffsetRel = R_X86_64_DTPOFF64;
- GotEntrySize = 8;
- GotPltEntrySize = 8;
- PltEntrySize = 16;
- PltHeaderSize = 16;
- TlsGdRelaxSkip = 2;
- TrapInstr = {0xcc, 0xcc, 0xcc, 0xcc}; // 0xcc = INT3
+X86_64::X86_64() {
+ copyRel = R_X86_64_COPY;
+ gotRel = R_X86_64_GLOB_DAT;
+ noneRel = R_X86_64_NONE;
+ pltRel = R_X86_64_JUMP_SLOT;
+ relativeRel = R_X86_64_RELATIVE;
+ iRelativeRel = R_X86_64_IRELATIVE;
+ symbolicRel = R_X86_64_64;
+ tlsDescRel = R_X86_64_TLSDESC;
+ tlsGotRel = R_X86_64_TPOFF64;
+ tlsModuleIndexRel = R_X86_64_DTPMOD64;
+ tlsOffsetRel = R_X86_64_DTPOFF64;
+ pltEntrySize = 16;
+ pltHeaderSize = 16;
+ trapInstr = {0xcc, 0xcc, 0xcc, 0xcc}; // 0xcc = INT3
// Align to the large page size (known as a superpage or huge page).
// FreeBSD automatically promotes large, superpage-aligned allocations.
- DefaultImageBase = 0x200000;
+ defaultImageBase = 0x200000;
}
-template <class ELFT>
-RelExpr X86_64<ELFT>::getRelExpr(RelType Type, const Symbol &S,
- const uint8_t *Loc) const {
- if (Type == R_X86_64_GOTTPOFF)
- Config->HasStaticTlsModel = true;
+int X86_64::getTlsGdRelaxSkip(RelType type) const { return 2; }
+
+RelExpr X86_64::getRelExpr(RelType type, const Symbol &s,
+ const uint8_t *loc) const {
+ if (type == R_X86_64_GOTTPOFF)
+ config->hasStaticTlsModel = true;
- switch (Type) {
+ switch (type) {
case R_X86_64_8:
case R_X86_64_16:
case R_X86_64_32:
case R_X86_64_32S:
case R_X86_64_64:
+ return R_ABS;
case R_X86_64_DTPOFF32:
case R_X86_64_DTPOFF64:
- return R_ABS;
+ return R_DTPREL;
case R_X86_64_TPOFF32:
return R_TLS;
+ case R_X86_64_TLSDESC_CALL:
+ return R_TLSDESC_CALL;
case R_X86_64_TLSLD:
return R_TLSLD_PC;
case R_X86_64_TLSGD:
@@ -100,189 +99,222 @@ RelExpr X86_64<ELFT>::getRelExpr(RelType Type, const Symbol &S,
return R_SIZE;
case R_X86_64_PLT32:
return R_PLT_PC;
+ case R_X86_64_PC8:
+ case R_X86_64_PC16:
case R_X86_64_PC32:
case R_X86_64_PC64:
return R_PC;
case R_X86_64_GOT32:
case R_X86_64_GOT64:
- return R_GOT_FROM_END;
+ return R_GOTPLT;
+ case R_X86_64_GOTPC32_TLSDESC:
+ return R_TLSDESC_PC;
case R_X86_64_GOTPCREL:
case R_X86_64_GOTPCRELX:
case R_X86_64_REX_GOTPCRELX:
case R_X86_64_GOTTPOFF:
return R_GOT_PC;
case R_X86_64_GOTOFF64:
- return R_GOTREL_FROM_END;
+ return R_GOTPLTREL;
case R_X86_64_GOTPC32:
case R_X86_64_GOTPC64:
- return R_GOTONLY_PC_FROM_END;
+ return R_GOTPLTONLY_PC;
case R_X86_64_NONE:
return R_NONE;
default:
- return R_INVALID;
+ error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) +
+ ") against symbol " + toString(s));
+ return R_NONE;
}
}
-template <class ELFT> void X86_64<ELFT>::writeGotPltHeader(uint8_t *Buf) const {
+void X86_64::writeGotPltHeader(uint8_t *buf) const {
// The first entry holds the value of _DYNAMIC. It is not clear why that is
// required, but it is documented in the psabi and the glibc dynamic linker
// seems to use it (note that this is relevant for linking ld.so, not any
// other program).
- write64le(Buf, In.Dynamic->getVA());
+ write64le(buf, mainPart->dynamic->getVA());
}
-template <class ELFT>
-void X86_64<ELFT>::writeGotPlt(uint8_t *Buf, const Symbol &S) const {
+void X86_64::writeGotPlt(uint8_t *buf, const Symbol &s) const {
// See comments in X86::writeGotPlt.
- write64le(Buf, S.getPltVA() + 6);
+ write64le(buf, s.getPltVA() + 6);
}
-template <class ELFT> void X86_64<ELFT>::writePltHeader(uint8_t *Buf) const {
- const uint8_t PltData[] = {
+void X86_64::writePltHeader(uint8_t *buf) const {
+ const uint8_t pltData[] = {
0xff, 0x35, 0, 0, 0, 0, // pushq GOTPLT+8(%rip)
0xff, 0x25, 0, 0, 0, 0, // jmp *GOTPLT+16(%rip)
0x0f, 0x1f, 0x40, 0x00, // nop
};
- memcpy(Buf, PltData, sizeof(PltData));
- uint64_t GotPlt = In.GotPlt->getVA();
- uint64_t Plt = In.Plt->getVA();
- write32le(Buf + 2, GotPlt - Plt + 2); // GOTPLT+8
- write32le(Buf + 8, GotPlt - Plt + 4); // GOTPLT+16
+ memcpy(buf, pltData, sizeof(pltData));
+ uint64_t gotPlt = in.gotPlt->getVA();
+ uint64_t plt = in.plt->getVA();
+ write32le(buf + 2, gotPlt - plt + 2); // GOTPLT+8
+ write32le(buf + 8, gotPlt - plt + 4); // GOTPLT+16
}
-template <class ELFT>
-void X86_64<ELFT>::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
- uint64_t PltEntryAddr, int32_t Index,
- unsigned RelOff) const {
- const uint8_t Inst[] = {
+void X86_64::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
+ const uint8_t inst[] = {
0xff, 0x25, 0, 0, 0, 0, // jmpq *got(%rip)
0x68, 0, 0, 0, 0, // pushq <relocation index>
0xe9, 0, 0, 0, 0, // jmpq plt[0]
};
- memcpy(Buf, Inst, sizeof(Inst));
+ memcpy(buf, inst, sizeof(inst));
- write32le(Buf + 2, GotPltEntryAddr - PltEntryAddr - 6);
- write32le(Buf + 7, Index);
- write32le(Buf + 12, -getPltEntryOffset(Index) - 16);
+ write32le(buf + 2, gotPltEntryAddr - pltEntryAddr - 6);
+ write32le(buf + 7, index);
+ write32le(buf + 12, -pltHeaderSize - pltEntrySize * index - 16);
}
-template <class ELFT> RelType X86_64<ELFT>::getDynRel(RelType Type) const {
- if (Type == R_X86_64_64 || Type == R_X86_64_PC64 || Type == R_X86_64_SIZE32 ||
- Type == R_X86_64_SIZE64)
- return Type;
+RelType X86_64::getDynRel(RelType type) const {
+ if (type == R_X86_64_64 || type == R_X86_64_PC64 || type == R_X86_64_SIZE32 ||
+ type == R_X86_64_SIZE64)
+ return type;
return R_X86_64_NONE;
}
-template <class ELFT>
-void X86_64<ELFT>::relaxTlsGdToLe(uint8_t *Loc, RelType Type,
- uint64_t Val) const {
- // Convert
- // .byte 0x66
- // leaq x@tlsgd(%rip), %rdi
- // .word 0x6666
- // rex64
- // call __tls_get_addr@plt
- // to
- // mov %fs:0x0,%rax
- // lea x@tpoff,%rax
- const uint8_t Inst[] = {
- 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
- 0x48, 0x8d, 0x80, 0, 0, 0, 0, // lea x@tpoff,%rax
- };
- memcpy(Loc - 4, Inst, sizeof(Inst));
-
- // The original code used a pc relative relocation and so we have to
- // compensate for the -4 in had in the addend.
- write32le(Loc + 8, Val + 4);
+void X86_64::relaxTlsGdToLe(uint8_t *loc, RelType type, uint64_t val) const {
+ if (type == R_X86_64_TLSGD) {
+ // Convert
+ // .byte 0x66
+ // leaq x@tlsgd(%rip), %rdi
+ // .word 0x6666
+ // rex64
+ // call __tls_get_addr@plt
+ // to the following two instructions.
+ const uint8_t inst[] = {
+ 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00,
+ 0x00, 0x00, // mov %fs:0x0,%rax
+ 0x48, 0x8d, 0x80, 0, 0, 0, 0, // lea x@tpoff,%rax
+ };
+ memcpy(loc - 4, inst, sizeof(inst));
+
+ // The original code used a pc relative relocation and so we have to
+ // compensate for the -4 in had in the addend.
+ write32le(loc + 8, val + 4);
+ } else {
+ // Convert
+ // lea x@tlsgd(%rip), %rax
+ // call *(%rax)
+ // to the following two instructions.
+ assert(type == R_X86_64_GOTPC32_TLSDESC);
+ if (memcmp(loc - 3, "\x48\x8d\x05", 3)) {
+ error(getErrorLocation(loc - 3) + "R_X86_64_GOTPC32_TLSDESC must be used "
+ "in callq *x@tlsdesc(%rip), %rax");
+ return;
+ }
+ // movq $x@tpoff(%rip),%rax
+ loc[-2] = 0xc7;
+ loc[-1] = 0xc0;
+ write32le(loc, val + 4);
+ // xchg ax,ax
+ loc[4] = 0x66;
+ loc[5] = 0x90;
+ }
}
-template <class ELFT>
-void X86_64<ELFT>::relaxTlsGdToIe(uint8_t *Loc, RelType Type,
- uint64_t Val) const {
- // Convert
- // .byte 0x66
- // leaq x@tlsgd(%rip), %rdi
- // .word 0x6666
- // rex64
- // call __tls_get_addr@plt
- // to
- // mov %fs:0x0,%rax
- // addq x@tpoff,%rax
- const uint8_t Inst[] = {
- 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0x0,%rax
- 0x48, 0x03, 0x05, 0, 0, 0, 0, // addq x@tpoff,%rax
- };
- memcpy(Loc - 4, Inst, sizeof(Inst));
-
- // Both code sequences are PC relatives, but since we are moving the constant
- // forward by 8 bytes we have to subtract the value by 8.
- write32le(Loc + 8, Val - 8);
+void X86_64::relaxTlsGdToIe(uint8_t *loc, RelType type, uint64_t val) const {
+ if (type == R_X86_64_TLSGD) {
+ // Convert
+ // .byte 0x66
+ // leaq x@tlsgd(%rip), %rdi
+ // .word 0x6666
+ // rex64
+ // call __tls_get_addr@plt
+ // to the following two instructions.
+ const uint8_t inst[] = {
+ 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00,
+ 0x00, 0x00, // mov %fs:0x0,%rax
+ 0x48, 0x03, 0x05, 0, 0, 0, 0, // addq x@gottpoff(%rip),%rax
+ };
+ memcpy(loc - 4, inst, sizeof(inst));
+
+ // Both code sequences are PC relatives, but since we are moving the
+ // constant forward by 8 bytes we have to subtract the value by 8.
+ write32le(loc + 8, val - 8);
+ } else {
+ // Convert
+ // lea x@tlsgd(%rip), %rax
+ // call *(%rax)
+ // to the following two instructions.
+ assert(type == R_X86_64_GOTPC32_TLSDESC);
+ if (memcmp(loc - 3, "\x48\x8d\x05", 3)) {
+ error(getErrorLocation(loc - 3) + "R_X86_64_GOTPC32_TLSDESC must be used "
+ "in callq *x@tlsdesc(%rip), %rax");
+ return;
+ }
+ // movq x@gottpoff(%rip),%rax
+ loc[-2] = 0x8b;
+ write32le(loc, val);
+ // xchg ax,ax
+ loc[4] = 0x66;
+ loc[5] = 0x90;
+ }
}
// In some conditions, R_X86_64_GOTTPOFF relocation can be optimized to
// R_X86_64_TPOFF32 so that it does not use GOT.
-template <class ELFT>
-void X86_64<ELFT>::relaxTlsIeToLe(uint8_t *Loc, RelType Type,
- uint64_t Val) const {
- uint8_t *Inst = Loc - 3;
- uint8_t Reg = Loc[-1] >> 3;
- uint8_t *RegSlot = Loc - 1;
+void X86_64::relaxTlsIeToLe(uint8_t *loc, RelType type, uint64_t val) const {
+ uint8_t *inst = loc - 3;
+ uint8_t reg = loc[-1] >> 3;
+ uint8_t *regSlot = loc - 1;
// Note that ADD with RSP or R12 is converted to ADD instead of LEA
// because LEA with these registers needs 4 bytes to encode and thus
// wouldn't fit the space.
- if (memcmp(Inst, "\x48\x03\x25", 3) == 0) {
+ if (memcmp(inst, "\x48\x03\x25", 3) == 0) {
// "addq foo@gottpoff(%rip),%rsp" -> "addq $foo,%rsp"
- memcpy(Inst, "\x48\x81\xc4", 3);
- } else if (memcmp(Inst, "\x4c\x03\x25", 3) == 0) {
+ memcpy(inst, "\x48\x81\xc4", 3);
+ } else if (memcmp(inst, "\x4c\x03\x25", 3) == 0) {
// "addq foo@gottpoff(%rip),%r12" -> "addq $foo,%r12"
- memcpy(Inst, "\x49\x81\xc4", 3);
- } else if (memcmp(Inst, "\x4c\x03", 2) == 0) {
+ memcpy(inst, "\x49\x81\xc4", 3);
+ } else if (memcmp(inst, "\x4c\x03", 2) == 0) {
// "addq foo@gottpoff(%rip),%r[8-15]" -> "leaq foo(%r[8-15]),%r[8-15]"
- memcpy(Inst, "\x4d\x8d", 2);
- *RegSlot = 0x80 | (Reg << 3) | Reg;
- } else if (memcmp(Inst, "\x48\x03", 2) == 0) {
+ memcpy(inst, "\x4d\x8d", 2);
+ *regSlot = 0x80 | (reg << 3) | reg;
+ } else if (memcmp(inst, "\x48\x03", 2) == 0) {
// "addq foo@gottpoff(%rip),%reg -> "leaq foo(%reg),%reg"
- memcpy(Inst, "\x48\x8d", 2);
- *RegSlot = 0x80 | (Reg << 3) | Reg;
- } else if (memcmp(Inst, "\x4c\x8b", 2) == 0) {
+ memcpy(inst, "\x48\x8d", 2);
+ *regSlot = 0x80 | (reg << 3) | reg;
+ } else if (memcmp(inst, "\x4c\x8b", 2) == 0) {
// "movq foo@gottpoff(%rip),%r[8-15]" -> "movq $foo,%r[8-15]"
- memcpy(Inst, "\x49\xc7", 2);
- *RegSlot = 0xc0 | Reg;
- } else if (memcmp(Inst, "\x48\x8b", 2) == 0) {
+ memcpy(inst, "\x49\xc7", 2);
+ *regSlot = 0xc0 | reg;
+ } else if (memcmp(inst, "\x48\x8b", 2) == 0) {
// "movq foo@gottpoff(%rip),%reg" -> "movq $foo,%reg"
- memcpy(Inst, "\x48\xc7", 2);
- *RegSlot = 0xc0 | Reg;
+ memcpy(inst, "\x48\xc7", 2);
+ *regSlot = 0xc0 | reg;
} else {
- error(getErrorLocation(Loc - 3) +
+ error(getErrorLocation(loc - 3) +
"R_X86_64_GOTTPOFF must be used in MOVQ or ADDQ instructions only");
}
// The original code used a PC relative relocation.
// Need to compensate for the -4 it had in the addend.
- write32le(Loc, Val + 4);
+ write32le(loc, val + 4);
}
-template <class ELFT>
-void X86_64<ELFT>::relaxTlsLdToLe(uint8_t *Loc, RelType Type,
- uint64_t Val) const {
- if (Type == R_X86_64_DTPOFF64) {
- write64le(Loc, Val);
+void X86_64::relaxTlsLdToLe(uint8_t *loc, RelType type, uint64_t val) const {
+ if (type == R_X86_64_DTPOFF64) {
+ write64le(loc, val);
return;
}
- if (Type == R_X86_64_DTPOFF32) {
- write32le(Loc, Val);
+ if (type == R_X86_64_DTPOFF32) {
+ write32le(loc, val);
return;
}
- const uint8_t Inst[] = {
+ const uint8_t inst[] = {
0x66, 0x66, // .word 0x6666
0x66, // .byte 0x66
0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0,%rax
};
- if (Loc[4] == 0xe8) {
+ if (loc[4] == 0xe8) {
// Convert
// leaq bar@tlsld(%rip), %rdi # 48 8d 3d <Loc>
// callq __tls_get_addr@PLT # e8 <disp32>
@@ -292,11 +324,11 @@ void X86_64<ELFT>::relaxTlsLdToLe(uint8_t *Loc, RelType Type,
// .byte 0x66
// mov %fs:0,%rax
// leaq bar@tpoff(%rax), %rcx
- memcpy(Loc - 3, Inst, sizeof(Inst));
+ memcpy(loc - 3, inst, sizeof(inst));
return;
}
- if (Loc[4] == 0xff && Loc[5] == 0x15) {
+ if (loc[4] == 0xff && loc[5] == 0x15) {
// Convert
// leaq x@tlsld(%rip),%rdi # 48 8d 3d <Loc>
// call *__tls_get_addr@GOTPCREL(%rip) # ff 15 <disp32>
@@ -305,34 +337,42 @@ void X86_64<ELFT>::relaxTlsLdToLe(uint8_t *Loc, RelType Type,
// movq %fs:0,%rax
// See "Table 11.9: LD -> LE Code Transition (LP64)" in
// https://raw.githubusercontent.com/wiki/hjl-tools/x86-psABI/x86-64-psABI-1.0.pdf
- Loc[-3] = 0x66;
- memcpy(Loc - 2, Inst, sizeof(Inst));
+ loc[-3] = 0x66;
+ memcpy(loc - 2, inst, sizeof(inst));
return;
}
- error(getErrorLocation(Loc - 3) +
+ error(getErrorLocation(loc - 3) +
"expected R_X86_64_PLT32 or R_X86_64_GOTPCRELX after R_X86_64_TLSLD");
}
-template <class ELFT>
-void X86_64<ELFT>::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
- switch (Type) {
+void X86_64::relocateOne(uint8_t *loc, RelType type, uint64_t val) const {
+ switch (type) {
case R_X86_64_8:
- checkUInt(Loc, Val, 8, Type);
- *Loc = Val;
+ checkIntUInt(loc, val, 8, type);
+ *loc = val;
+ break;
+ case R_X86_64_PC8:
+ checkInt(loc, val, 8, type);
+ *loc = val;
break;
case R_X86_64_16:
- checkUInt(Loc, Val, 16, Type);
- write16le(Loc, Val);
+ checkIntUInt(loc, val, 16, type);
+ write16le(loc, val);
+ break;
+ case R_X86_64_PC16:
+ checkInt(loc, val, 16, type);
+ write16le(loc, val);
break;
case R_X86_64_32:
- checkUInt(Loc, Val, 32, Type);
- write32le(Loc, Val);
+ checkUInt(loc, val, 32, type);
+ write32le(loc, val);
break;
case R_X86_64_32S:
case R_X86_64_TPOFF32:
case R_X86_64_GOT32:
case R_X86_64_GOTPC32:
+ case R_X86_64_GOTPC32_TLSDESC:
case R_X86_64_GOTPCREL:
case R_X86_64_GOTPCRELX:
case R_X86_64_REX_GOTPCRELX:
@@ -343,49 +383,47 @@ void X86_64<ELFT>::relocateOne(uint8_t *Loc, RelType Type, uint64_t Val) const {
case R_X86_64_TLSLD:
case R_X86_64_DTPOFF32:
case R_X86_64_SIZE32:
- checkInt(Loc, Val, 32, Type);
- write32le(Loc, Val);
+ checkInt(loc, val, 32, type);
+ write32le(loc, val);
break;
case R_X86_64_64:
case R_X86_64_DTPOFF64:
- case R_X86_64_GLOB_DAT:
case R_X86_64_PC64:
case R_X86_64_SIZE64:
case R_X86_64_GOT64:
case R_X86_64_GOTOFF64:
case R_X86_64_GOTPC64:
- write64le(Loc, Val);
+ write64le(loc, val);
break;
default:
- error(getErrorLocation(Loc) + "unrecognized reloc " + Twine(Type));
+ llvm_unreachable("unknown relocation");
}
}
-template <class ELFT>
-RelExpr X86_64<ELFT>::adjustRelaxExpr(RelType Type, const uint8_t *Data,
- RelExpr RelExpr) const {
- if (Type != R_X86_64_GOTPCRELX && Type != R_X86_64_REX_GOTPCRELX)
- return RelExpr;
- const uint8_t Op = Data[-2];
- const uint8_t ModRm = Data[-1];
+RelExpr X86_64::adjustRelaxExpr(RelType type, const uint8_t *data,
+ RelExpr relExpr) const {
+ if (type != R_X86_64_GOTPCRELX && type != R_X86_64_REX_GOTPCRELX)
+ return relExpr;
+ const uint8_t op = data[-2];
+ const uint8_t modRm = data[-1];
// FIXME: When PIC is disabled and foo is defined locally in the
// lower 32 bit address space, memory operand in mov can be converted into
// immediate operand. Otherwise, mov must be changed to lea. We support only
// latter relaxation at this moment.
- if (Op == 0x8b)
+ if (op == 0x8b)
return R_RELAX_GOT_PC;
// Relax call and jmp.
- if (Op == 0xff && (ModRm == 0x15 || ModRm == 0x25))
+ if (op == 0xff && (modRm == 0x15 || modRm == 0x25))
return R_RELAX_GOT_PC;
// Relaxation of test, adc, add, and, cmp, or, sbb, sub, xor.
// If PIC then no relaxation is available.
// We also don't relax test/binop instructions without REX byte,
// they are 32bit operations and not common to have.
- assert(Type == R_X86_64_REX_GOTPCRELX);
- return Config->Pic ? RelExpr : R_RELAX_GOT_PC_NOPIC;
+ assert(type == R_X86_64_REX_GOTPCRELX);
+ return config->isPic ? relExpr : R_RELAX_GOT_PC_NOPIC;
}
// A subset of relaxations can only be applied for no-PIC. This method
@@ -393,12 +431,11 @@ RelExpr X86_64<ELFT>::adjustRelaxExpr(RelType Type, const uint8_t *Data,
// "Intel 64 and IA-32 Architectures Software Developer's Manual V2"
// (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/
// 64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf)
-template <class ELFT>
-void X86_64<ELFT>::relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
- uint8_t ModRm) const {
- const uint8_t Rex = Loc[-3];
+static void relaxGotNoPic(uint8_t *loc, uint64_t val, uint8_t op,
+ uint8_t modRm) {
+ const uint8_t rex = loc[-3];
// Convert "test %reg, foo@GOTPCREL(%rip)" to "test $foo, %reg".
- if (Op == 0x85) {
+ if (op == 0x85) {
// See "TEST-Logical Compare" (4-428 Vol. 2B),
// TEST r/m64, r64 uses "full" ModR / M byte (no opcode extension).
@@ -415,11 +452,11 @@ void X86_64<ELFT>::relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
// 0x38 == 00 111 000 binary.
// We transfer reg2 to reg1 here as operand.
// See "2.1.3 ModR/M and SIB Bytes" (Vol. 2A 2-3).
- Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3; // ModR/M byte.
+ loc[-1] = 0xc0 | (modRm & 0x38) >> 3; // ModR/M byte.
// Change opcode from TEST r/m64, r64 to TEST r/m64, imm32
// See "TEST-Logical Compare" (4-428 Vol. 2B).
- Loc[-2] = 0xf7;
+ loc[-2] = 0xf7;
// Move R bit to the B bit in REX byte.
// REX byte is encoded as 0100WRXB, where
@@ -432,8 +469,8 @@ void X86_64<ELFT>::relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
// REX.B This 1-bit value is an extension to the MODRM.rm field or the
// SIB.base field.
// See "2.2.1.2 More on REX Prefix Fields " (2-8 Vol. 2A).
- Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
- write32le(Loc, Val);
+ loc[-3] = (rex & ~0x4) | (rex & 0x4) >> 2;
+ write32le(loc, val);
return;
}
@@ -443,7 +480,7 @@ void X86_64<ELFT>::relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
// Convert "binop foo@GOTPCREL(%rip), %reg" to "binop $foo, %reg".
// Logic is close to one for test instruction above, but we also
// write opcode extension here, see below for details.
- Loc[-1] = 0xc0 | (ModRm & 0x38) >> 3 | (Op & 0x3c); // ModR/M byte.
+ loc[-1] = 0xc0 | (modRm & 0x38) >> 3 | (op & 0x3c); // ModR/M byte.
// Primary opcode is 0x81, opcode extension is one of:
// 000b = ADD, 001b is OR, 010b is ADC, 011b is SBB,
@@ -452,69 +489,67 @@ void X86_64<ELFT>::relaxGotNoPic(uint8_t *Loc, uint64_t Val, uint8_t Op,
// See "3.2 INSTRUCTIONS (A-M)" (Vol. 2A 3-15),
// "INSTRUCTION SET REFERENCE, N-Z" (Vol. 2B 4-1) for
// descriptions about each operation.
- Loc[-2] = 0x81;
- Loc[-3] = (Rex & ~0x4) | (Rex & 0x4) >> 2;
- write32le(Loc, Val);
+ loc[-2] = 0x81;
+ loc[-3] = (rex & ~0x4) | (rex & 0x4) >> 2;
+ write32le(loc, val);
}
-template <class ELFT>
-void X86_64<ELFT>::relaxGot(uint8_t *Loc, uint64_t Val) const {
- const uint8_t Op = Loc[-2];
- const uint8_t ModRm = Loc[-1];
+void X86_64::relaxGot(uint8_t *loc, RelType type, uint64_t val) const {
+ const uint8_t op = loc[-2];
+ const uint8_t modRm = loc[-1];
// Convert "mov foo@GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg".
- if (Op == 0x8b) {
- Loc[-2] = 0x8d;
- write32le(Loc, Val);
+ if (op == 0x8b) {
+ loc[-2] = 0x8d;
+ write32le(loc, val);
return;
}
- if (Op != 0xff) {
+ if (op != 0xff) {
// We are relaxing a rip relative to an absolute, so compensate
// for the old -4 addend.
- assert(!Config->Pic);
- relaxGotNoPic(Loc, Val + 4, Op, ModRm);
+ assert(!config->isPic);
+ relaxGotNoPic(loc, val + 4, op, modRm);
return;
}
// Convert call/jmp instructions.
- if (ModRm == 0x15) {
+ if (modRm == 0x15) {
// ABI says we can convert "call *foo@GOTPCREL(%rip)" to "nop; call foo".
// Instead we convert to "addr32 call foo" where addr32 is an instruction
// prefix. That makes result expression to be a single instruction.
- Loc[-2] = 0x67; // addr32 prefix
- Loc[-1] = 0xe8; // call
- write32le(Loc, Val);
+ loc[-2] = 0x67; // addr32 prefix
+ loc[-1] = 0xe8; // call
+ write32le(loc, val);
return;
}
// Convert "jmp *foo@GOTPCREL(%rip)" to "jmp foo; nop".
// jmp doesn't return, so it is fine to use nop here, it is just a stub.
- assert(ModRm == 0x25);
- Loc[-2] = 0xe9; // jmp
- Loc[3] = 0x90; // nop
- write32le(Loc - 1, Val + 1);
+ assert(modRm == 0x25);
+ loc[-2] = 0xe9; // jmp
+ loc[3] = 0x90; // nop
+ write32le(loc - 1, val + 1);
}
-// This anonymous namespace works around a warning bug in
-// old versions of gcc. See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56480
-namespace {
-
// A split-stack prologue starts by checking the amount of stack remaining
// in one of two ways:
// A) Comparing of the stack pointer to a field in the tcb.
// B) Or a load of a stack pointer offset with an lea to r10 or r11.
-template <>
-bool X86_64<ELF64LE>::adjustPrologueForCrossSplitStack(uint8_t *Loc,
- uint8_t *End,
- uint8_t StOther) const {
- if (Loc + 8 >= End)
+bool X86_64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end,
+ uint8_t stOther) const {
+ if (!config->is64) {
+ error("Target doesn't support split stacks.");
+ return false;
+ }
+
+ if (loc + 8 >= end)
return false;
// Replace "cmp %fs:0x70,%rsp" and subsequent branch
// with "stc, nopl 0x0(%rax,%rax,1)"
- if (memcmp(Loc, "\x64\x48\x3b\x24\x25", 5) == 0) {
- memcpy(Loc, "\xf9\x0f\x1f\x84\x00\x00\x00\x00", 8);
+ if (memcmp(loc, "\x64\x48\x3b\x24\x25", 5) == 0) {
+ memcpy(loc, "\xf9\x0f\x1f\x84\x00\x00\x00\x00", 8);
return true;
}
@@ -522,25 +557,16 @@ bool X86_64<ELF64LE>::adjustPrologueForCrossSplitStack(uint8_t *Loc,
// be r10 or r11. The lea instruction feeds a subsequent compare which checks
// if there is X available stack space. Making X larger effectively reserves
// that much additional space. The stack grows downward so subtract the value.
- if (memcmp(Loc, "\x4c\x8d\x94\x24", 4) == 0 ||
- memcmp(Loc, "\x4c\x8d\x9c\x24", 4) == 0) {
+ if (memcmp(loc, "\x4c\x8d\x94\x24", 4) == 0 ||
+ memcmp(loc, "\x4c\x8d\x9c\x24", 4) == 0) {
// The offset bytes are encoded four bytes after the start of the
// instruction.
- write32le(Loc + 4, read32le(Loc + 4) - 0x4000);
+ write32le(loc + 4, read32le(loc + 4) - 0x4000);
return true;
}
return false;
}
-template <>
-bool X86_64<ELF32LE>::adjustPrologueForCrossSplitStack(uint8_t *Loc,
- uint8_t *End,
- uint8_t StOther) const {
- llvm_unreachable("Target doesn't support split stacks.");
-}
-
-} // namespace
-
// These nonstandard PLT entries are to migtigate Spectre v2 security
// vulnerability. In order to mitigate Spectre v2, we want to avoid indirect
// branch instructions such as `jmp *GOTPLT(%rip)`. So, in the following PLT
@@ -551,37 +577,36 @@ bool X86_64<ELF32LE>::adjustPrologueForCrossSplitStack(uint8_t *Loc,
// is specified, all dynamic symbols are resolved at load-time. Thus, when
// that option is given, we can omit code for symbol lazy resolution.
namespace {
-template <class ELFT> class Retpoline : public X86_64<ELFT> {
+class Retpoline : public X86_64 {
public:
Retpoline();
- void writeGotPlt(uint8_t *Buf, const Symbol &S) const override;
- void writePltHeader(uint8_t *Buf) const override;
- void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
- int32_t Index, unsigned RelOff) const override;
+ void writeGotPlt(uint8_t *buf, const Symbol &s) const override;
+ void writePltHeader(uint8_t *buf) const override;
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
};
-template <class ELFT> class RetpolineZNow : public X86_64<ELFT> {
+class RetpolineZNow : public X86_64 {
public:
RetpolineZNow();
- void writeGotPlt(uint8_t *Buf, const Symbol &S) const override {}
- void writePltHeader(uint8_t *Buf) const override;
- void writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr, uint64_t PltEntryAddr,
- int32_t Index, unsigned RelOff) const override;
+ void writeGotPlt(uint8_t *buf, const Symbol &s) const override {}
+ void writePltHeader(uint8_t *buf) const override;
+ void writePlt(uint8_t *buf, uint64_t gotPltEntryAddr, uint64_t pltEntryAddr,
+ int32_t index, unsigned relOff) const override;
};
} // namespace
-template <class ELFT> Retpoline<ELFT>::Retpoline() {
- TargetInfo::PltHeaderSize = 48;
- TargetInfo::PltEntrySize = 32;
+Retpoline::Retpoline() {
+ pltHeaderSize = 48;
+ pltEntrySize = 32;
}
-template <class ELFT>
-void Retpoline<ELFT>::writeGotPlt(uint8_t *Buf, const Symbol &S) const {
- write64le(Buf, S.getPltVA() + 17);
+void Retpoline::writeGotPlt(uint8_t *buf, const Symbol &s) const {
+ write64le(buf, s.getPltVA() + 17);
}
-template <class ELFT> void Retpoline<ELFT>::writePltHeader(uint8_t *Buf) const {
- const uint8_t Insn[] = {
+void Retpoline::writePltHeader(uint8_t *buf) const {
+ const uint8_t insn[] = {
0xff, 0x35, 0, 0, 0, 0, // 0: pushq GOTPLT+8(%rip)
0x4c, 0x8b, 0x1d, 0, 0, 0, 0, // 6: mov GOTPLT+16(%rip), %r11
0xe8, 0x0e, 0x00, 0x00, 0x00, // d: callq next
@@ -594,19 +619,18 @@ template <class ELFT> void Retpoline<ELFT>::writePltHeader(uint8_t *Buf) const {
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 25: int3; padding
0xcc, 0xcc, 0xcc, 0xcc, // 2c: int3; padding
};
- memcpy(Buf, Insn, sizeof(Insn));
+ memcpy(buf, insn, sizeof(insn));
- uint64_t GotPlt = In.GotPlt->getVA();
- uint64_t Plt = In.Plt->getVA();
- write32le(Buf + 2, GotPlt - Plt - 6 + 8);
- write32le(Buf + 9, GotPlt - Plt - 13 + 16);
+ uint64_t gotPlt = in.gotPlt->getVA();
+ uint64_t plt = in.plt->getVA();
+ write32le(buf + 2, gotPlt - plt - 6 + 8);
+ write32le(buf + 9, gotPlt - plt - 13 + 16);
}
-template <class ELFT>
-void Retpoline<ELFT>::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
- uint64_t PltEntryAddr, int32_t Index,
- unsigned RelOff) const {
- const uint8_t Insn[] = {
+void Retpoline::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
+ const uint8_t insn[] = {
0x4c, 0x8b, 0x1d, 0, 0, 0, 0, // 0: mov foo@GOTPLT(%rip), %r11
0xe8, 0, 0, 0, 0, // 7: callq plt+0x20
0xe9, 0, 0, 0, 0, // c: jmp plt+0x12
@@ -614,25 +638,24 @@ void Retpoline<ELFT>::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
0xe9, 0, 0, 0, 0, // 16: jmp plt+0
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 1b: int3; padding
};
- memcpy(Buf, Insn, sizeof(Insn));
+ memcpy(buf, insn, sizeof(insn));
- uint64_t Off = getPltEntryOffset(Index);
+ uint64_t off = pltHeaderSize + pltEntrySize * index;
- write32le(Buf + 3, GotPltEntryAddr - PltEntryAddr - 7);
- write32le(Buf + 8, -Off - 12 + 32);
- write32le(Buf + 13, -Off - 17 + 18);
- write32le(Buf + 18, Index);
- write32le(Buf + 23, -Off - 27);
+ write32le(buf + 3, gotPltEntryAddr - pltEntryAddr - 7);
+ write32le(buf + 8, -off - 12 + 32);
+ write32le(buf + 13, -off - 17 + 18);
+ write32le(buf + 18, index);
+ write32le(buf + 23, -off - 27);
}
-template <class ELFT> RetpolineZNow<ELFT>::RetpolineZNow() {
- TargetInfo::PltHeaderSize = 32;
- TargetInfo::PltEntrySize = 16;
+RetpolineZNow::RetpolineZNow() {
+ pltHeaderSize = 32;
+ pltEntrySize = 16;
}
-template <class ELFT>
-void RetpolineZNow<ELFT>::writePltHeader(uint8_t *Buf) const {
- const uint8_t Insn[] = {
+void RetpolineZNow::writePltHeader(uint8_t *buf) const {
+ const uint8_t insn[] = {
0xe8, 0x0b, 0x00, 0x00, 0x00, // 0: call next
0xf3, 0x90, // 5: loop: pause
0x0f, 0xae, 0xe8, // 7: lfence
@@ -644,37 +667,35 @@ void RetpolineZNow<ELFT>::writePltHeader(uint8_t *Buf) const {
0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 1a: int3; padding
0xcc, // 1f: int3; padding
};
- memcpy(Buf, Insn, sizeof(Insn));
+ memcpy(buf, insn, sizeof(insn));
}
-template <class ELFT>
-void RetpolineZNow<ELFT>::writePlt(uint8_t *Buf, uint64_t GotPltEntryAddr,
- uint64_t PltEntryAddr, int32_t Index,
- unsigned RelOff) const {
- const uint8_t Insn[] = {
+void RetpolineZNow::writePlt(uint8_t *buf, uint64_t gotPltEntryAddr,
+ uint64_t pltEntryAddr, int32_t index,
+ unsigned relOff) const {
+ const uint8_t insn[] = {
0x4c, 0x8b, 0x1d, 0, 0, 0, 0, // mov foo@GOTPLT(%rip), %r11
0xe9, 0, 0, 0, 0, // jmp plt+0
0xcc, 0xcc, 0xcc, 0xcc, // int3; padding
};
- memcpy(Buf, Insn, sizeof(Insn));
+ memcpy(buf, insn, sizeof(insn));
- write32le(Buf + 3, GotPltEntryAddr - PltEntryAddr - 7);
- write32le(Buf + 8, -getPltEntryOffset(Index) - 12);
+ write32le(buf + 3, gotPltEntryAddr - pltEntryAddr - 7);
+ write32le(buf + 8, -pltHeaderSize - pltEntrySize * index - 12);
}
-template <class ELFT> static TargetInfo *getTargetInfo() {
- if (Config->ZRetpolineplt) {
- if (Config->ZNow) {
- static RetpolineZNow<ELFT> T;
- return &T;
+static TargetInfo *getTargetInfo() {
+ if (config->zRetpolineplt) {
+ if (config->zNow) {
+ static RetpolineZNow t;
+ return &t;
}
- static Retpoline<ELFT> T;
- return &T;
+ static Retpoline t;
+ return &t;
}
- static X86_64<ELFT> T;
- return &T;
+ static X86_64 t;
+ return &t;
}
-TargetInfo *elf::getX32TargetInfo() { return getTargetInfo<ELF32LE>(); }
-TargetInfo *elf::getX86_64TargetInfo() { return getTargetInfo<ELF64LE>(); }
+TargetInfo *elf::getX86_64TargetInfo() { return getTargetInfo(); }