diff options
Diffstat (limited to 'contrib/llvm/lib/Target/AArch64/MCTargetDesc')
12 files changed, 2445 insertions, 0 deletions
diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp new file mode 100644 index 000000000000..8a9077c1cab4 --- /dev/null +++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp @@ -0,0 +1,585 @@ +//===-- AArch64AsmBackend.cpp - AArch64 Assembler Backend -----------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the AArch64 implementation of the MCAsmBackend class, +// which is principally concerned with relaxation of the various fixup kinds. +// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/AArch64FixupKinds.h" +#include "MCTargetDesc/AArch64MCTargetDesc.h" +#include "llvm/MC/MCAsmBackend.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/MC/MCELFObjectWriter.h" +#include "llvm/MC/MCFixupKindInfo.h" +#include "llvm/MC/MCObjectWriter.h" +#include "llvm/Support/ELF.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" +using namespace llvm; + +namespace { +class AArch64AsmBackend : public MCAsmBackend { + const MCSubtargetInfo* STI; +public: + AArch64AsmBackend(const Target &T, const StringRef TT) + : MCAsmBackend(), + STI(AArch64_MC::createAArch64MCSubtargetInfo(TT, "", "")) + {} + + + ~AArch64AsmBackend() { + delete STI; + } + + bool writeNopData(uint64_t Count, MCObjectWriter *OW) const; + + virtual void processFixupValue(const MCAssembler &Asm, + const MCAsmLayout &Layout, + const MCFixup &Fixup, const MCFragment *DF, + MCValue &Target, uint64_t &Value, + bool &IsResolved); +}; +} // end anonymous namespace + +void AArch64AsmBackend::processFixupValue(const MCAssembler &Asm, + const MCAsmLayout &Layout, + const MCFixup 
&Fixup, + const MCFragment *DF, + MCValue &Target, uint64_t &Value, + bool &IsResolved) { + // The ADRP instruction adds some multiple of 0x1000 to the current PC & + // ~0xfff. This means that the required offset to reach a symbol can vary by + // up to one step depending on where the ADRP is in memory. For example: + // + // ADRP x0, there + // there: + // + // If the ADRP occurs at address 0xffc then "there" will be at 0x1000 and + // we'll need that as an offset. At any other address "there" will be in the + // same page as the ADRP and the instruction should encode 0x0. Assuming the + // section isn't 0x1000-aligned, we therefore need to delegate this decision + // to the linker -- a relocation! + if ((uint32_t)Fixup.getKind() == AArch64::fixup_a64_adr_prel_page || + (uint32_t)Fixup.getKind() == AArch64::fixup_a64_adr_prel_got_page || + (uint32_t)Fixup.getKind() == AArch64::fixup_a64_adr_gottprel_page || + (uint32_t)Fixup.getKind() == AArch64::fixup_a64_tlsdesc_adr_page) + IsResolved = false; +} + + +static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value); + +namespace { + +class ELFAArch64AsmBackend : public AArch64AsmBackend { +public: + uint8_t OSABI; + ELFAArch64AsmBackend(const Target &T, const StringRef TT, + uint8_t _OSABI) + : AArch64AsmBackend(T, TT), OSABI(_OSABI) { } + + bool fixupNeedsRelaxation(const MCFixup &Fixup, + uint64_t Value, + const MCRelaxableFragment *DF, + const MCAsmLayout &Layout) const; + + unsigned int getNumFixupKinds() const { + return AArch64::NumTargetFixupKinds; + } + + const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const { + const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = { +// This table *must* be in the order that the fixup_* kinds are defined in +// AArch64FixupKinds.h. 
+// +// Name Offset (bits) Size (bits) Flags +{ "fixup_a64_ld_prel", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_a64_adr_prel", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_a64_adr_prel_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_a64_add_lo12", 0, 32, 0 }, +{ "fixup_a64_ldst8_lo12", 0, 32, 0 }, +{ "fixup_a64_ldst16_lo12", 0, 32, 0 }, +{ "fixup_a64_ldst32_lo12", 0, 32, 0 }, +{ "fixup_a64_ldst64_lo12", 0, 32, 0 }, +{ "fixup_a64_ldst128_lo12", 0, 32, 0 }, +{ "fixup_a64_tstbr", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_a64_condbr", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_a64_uncondbr", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_a64_call", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_a64_movw_uabs_g0", 0, 32, 0 }, +{ "fixup_a64_movw_uabs_g0_nc", 0, 32, 0 }, +{ "fixup_a64_movw_uabs_g1", 0, 32, 0 }, +{ "fixup_a64_movw_uabs_g1_nc", 0, 32, 0 }, +{ "fixup_a64_movw_uabs_g2", 0, 32, 0 }, +{ "fixup_a64_movw_uabs_g2_nc", 0, 32, 0 }, +{ "fixup_a64_movw_uabs_g3", 0, 32, 0 }, +{ "fixup_a64_movw_sabs_g0", 0, 32, 0 }, +{ "fixup_a64_movw_sabs_g1", 0, 32, 0 }, +{ "fixup_a64_movw_sabs_g2", 0, 32, 0 }, +{ "fixup_a64_adr_prel_got_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_a64_ld64_got_lo12_nc", 0, 32, 0 }, +{ "fixup_a64_movw_dtprel_g2", 0, 32, 0 }, +{ "fixup_a64_movw_dtprel_g1", 0, 32, 0 }, +{ "fixup_a64_movw_dtprel_g1_nc", 0, 32, 0 }, +{ "fixup_a64_movw_dtprel_g0", 0, 32, 0 }, +{ "fixup_a64_movw_dtprel_g0_nc", 0, 32, 0 }, +{ "fixup_a64_add_dtprel_hi12", 0, 32, 0 }, +{ "fixup_a64_add_dtprel_lo12", 0, 32, 0 }, +{ "fixup_a64_add_dtprel_lo12_nc", 0, 32, 0 }, +{ "fixup_a64_ldst8_dtprel_lo12", 0, 32, 0 }, +{ "fixup_a64_ldst8_dtprel_lo12_nc", 0, 32, 0 }, +{ "fixup_a64_ldst16_dtprel_lo12", 0, 32, 0 }, +{ "fixup_a64_ldst16_dtprel_lo12_nc", 0, 32, 0 }, +{ "fixup_a64_ldst32_dtprel_lo12", 0, 32, 0 }, +{ "fixup_a64_ldst32_dtprel_lo12_nc", 0, 32, 0 }, +{ "fixup_a64_ldst64_dtprel_lo12", 0, 32, 0 }, +{ "fixup_a64_ldst64_dtprel_lo12_nc", 0, 32, 
0 }, +{ "fixup_a64_movw_gottprel_g1", 0, 32, 0 }, +{ "fixup_a64_movw_gottprel_g0_nc", 0, 32, 0 }, +{ "fixup_a64_adr_gottprel_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_a64_ld64_gottprel_lo12_nc", 0, 32, 0 }, +{ "fixup_a64_ld_gottprel_prel19", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_a64_movw_tprel_g2", 0, 32, 0 }, +{ "fixup_a64_movw_tprel_g1", 0, 32, 0 }, +{ "fixup_a64_movw_tprel_g1_nc", 0, 32, 0 }, +{ "fixup_a64_movw_tprel_g0", 0, 32, 0 }, +{ "fixup_a64_movw_tprel_g0_nc", 0, 32, 0 }, +{ "fixup_a64_add_tprel_hi12", 0, 32, 0 }, +{ "fixup_a64_add_tprel_lo12", 0, 32, 0 }, +{ "fixup_a64_add_tprel_lo12_nc", 0, 32, 0 }, +{ "fixup_a64_ldst8_tprel_lo12", 0, 32, 0 }, +{ "fixup_a64_ldst8_tprel_lo12_nc", 0, 32, 0 }, +{ "fixup_a64_ldst16_tprel_lo12", 0, 32, 0 }, +{ "fixup_a64_ldst16_tprel_lo12_nc", 0, 32, 0 }, +{ "fixup_a64_ldst32_tprel_lo12", 0, 32, 0 }, +{ "fixup_a64_ldst32_tprel_lo12_nc", 0, 32, 0 }, +{ "fixup_a64_ldst64_tprel_lo12", 0, 32, 0 }, +{ "fixup_a64_ldst64_tprel_lo12_nc", 0, 32, 0 }, +{ "fixup_a64_tlsdesc_adr_page", 0, 32, MCFixupKindInfo::FKF_IsPCRel }, +{ "fixup_a64_tlsdesc_ld64_lo12_nc", 0, 32, 0 }, +{ "fixup_a64_tlsdesc_add_lo12_nc", 0, 32, 0 }, +{ "fixup_a64_tlsdesc_call", 0, 0, 0 } + }; + if (Kind < FirstTargetFixupKind) + return MCAsmBackend::getFixupKindInfo(Kind); + + assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() && + "Invalid kind!"); + return Infos[Kind - FirstTargetFixupKind]; + } + + void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize, + uint64_t Value) const { + unsigned NumBytes = getFixupKindInfo(Fixup.getKind()).TargetSize / 8; + Value = adjustFixupValue(Fixup.getKind(), Value); + if (!Value) return; // Doesn't change encoding. + + unsigned Offset = Fixup.getOffset(); + assert(Offset + NumBytes <= DataSize && "Invalid fixup offset!"); + + // For each byte of the fragment that the fixup touches, mask in the bits + // from the fixup value. 
+ for (unsigned i = 0; i != NumBytes; ++i) { + Data[Offset + i] |= uint8_t((Value >> (i * 8)) & 0xff); + } + } + + bool mayNeedRelaxation(const MCInst&) const { + return false; + } + + void relaxInstruction(const MCInst&, llvm::MCInst&) const { + llvm_unreachable("Cannot relax instructions"); + } + + MCObjectWriter *createObjectWriter(raw_ostream &OS) const { + return createAArch64ELFObjectWriter(OS, OSABI); + } +}; + +} // end anonymous namespace + +bool +ELFAArch64AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, + uint64_t Value, + const MCRelaxableFragment *DF, + const MCAsmLayout &Layout) const { + // Correct for now. With all instructions 32-bit only very low-level + // considerations could make you select something which may fail. + return false; +} + + +bool AArch64AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const { + // Can't emit NOP with size not multiple of 32-bits + if (Count % 4 != 0) + return false; + + uint64_t NumNops = Count / 4; + for (uint64_t i = 0; i != NumNops; ++i) + OW->Write32(0xd503201f); + + return true; +} + +static unsigned ADRImmBits(unsigned Value) { + unsigned lo2 = Value & 0x3; + unsigned hi19 = (Value & 0x1fffff) >> 2; + + return (hi19 << 5) | (lo2 << 29); +} + +static uint64_t adjustFixupValue(unsigned Kind, uint64_t Value) { + switch (Kind) { + default: + llvm_unreachable("Unknown fixup kind!"); + case FK_Data_2: + assert((int64_t)Value >= -32768 && + (int64_t)Value <= 65536 && + "Out of range ABS16 fixup"); + return Value; + case FK_Data_4: + assert((int64_t)Value >= -(1LL << 31) && + (int64_t)Value <= (1LL << 32) - 1 && + "Out of range ABS32 fixup"); + return Value; + case FK_Data_8: + return Value; + + case AArch64::fixup_a64_ld_gottprel_prel19: + // R_AARCH64_LD_GOTTPREL_PREL19: Set a load-literal immediate to bits 1F + // FFFC of G(TPREL(S+A)) - P; check -2^20 <= X < 2^20. 
+ case AArch64::fixup_a64_ld_prel: + // R_AARCH64_LD_PREL_LO19: Sets a load-literal (immediate) value to bits + // 1F FFFC of S+A-P, checking that -2^20 <= S+A-P < 2^20. + assert((int64_t)Value >= -(1LL << 20) && + (int64_t)Value < (1LL << 20) && "Out of range LDR (lit) fixup"); + return (Value & 0x1ffffc) << 3; + + case AArch64::fixup_a64_adr_prel: + // R_AARCH64_ADR_PREL_LO21: Sets an ADR immediate value to bits 1F FFFF of + // the result of S+A-P, checking that -2^20 <= S+A-P < 2^20. + assert((int64_t)Value >= -(1LL << 20) && + (int64_t)Value < (1LL << 20) && "Out of range ADR fixup"); + return ADRImmBits(Value & 0x1fffff); + + case AArch64::fixup_a64_adr_prel_page: + // R_AARCH64_ADR_PREL_PG_HI21: Sets an ADRP immediate value to bits 1 FFFF + // F000 of the result of the operation, checking that -2^32 <= result < + // 2^32. + assert((int64_t)Value >= -(1LL << 32) && + (int64_t)Value < (1LL << 32) && "Out of range ADRP fixup"); + return ADRImmBits((Value & 0x1fffff000ULL) >> 12); + + case AArch64::fixup_a64_add_dtprel_hi12: + // R_AARCH64_TLSLD_ADD_DTPREL_LO12: Set an ADD immediate field to bits + // FF F000 of DTPREL(S+A), check 0 <= X < 2^24. + case AArch64::fixup_a64_add_tprel_hi12: + // R_AARCH64_TLSLD_ADD_TPREL_LO12: Set an ADD immediate field to bits + // FF F000 of TPREL(S+A), check 0 <= X < 2^24. + assert((int64_t)Value >= 0 && + (int64_t)Value < (1LL << 24) && "Out of range ADD fixup"); + return (Value & 0xfff000) >> 2; + + case AArch64::fixup_a64_add_dtprel_lo12: + // R_AARCH64_TLSLD_ADD_DTPREL_LO12: Set an ADD immediate field to bits + // FFF of DTPREL(S+A), check 0 <= X < 2^12. + case AArch64::fixup_a64_add_tprel_lo12: + // R_AARCH64_TLSLD_ADD_TPREL_LO12: Set an ADD immediate field to bits + // FFF of TPREL(S+A), check 0 <= X < 2^12. + assert((int64_t)Value >= 0 && + (int64_t)Value < (1LL << 12) && "Out of range ADD fixup"); + // ... fallthrough to no-checking versions ... 
+ case AArch64::fixup_a64_add_dtprel_lo12_nc: + // R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC: Set an ADD immediate field to bits + // FFF of DTPREL(S+A) with no overflow check. + case AArch64::fixup_a64_add_tprel_lo12_nc: + // R_AARCH64_TLSLD_ADD_TPREL_LO12_NC: Set an ADD immediate field to bits + // FFF of TPREL(S+A) with no overflow check. + case AArch64::fixup_a64_tlsdesc_add_lo12_nc: + // R_AARCH64_TLSDESC_ADD_LO12_NC: Set an ADD immediate field to bits + // FFF of G(TLSDESC(S+A)), with no overflow check. + case AArch64::fixup_a64_add_lo12: + // R_AARCH64_ADD_ABS_LO12_NC: Sets an ADD immediate value to bits FFF of + // S+A, with no overflow check. + return (Value & 0xfff) << 10; + + case AArch64::fixup_a64_ldst8_dtprel_lo12: + // R_AARCH64_TLSLD_LDST8_DTPREL_LO12: Set an LD/ST offset field to bits FFF + // of DTPREL(S+A), check 0 <= X < 2^12. + case AArch64::fixup_a64_ldst8_tprel_lo12: + // R_AARCH64_TLSLE_LDST8_TPREL_LO12: Set an LD/ST offset field to bits FFF + // of DTPREL(S+A), check 0 <= X < 2^12. + assert((int64_t) Value >= 0 && + (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup"); + // ... fallthrough to no-checking versions ... + case AArch64::fixup_a64_ldst8_dtprel_lo12_nc: + // R_AARCH64_TLSLD_LDST8_DTPREL_LO12: Set an LD/ST offset field to bits FFF + // of DTPREL(S+A), with no overflow check. + case AArch64::fixup_a64_ldst8_tprel_lo12_nc: + // R_AARCH64_TLSLD_LDST8_TPREL_LO12: Set an LD/ST offset field to bits FFF + // of TPREL(S+A), with no overflow check. + case AArch64::fixup_a64_ldst8_lo12: + // R_AARCH64_LDST8_ABS_LO12_NC: Sets an LD/ST immediate value to bits FFF + // of S+A, with no overflow check. + return (Value & 0xfff) << 10; + + case AArch64::fixup_a64_ldst16_dtprel_lo12: + // R_AARCH64_TLSLD_LDST16_DTPREL_LO12: Set an LD/ST offset field to bits FFE + // of DTPREL(S+A), check 0 <= X < 2^12. 
+ case AArch64::fixup_a64_ldst16_tprel_lo12: + // R_AARCH64_TLSLE_LDST16_TPREL_LO12: Set an LD/ST offset field to bits FFE + // of DTPREL(S+A), check 0 <= X < 2^12. + assert((int64_t) Value >= 0 && + (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup"); + // ... fallthrough to no-checking versions ... + case AArch64::fixup_a64_ldst16_dtprel_lo12_nc: + // R_AARCH64_TLSLD_LDST16_DTPREL_LO12: Set an LD/ST offset field to bits FFE + // of DTPREL(S+A), with no overflow check. + case AArch64::fixup_a64_ldst16_tprel_lo12_nc: + // R_AARCH64_TLSLD_LDST16_TPREL_LO12: Set an LD/ST offset field to bits FFE + // of TPREL(S+A), with no overflow check. + case AArch64::fixup_a64_ldst16_lo12: + // R_AARCH64_LDST16_ABS_LO12_NC: Sets an LD/ST immediate value to bits FFE + // of S+A, with no overflow check. + return (Value & 0xffe) << 9; + + case AArch64::fixup_a64_ldst32_dtprel_lo12: + // R_AARCH64_TLSLD_LDST32_DTPREL_LO12: Set an LD/ST offset field to bits FFC + // of DTPREL(S+A), check 0 <= X < 2^12. + case AArch64::fixup_a64_ldst32_tprel_lo12: + // R_AARCH64_TLSLE_LDST32_TPREL_LO12: Set an LD/ST offset field to bits FFC + // of DTPREL(S+A), check 0 <= X < 2^12. + assert((int64_t) Value >= 0 && + (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup"); + // ... fallthrough to no-checking versions ... + case AArch64::fixup_a64_ldst32_dtprel_lo12_nc: + // R_AARCH64_TLSLD_LDST32_DTPREL_LO12: Set an LD/ST offset field to bits FFC + // of DTPREL(S+A), with no overflow check. + case AArch64::fixup_a64_ldst32_tprel_lo12_nc: + // R_AARCH64_TLSLD_LDST32_TPREL_LO12: Set an LD/ST offset field to bits FFC + // of TPREL(S+A), with no overflow check. + case AArch64::fixup_a64_ldst32_lo12: + // R_AARCH64_LDST32_ABS_LO12_NC: Sets an LD/ST immediate value to bits FFC + // of S+A, with no overflow check. 
+ return (Value & 0xffc) << 8; + + case AArch64::fixup_a64_ldst64_dtprel_lo12: + // R_AARCH64_TLSLD_LDST64_DTPREL_LO12: Set an LD/ST offset field to bits FF8 + // of DTPREL(S+A), check 0 <= X < 2^12. + case AArch64::fixup_a64_ldst64_tprel_lo12: + // R_AARCH64_TLSLE_LDST64_TPREL_LO12: Set an LD/ST offset field to bits FF8 + // of DTPREL(S+A), check 0 <= X < 2^12. + assert((int64_t) Value >= 0 && + (int64_t) Value < (1LL << 12) && "Out of range LD/ST fixup"); + // ... fallthrough to no-checking versions ... + case AArch64::fixup_a64_ldst64_dtprel_lo12_nc: + // R_AARCH64_TLSLD_LDST64_DTPREL_LO12: Set an LD/ST offset field to bits FF8 + // of DTPREL(S+A), with no overflow check. + case AArch64::fixup_a64_ldst64_tprel_lo12_nc: + // R_AARCH64_TLSLD_LDST64_TPREL_LO12: Set an LD/ST offset field to bits FF8 + // of TPREL(S+A), with no overflow check. + case AArch64::fixup_a64_ldst64_lo12: + // R_AARCH64_LDST64_ABS_LO12_NC: Sets an LD/ST immediate value to bits FF8 + // of S+A, with no overflow check. + return (Value & 0xff8) << 7; + + case AArch64::fixup_a64_ldst128_lo12: + // R_AARCH64_LDST128_ABS_LO12_NC: Sets an LD/ST immediate value to bits FF0 + // of S+A, with no overflow check. + return (Value & 0xff0) << 6; + + case AArch64::fixup_a64_movw_uabs_g0: + // R_AARCH64_MOVW_UABS_G0: Sets a MOVZ immediate field to bits FFFF of S+A + // with a check that S+A < 2^16 + assert(Value <= 0xffff && "Out of range move wide fixup"); + return (Value & 0xffff) << 5; + + case AArch64::fixup_a64_movw_dtprel_g0_nc: + // R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC: Sets a MOVK immediate field to bits + // FFFF of DTPREL(S+A) with no overflow check. + case AArch64::fixup_a64_movw_gottprel_g0_nc: + // R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC: Sets a MOVK immediate field to bits + // FFFF of G(TPREL(S+A)) - GOT with no overflow check. + case AArch64::fixup_a64_movw_tprel_g0_nc: + // R_AARCH64_TLSLE_MOVW_TPREL_G0_NC: Sets a MOVK immediate field to bits + // FFFF of TPREL(S+A) with no overflow check. 
+ case AArch64::fixup_a64_movw_uabs_g0_nc: + // R_AARCH64_MOVW_UABS_G0_NC: Sets a MOVK immediate field to bits FFFF of + // S+A with no overflow check. + return (Value & 0xffff) << 5; + + case AArch64::fixup_a64_movw_uabs_g1: + // R_AARCH64_MOVW_UABS_G1: Sets a MOVZ immediate field to bits FFFF0000 of + // S+A with a check that S+A < 2^32 + assert(Value <= 0xffffffffull && "Out of range move wide fixup"); + return ((Value >> 16) & 0xffff) << 5; + + case AArch64::fixup_a64_movw_dtprel_g1_nc: + // R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC: Set a MOVK immediate field + // to bits FFFF0000 of DTPREL(S+A), with no overflow check. + case AArch64::fixup_a64_movw_tprel_g1_nc: + // R_AARCH64_TLSLD_MOVW_TPREL_G1_NC: Set a MOVK immediate field + // to bits FFFF0000 of TPREL(S+A), with no overflow check. + case AArch64::fixup_a64_movw_uabs_g1_nc: + // R_AARCH64_MOVW_UABS_G1_NC: Sets a MOVK immediate field to bits + // FFFF0000 of S+A with no overflow check. + return ((Value >> 16) & 0xffff) << 5; + + case AArch64::fixup_a64_movw_uabs_g2: + // R_AARCH64_MOVW_UABS_G2: Sets a MOVZ immediate field to bits FFFF 0000 + // 0000 of S+A with a check that S+A < 2^48 + assert(Value <= 0xffffffffffffull && "Out of range move wide fixup"); + return ((Value >> 32) & 0xffff) << 5; + + case AArch64::fixup_a64_movw_uabs_g2_nc: + // R_AARCH64_MOVW_UABS_G2: Sets a MOVK immediate field to bits FFFF 0000 + // 0000 of S+A with no overflow check. + return ((Value >> 32) & 0xffff) << 5; + + case AArch64::fixup_a64_movw_uabs_g3: + // R_AARCH64_MOVW_UABS_G3: Sets a MOVZ immediate field to bits FFFF 0000 + // 0000 0000 of S+A (no overflow check needed) + return ((Value >> 48) & 0xffff) << 5; + + case AArch64::fixup_a64_movw_dtprel_g0: + // R_AARCH64_TLSLD_MOVW_DTPREL_G0: Set a MOV[NZ] immediate field + // to bits FFFF of DTPREL(S+A). + case AArch64::fixup_a64_movw_tprel_g0: + // R_AARCH64_TLSLE_MOVW_TPREL_G0: Set a MOV[NZ] immediate field to + // bits FFFF of TPREL(S+A). 
+ case AArch64::fixup_a64_movw_sabs_g0: { + // R_AARCH64_MOVW_SABS_G0: Sets MOV[NZ] immediate field using bits FFFF of + // S+A (see notes below); check -2^16 <= S+A < 2^16. (notes say that we + // should convert between MOVN and MOVZ to achieve our goals). + int64_t Signed = Value; + assert(Signed >= -(1LL << 16) && Signed < (1LL << 16) + && "Out of range move wide fixup"); + if (Signed >= 0) { + Value = (Value & 0xffff) << 5; + // Bit 30 converts the MOVN encoding into a MOVZ + Value |= 1 << 30; + } else { + // MCCodeEmitter should have encoded a MOVN, which is fine. + Value = (~Value & 0xffff) << 5; + } + return Value; + } + + case AArch64::fixup_a64_movw_dtprel_g1: + // R_AARCH64_TLSLD_MOVW_DTPREL_G1: Set a MOV[NZ] immediate field + // to bits FFFF0000 of DTPREL(S+A). + case AArch64::fixup_a64_movw_gottprel_g1: + // R_AARCH64_TLSIE_MOVW_GOTTPREL_G1: Set a MOV[NZ] immediate field + // to bits FFFF0000 of G(TPREL(S+A)) - GOT. + case AArch64::fixup_a64_movw_tprel_g1: + // R_AARCH64_TLSLE_MOVW_TPREL_G1: Set a MOV[NZ] immediate field to + // bits FFFF0000 of TPREL(S+A). + case AArch64::fixup_a64_movw_sabs_g1: { + // R_AARCH64_MOVW_SABS_G1: Sets MOV[NZ] immediate field using bits FFFF 0000 + // of S+A (see notes below); check -2^32 <= S+A < 2^32. (notes say that we + // should convert between MOVN and MOVZ to achieve our goals). + int64_t Signed = Value; + assert(Signed >= -(1LL << 32) && Signed < (1LL << 32) + && "Out of range move wide fixup"); + if (Signed >= 0) { + Value = ((Value >> 16) & 0xffff) << 5; + // Bit 30 converts the MOVN encoding into a MOVZ + Value |= 1 << 30; + } else { + Value = ((~Value >> 16) & 0xffff) << 5; + } + return Value; + } + + case AArch64::fixup_a64_movw_dtprel_g2: + // R_AARCH64_TLSLD_MOVW_DTPREL_G2: Set a MOV[NZ] immediate field + // to bits FFFF 0000 0000 of DTPREL(S+A). + case AArch64::fixup_a64_movw_tprel_g2: + // R_AARCH64_TLSLE_MOVW_TPREL_G2: Set a MOV[NZ] immediate field to + // bits FFFF 0000 0000 of TPREL(S+A). 
+ case AArch64::fixup_a64_movw_sabs_g2: { + // R_AARCH64_MOVW_SABS_G2: Sets MOV[NZ] immediate field using bits FFFF 0000 + // 0000 of S+A (see notes below); check -2^48 <= S+A < 2^48. (notes say that + // we should convert between MOVN and MOVZ to achieve our goals). + int64_t Signed = Value; + assert(Signed >= -(1LL << 48) && Signed < (1LL << 48) + && "Out of range move wide fixup"); + if (Signed >= 0) { + Value = ((Value >> 32) & 0xffff) << 5; + // Bit 30 converts the MOVN encoding into a MOVZ + Value |= 1 << 30; + } else { + Value = ((~Value >> 32) & 0xffff) << 5; + } + return Value; + } + + case AArch64::fixup_a64_tstbr: + // R_AARCH64_TSTBR14: Sets the immediate field of a TBZ/TBNZ instruction to + // bits FFFC of S+A-P, checking -2^15 <= S+A-P < 2^15. + assert((int64_t)Value >= -(1LL << 15) && + (int64_t)Value < (1LL << 15) && "Out of range TBZ/TBNZ fixup"); + return (Value & 0xfffc) << (5 - 2); + + case AArch64::fixup_a64_condbr: + // R_AARCH64_CONDBR19: Sets the immediate field of a conditional branch + // instruction to bits 1FFFFC of S+A-P, checking -2^20 <= S+A-P < 2^20. + assert((int64_t)Value >= -(1LL << 20) && + (int64_t)Value < (1LL << 20) && "Out of range B.cond fixup"); + return (Value & 0x1ffffc) << (5 - 2); + + case AArch64::fixup_a64_uncondbr: + // R_AARCH64_JUMP26 same as below (except to a linker, possibly). + case AArch64::fixup_a64_call: + // R_AARCH64_CALL26: Sets a CALL immediate field to bits FFFFFFC of S+A-P, + // checking that -2^27 <= S+A-P < 2^27. + assert((int64_t)Value >= -(1LL << 27) && + (int64_t)Value < (1LL << 27) && "Out of range branch fixup"); + return (Value & 0xffffffc) >> 2; + + case AArch64::fixup_a64_adr_gottprel_page: + // R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21: Set an ADRP immediate field to bits + // 1FFFFF000 of Page(G(TPREL(S+A))) - Page(P); check -2^32 <= X < 2^32. 
+ case AArch64::fixup_a64_tlsdesc_adr_page: + // R_AARCH64_TLSDESC_ADR_PAGE: Set an ADRP immediate field to bits 1FFFFF000 + // of Page(G(TLSDESC(S+A))) - Page(P); check -2^32 <= X < 2^32. + case AArch64::fixup_a64_adr_prel_got_page: + // R_AARCH64_ADR_GOT_PAGE: Sets the immediate value of an ADRP to bits + // 1FFFFF000 of the operation, checking that -2^32 < Page(G(S))-Page(GOT) < + // 2^32. + assert((int64_t)Value >= -(1LL << 32) && + (int64_t)Value < (1LL << 32) && "Out of range ADRP fixup"); + return ADRImmBits((Value & 0x1fffff000ULL) >> 12); + + case AArch64::fixup_a64_ld64_gottprel_lo12_nc: + // R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC: Set an LD offset field to bits FF8 + // of X, with no overflow check. Check that X & 7 == 0. + case AArch64::fixup_a64_tlsdesc_ld64_lo12_nc: + // R_AARCH64_TLSDESC_LD64_LO12_NC: Set an LD offset field to bits FF8 of + // G(TLSDESC(S+A)), with no overflow check. Check that X & 7 == 0. + case AArch64::fixup_a64_ld64_got_lo12_nc: + // R_AARCH64_LD64_GOT_LO12_NC: Sets the LD/ST immediate field to bits FF8 of + // G(S) with no overflow check. Check X & 7 == 0 + assert(((int64_t)Value & 7) == 0 && "Misaligned fixup"); + return (Value & 0xff8) << 7; + + case AArch64::fixup_a64_tlsdesc_call: + // R_AARCH64_TLSDESC_CALL: For relaxation only. 
+ return 0; + } +} + +MCAsmBackend * +llvm::createAArch64AsmBackend(const Target &T, const MCRegisterInfo &MRI, + StringRef TT, StringRef CPU) { + Triple TheTriple(TT); + return new ELFAArch64AsmBackend(T, TT, TheTriple.getOS()); +} diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp new file mode 100644 index 000000000000..4bcc65dfca27 --- /dev/null +++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp @@ -0,0 +1,292 @@ +//===-- AArch64ELFObjectWriter.cpp - AArch64 ELF Writer -------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file handles ELF-specific object emission, converting LLVM's internal +// fixups into the appropriate relocations. 
+// +//===----------------------------------------------------------------------===// + +#include "MCTargetDesc/AArch64FixupKinds.h" +#include "MCTargetDesc/AArch64MCTargetDesc.h" +#include "llvm/MC/MCELFObjectWriter.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Support/ErrorHandling.h" + +using namespace llvm; + +namespace { +class AArch64ELFObjectWriter : public MCELFObjectTargetWriter { +public: + AArch64ELFObjectWriter(uint8_t OSABI); + + virtual ~AArch64ELFObjectWriter(); + +protected: + virtual unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup, + bool IsPCRel, bool IsRelocWithSymbol, + int64_t Addend) const; +private: +}; +} + +AArch64ELFObjectWriter::AArch64ELFObjectWriter(uint8_t OSABI) + : MCELFObjectTargetWriter(/*Is64Bit*/ true, OSABI, ELF::EM_AARCH64, + /*HasRelocationAddend*/ true) +{} + +AArch64ELFObjectWriter::~AArch64ELFObjectWriter() +{} + +unsigned AArch64ELFObjectWriter::GetRelocType(const MCValue &Target, + const MCFixup &Fixup, + bool IsPCRel, + bool IsRelocWithSymbol, + int64_t Addend) const { + unsigned Type; + if (IsPCRel) { + switch ((unsigned)Fixup.getKind()) { + default: + llvm_unreachable("Unimplemented fixup -> relocation"); + case FK_Data_8: + return ELF::R_AARCH64_PREL64; + case FK_Data_4: + return ELF::R_AARCH64_PREL32; + case FK_Data_2: + return ELF::R_AARCH64_PREL16; + case AArch64::fixup_a64_ld_prel: + Type = ELF::R_AARCH64_LD_PREL_LO19; + break; + case AArch64::fixup_a64_adr_prel: + Type = ELF::R_AARCH64_ADR_PREL_LO21; + break; + case AArch64::fixup_a64_adr_prel_page: + Type = ELF::R_AARCH64_ADR_PREL_PG_HI21; + break; + case AArch64::fixup_a64_adr_prel_got_page: + Type = ELF::R_AARCH64_ADR_GOT_PAGE; + break; + case AArch64::fixup_a64_tstbr: + Type = ELF::R_AARCH64_TSTBR14; + break; + case AArch64::fixup_a64_condbr: + Type = ELF::R_AARCH64_CONDBR19; + break; + case AArch64::fixup_a64_uncondbr: + Type = ELF::R_AARCH64_JUMP26; + break; + case AArch64::fixup_a64_call: + Type = ELF::R_AARCH64_CALL26; + break; + case 
AArch64::fixup_a64_adr_gottprel_page: + Type = ELF::R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21; + break; + case AArch64::fixup_a64_ld_gottprel_prel19: + Type = ELF::R_AARCH64_TLSIE_LD_GOTTPREL_PREL19; + break; + case AArch64::fixup_a64_tlsdesc_adr_page: + Type = ELF::R_AARCH64_TLSDESC_ADR_PAGE; + break; + } + } else { + switch ((unsigned)Fixup.getKind()) { + default: + llvm_unreachable("Unimplemented fixup -> relocation"); + case FK_Data_8: + return ELF::R_AARCH64_ABS64; + case FK_Data_4: + return ELF::R_AARCH64_ABS32; + case FK_Data_2: + return ELF::R_AARCH64_ABS16; + case AArch64::fixup_a64_add_lo12: + Type = ELF::R_AARCH64_ADD_ABS_LO12_NC; + break; + case AArch64::fixup_a64_ld64_got_lo12_nc: + Type = ELF::R_AARCH64_LD64_GOT_LO12_NC; + break; + case AArch64::fixup_a64_ldst8_lo12: + Type = ELF::R_AARCH64_LDST8_ABS_LO12_NC; + break; + case AArch64::fixup_a64_ldst16_lo12: + Type = ELF::R_AARCH64_LDST16_ABS_LO12_NC; + break; + case AArch64::fixup_a64_ldst32_lo12: + Type = ELF::R_AARCH64_LDST32_ABS_LO12_NC; + break; + case AArch64::fixup_a64_ldst64_lo12: + Type = ELF::R_AARCH64_LDST64_ABS_LO12_NC; + break; + case AArch64::fixup_a64_ldst128_lo12: + Type = ELF::R_AARCH64_LDST128_ABS_LO12_NC; + break; + case AArch64::fixup_a64_movw_uabs_g0: + Type = ELF::R_AARCH64_MOVW_UABS_G0; + break; + case AArch64::fixup_a64_movw_uabs_g0_nc: + Type = ELF::R_AARCH64_MOVW_UABS_G0_NC; + break; + case AArch64::fixup_a64_movw_uabs_g1: + Type = ELF::R_AARCH64_MOVW_UABS_G1; + break; + case AArch64::fixup_a64_movw_uabs_g1_nc: + Type = ELF::R_AARCH64_MOVW_UABS_G1_NC; + break; + case AArch64::fixup_a64_movw_uabs_g2: + Type = ELF::R_AARCH64_MOVW_UABS_G2; + break; + case AArch64::fixup_a64_movw_uabs_g2_nc: + Type = ELF::R_AARCH64_MOVW_UABS_G2_NC; + break; + case AArch64::fixup_a64_movw_uabs_g3: + Type = ELF::R_AARCH64_MOVW_UABS_G3; + break; + case AArch64::fixup_a64_movw_sabs_g0: + Type = ELF::R_AARCH64_MOVW_SABS_G0; + break; + case AArch64::fixup_a64_movw_sabs_g1: + Type = ELF::R_AARCH64_MOVW_SABS_G1; 
+ break; + case AArch64::fixup_a64_movw_sabs_g2: + Type = ELF::R_AARCH64_MOVW_SABS_G2; + break; + + // TLS Local-dynamic block + case AArch64::fixup_a64_movw_dtprel_g2: + Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G2; + break; + case AArch64::fixup_a64_movw_dtprel_g1: + Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1; + break; + case AArch64::fixup_a64_movw_dtprel_g1_nc: + Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC; + break; + case AArch64::fixup_a64_movw_dtprel_g0: + Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0; + break; + case AArch64::fixup_a64_movw_dtprel_g0_nc: + Type = ELF::R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC; + break; + case AArch64::fixup_a64_add_dtprel_hi12: + Type = ELF::R_AARCH64_TLSLD_ADD_DTPREL_HI12; + break; + case AArch64::fixup_a64_add_dtprel_lo12: + Type = ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12; + break; + case AArch64::fixup_a64_add_dtprel_lo12_nc: + Type = ELF::R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC; + break; + case AArch64::fixup_a64_ldst8_dtprel_lo12: + Type = ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12; + break; + case AArch64::fixup_a64_ldst8_dtprel_lo12_nc: + Type = ELF::R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC; + break; + case AArch64::fixup_a64_ldst16_dtprel_lo12: + Type = ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12; + break; + case AArch64::fixup_a64_ldst16_dtprel_lo12_nc: + Type = ELF::R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC; + break; + case AArch64::fixup_a64_ldst32_dtprel_lo12: + Type = ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12; + break; + case AArch64::fixup_a64_ldst32_dtprel_lo12_nc: + Type = ELF::R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC; + break; + case AArch64::fixup_a64_ldst64_dtprel_lo12: + Type = ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12; + break; + case AArch64::fixup_a64_ldst64_dtprel_lo12_nc: + Type = ELF::R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC; + break; + + // TLS initial-exec block + case AArch64::fixup_a64_movw_gottprel_g1: + Type = ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G1; + break; + case AArch64::fixup_a64_movw_gottprel_g0_nc: + Type = 
ELF::R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC; + break; + case AArch64::fixup_a64_ld64_gottprel_lo12_nc: + Type = ELF::R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC; + break; + + // TLS local-exec block + case AArch64::fixup_a64_movw_tprel_g2: + Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G2; + break; + case AArch64::fixup_a64_movw_tprel_g1: + Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1; + break; + case AArch64::fixup_a64_movw_tprel_g1_nc: + Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G1_NC; + break; + case AArch64::fixup_a64_movw_tprel_g0: + Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0; + break; + case AArch64::fixup_a64_movw_tprel_g0_nc: + Type = ELF::R_AARCH64_TLSLE_MOVW_TPREL_G0_NC; + break; + case AArch64::fixup_a64_add_tprel_hi12: + Type = ELF::R_AARCH64_TLSLE_ADD_TPREL_HI12; + break; + case AArch64::fixup_a64_add_tprel_lo12: + Type = ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12; + break; + case AArch64::fixup_a64_add_tprel_lo12_nc: + Type = ELF::R_AARCH64_TLSLE_ADD_TPREL_LO12_NC; + break; + case AArch64::fixup_a64_ldst8_tprel_lo12: + Type = ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12; + break; + case AArch64::fixup_a64_ldst8_tprel_lo12_nc: + Type = ELF::R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC; + break; + case AArch64::fixup_a64_ldst16_tprel_lo12: + Type = ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12; + break; + case AArch64::fixup_a64_ldst16_tprel_lo12_nc: + Type = ELF::R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC; + break; + case AArch64::fixup_a64_ldst32_tprel_lo12: + Type = ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12; + break; + case AArch64::fixup_a64_ldst32_tprel_lo12_nc: + Type = ELF::R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC; + break; + case AArch64::fixup_a64_ldst64_tprel_lo12: + Type = ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12; + break; + case AArch64::fixup_a64_ldst64_tprel_lo12_nc: + Type = ELF::R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC; + break; + + // TLS general-dynamic block + case AArch64::fixup_a64_tlsdesc_adr_page: + Type = ELF::R_AARCH64_TLSDESC_ADR_PAGE; + break; + case AArch64::fixup_a64_tlsdesc_ld64_lo12_nc: + 
Type = ELF::R_AARCH64_TLSDESC_LD64_LO12_NC; + break; + case AArch64::fixup_a64_tlsdesc_add_lo12_nc: + Type = ELF::R_AARCH64_TLSDESC_ADD_LO12_NC; + break; + case AArch64::fixup_a64_tlsdesc_call: + Type = ELF::R_AARCH64_TLSDESC_CALL; + break; + } + } + + return Type; +} + +MCObjectWriter *llvm::createAArch64ELFObjectWriter(raw_ostream &OS, + uint8_t OSABI) { + MCELFObjectTargetWriter *MOTW = new AArch64ELFObjectWriter(OSABI); + return createELFObjectWriter(MOTW, OS, /*IsLittleEndian=*/true); +} diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp new file mode 100644 index 000000000000..a64c463f9e5c --- /dev/null +++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.cpp @@ -0,0 +1,159 @@ +//===- lib/MC/AArch64ELFStreamer.cpp - ELF Object Output for AArch64 ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file assembles .s files and emits AArch64 ELF .o object files. Different +// from generic ELF streamer in emitting mapping symbols ($x and $d) to delimit +// regions of data and code. 
+// +//===----------------------------------------------------------------------===// + +#include "llvm/MC/MCELFStreamer.h" +#include "llvm/ADT/SmallPtrSet.h" +#include "llvm/ADT/Twine.h" +#include "llvm/MC/MCAsmBackend.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCELF.h" +#include "llvm/MC/MCELFStreamer.h" +#include "llvm/MC/MCELFSymbolFlags.h" +#include "llvm/MC/MCExpr.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCObjectStreamer.h" +#include "llvm/MC/MCSection.h" +#include "llvm/MC/MCSectionELF.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSymbol.h" +#include "llvm/MC/MCValue.h" +#include "llvm/Support/Debug.h" +#include "llvm/Support/ELF.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +namespace { + +/// Extend the generic ELFStreamer class so that it can emit mapping symbols at +/// the appropriate points in the object files. These symbols are defined in the +/// AArch64 ELF ABI: +/// infocenter.arm.com/help/topic/com.arm.doc.ihi0056a/IHI0056A_aaelf64.pdf +/// +/// In brief: $x or $d should be emitted at the start of each contiguous region +/// of A64 code or data in a section. In practice, this emission does not rely +/// on explicit assembler directives but on inherent properties of the +/// directives doing the emission (e.g. ".byte" is data, "add x0, x0, x0" an +/// instruction). +/// +/// As a result this system is orthogonal to the DataRegion infrastructure used +/// by MachO. Beware! 
+class AArch64ELFStreamer : public MCELFStreamer { +public: + AArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB, raw_ostream &OS, + MCCodeEmitter *Emitter) + : MCELFStreamer(Context, 0, TAB, OS, Emitter), MappingSymbolCounter(0), + LastEMS(EMS_None) {} + + ~AArch64ELFStreamer() {} + + virtual void ChangeSection(const MCSection *Section, + const MCExpr *Subsection) { + // We have to keep track of the mapping symbol state of any sections we + // use. Each one should start off as EMS_None, which is provided as the + // default constructor by DenseMap::lookup. + LastMappingSymbols[getPreviousSection().first] = LastEMS; + LastEMS = LastMappingSymbols.lookup(Section); + + MCELFStreamer::ChangeSection(Section, Subsection); + } + + /// This function is the one used to emit instruction data into the ELF + /// streamer. We override it to add the appropriate mapping symbol if + /// necessary. + virtual void EmitInstruction(const MCInst& Inst) { + EmitA64MappingSymbol(); + MCELFStreamer::EmitInstruction(Inst); + } + + /// This is one of the functions used to emit data into an ELF section, so the + /// AArch64 streamer overrides it to add the appropriate mapping symbol ($d) + /// if necessary. + virtual void EmitBytes(StringRef Data) { + EmitDataMappingSymbol(); + MCELFStreamer::EmitBytes(Data); + } + + /// This is one of the functions used to emit data into an ELF section, so the + /// AArch64 streamer overrides it to add the appropriate mapping symbol ($d) + /// if necessary. 
+ virtual void EmitValueImpl(const MCExpr *Value, unsigned Size) { + EmitDataMappingSymbol(); + MCELFStreamer::EmitValueImpl(Value, Size); + } + +private: + enum ElfMappingSymbol { + EMS_None, + EMS_A64, + EMS_Data + }; + + void EmitDataMappingSymbol() { + if (LastEMS == EMS_Data) return; + EmitMappingSymbol("$d"); + LastEMS = EMS_Data; + } + + void EmitA64MappingSymbol() { + if (LastEMS == EMS_A64) return; + EmitMappingSymbol("$x"); + LastEMS = EMS_A64; + } + + void EmitMappingSymbol(StringRef Name) { + MCSymbol *Start = getContext().CreateTempSymbol(); + EmitLabel(Start); + + MCSymbol *Symbol = + getContext().GetOrCreateSymbol(Name + "." + + Twine(MappingSymbolCounter++)); + + MCSymbolData &SD = getAssembler().getOrCreateSymbolData(*Symbol); + MCELF::SetType(SD, ELF::STT_NOTYPE); + MCELF::SetBinding(SD, ELF::STB_LOCAL); + SD.setExternal(false); + AssignSection(Symbol, getCurrentSection().first); + + const MCExpr *Value = MCSymbolRefExpr::Create(Start, getContext()); + Symbol->setVariableValue(Value); + } + + int64_t MappingSymbolCounter; + + DenseMap<const MCSection *, ElfMappingSymbol> LastMappingSymbols; + ElfMappingSymbol LastEMS; + + /// @} +}; +} + +namespace llvm { + MCELFStreamer* createAArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB, + raw_ostream &OS, MCCodeEmitter *Emitter, + bool RelaxAll, bool NoExecStack) { + AArch64ELFStreamer *S = new AArch64ELFStreamer(Context, TAB, OS, Emitter); + if (RelaxAll) + S->getAssembler().setRelaxAll(true); + if (NoExecStack) + S->getAssembler().setNoExecStack(true); + return S; + } +} + + diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h new file mode 100644 index 000000000000..5a89ca50cee8 --- /dev/null +++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64ELFStreamer.h @@ -0,0 +1,27 @@ +//===-- AArch64ELFStreamer.h - ELF Streamer for AArch64 ---------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// 
+// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements ELF streamer information for the AArch64 backend. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AARCH64_ELF_STREAMER_H +#define LLVM_AARCH64_ELF_STREAMER_H + +#include "llvm/MC/MCELFStreamer.h" + +namespace llvm { + + MCELFStreamer* createAArch64ELFStreamer(MCContext &Context, MCAsmBackend &TAB, + raw_ostream &OS, + MCCodeEmitter *Emitter, + bool RelaxAll, bool NoExecStack); // Defined in AArch64ELFStreamer.cpp. +} + +#endif // LLVM_AARCH64_ELF_STREAMER_H diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h new file mode 100644 index 000000000000..eeb122d38494 --- /dev/null +++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64FixupKinds.h @@ -0,0 +1,113 @@ +//=- AArch64/AArch64FixupKinds.h - AArch64 Specific Fixup Entries -*- C++ -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes the LLVM fixups applied to MCInsts in the AArch64 +// backend. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AARCH64_AARCH64FIXUPKINDS_H +#define LLVM_AARCH64_AARCH64FIXUPKINDS_H + +#include "llvm/MC/MCFixup.h" + +namespace llvm { + namespace AArch64 { + enum Fixups { + fixup_a64_ld_prel = FirstTargetFixupKind, + fixup_a64_adr_prel, + fixup_a64_adr_prel_page, + + fixup_a64_add_lo12, + + fixup_a64_ldst8_lo12, + fixup_a64_ldst16_lo12, + fixup_a64_ldst32_lo12, + fixup_a64_ldst64_lo12, + fixup_a64_ldst128_lo12, + + fixup_a64_tstbr, + fixup_a64_condbr, + fixup_a64_uncondbr, + fixup_a64_call, + + fixup_a64_movw_uabs_g0, + fixup_a64_movw_uabs_g0_nc, + fixup_a64_movw_uabs_g1, + fixup_a64_movw_uabs_g1_nc, + fixup_a64_movw_uabs_g2, + fixup_a64_movw_uabs_g2_nc, + fixup_a64_movw_uabs_g3, + + fixup_a64_movw_sabs_g0, + fixup_a64_movw_sabs_g1, + fixup_a64_movw_sabs_g2, + + fixup_a64_adr_prel_got_page, + fixup_a64_ld64_got_lo12_nc, + + // Produce offsets relative to the module's dynamic TLS area. + fixup_a64_movw_dtprel_g2, + fixup_a64_movw_dtprel_g1, + fixup_a64_movw_dtprel_g1_nc, + fixup_a64_movw_dtprel_g0, + fixup_a64_movw_dtprel_g0_nc, + fixup_a64_add_dtprel_hi12, + fixup_a64_add_dtprel_lo12, + fixup_a64_add_dtprel_lo12_nc, + fixup_a64_ldst8_dtprel_lo12, + fixup_a64_ldst8_dtprel_lo12_nc, + fixup_a64_ldst16_dtprel_lo12, + fixup_a64_ldst16_dtprel_lo12_nc, + fixup_a64_ldst32_dtprel_lo12, + fixup_a64_ldst32_dtprel_lo12_nc, + fixup_a64_ldst64_dtprel_lo12, + fixup_a64_ldst64_dtprel_lo12_nc, + + // Produce the GOT entry containing a variable's address in TLS's + // initial-exec mode. + fixup_a64_movw_gottprel_g1, + fixup_a64_movw_gottprel_g0_nc, + fixup_a64_adr_gottprel_page, + fixup_a64_ld64_gottprel_lo12_nc, + fixup_a64_ld_gottprel_prel19, + + // Produce offsets relative to the thread pointer: TPIDR_EL0. 
+ fixup_a64_movw_tprel_g2, + fixup_a64_movw_tprel_g1, + fixup_a64_movw_tprel_g1_nc, + fixup_a64_movw_tprel_g0, + fixup_a64_movw_tprel_g0_nc, + fixup_a64_add_tprel_hi12, + fixup_a64_add_tprel_lo12, + fixup_a64_add_tprel_lo12_nc, + fixup_a64_ldst8_tprel_lo12, + fixup_a64_ldst8_tprel_lo12_nc, + fixup_a64_ldst16_tprel_lo12, + fixup_a64_ldst16_tprel_lo12_nc, + fixup_a64_ldst32_tprel_lo12, + fixup_a64_ldst32_tprel_lo12_nc, + fixup_a64_ldst64_tprel_lo12, + fixup_a64_ldst64_tprel_lo12_nc, + + // Produce the special fixups used by the general-dynamic TLS model. + fixup_a64_tlsdesc_adr_page, + fixup_a64_tlsdesc_ld64_lo12_nc, + fixup_a64_tlsdesc_add_lo12_nc, + fixup_a64_tlsdesc_call, + + + // Marker + LastTargetFixupKind, + NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind + }; + } +} + +#endif diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp new file mode 100644 index 000000000000..add874c12019 --- /dev/null +++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.cpp @@ -0,0 +1,42 @@ +//===-- AArch64MCAsmInfo.cpp - AArch64 asm properties ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declarations of the AArch64MCAsmInfo properties. +// +//===----------------------------------------------------------------------===// + +#include "AArch64MCAsmInfo.h" + +using namespace llvm; + +AArch64ELFMCAsmInfo::AArch64ELFMCAsmInfo() { + PointerSize = 8; + + // ".comm align is in bytes but .align is pow-2." 
+ AlignmentIsInBytes = false; + + CommentString = "//"; + PrivateGlobalPrefix = ".L"; + Code32Directive = ".code\t32"; + + Data16bitsDirective = "\t.hword\t"; + Data32bitsDirective = "\t.word\t"; + Data64bitsDirective = "\t.xword\t"; + + UseDataRegionDirectives = true; + + HasLEB128 = true; + SupportsDebugInformation = true; + + // Exception handling: emit DWARF call-frame information (CFI). + ExceptionsType = ExceptionHandling::DwarfCFI; +} + +// Pin the vtable to this file. +void AArch64ELFMCAsmInfo::anchor() {} diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h new file mode 100644 index 000000000000..d1dd285c832c --- /dev/null +++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCAsmInfo.h @@ -0,0 +1,29 @@ +//==-- AArch64MCAsmInfo.h - AArch64 asm properties -------------*- C++ -*--===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the declaration of the AArch64MCAsmInfo class. 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AARCH64TARGETASMINFO_H +#define LLVM_AARCH64TARGETASMINFO_H + +#include "llvm/MC/MCAsmInfoELF.h" + +namespace llvm { + +struct AArch64ELFMCAsmInfo : public MCAsmInfoELF { + explicit AArch64ELFMCAsmInfo(); +private: + virtual void anchor(); // Pinned (defined) in AArch64MCAsmInfo.cpp. +}; + +} // namespace llvm + +#endif // LLVM_AARCH64TARGETASMINFO_H diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp new file mode 100644 index 000000000000..b41c566f612b --- /dev/null +++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp @@ -0,0 +1,566 @@ +//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code =// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file implements the AArch64MCCodeEmitter class. 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "mccodeemitter" +#include "MCTargetDesc/AArch64FixupKinds.h" +#include "MCTargetDesc/AArch64MCExpr.h" +#include "MCTargetDesc/AArch64MCTargetDesc.h" +#include "Utils/AArch64BaseInfo.h" +#include "llvm/MC/MCCodeEmitter.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCInst.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/raw_ostream.h" + +using namespace llvm; + +namespace { +class AArch64MCCodeEmitter : public MCCodeEmitter { + AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION; + void operator=(const AArch64MCCodeEmitter &) LLVM_DELETED_FUNCTION; + MCContext &Ctx; + +public: + AArch64MCCodeEmitter(MCContext &ctx) : Ctx(ctx) {} + + ~AArch64MCCodeEmitter() {} + + unsigned getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + unsigned getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + template<int MemSize> + unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + return getOffsetUImm12OpValue(MI, OpIdx, Fixups, MemSize); + } + + unsigned getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + int MemSize) const; + + unsigned getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + unsigned getShiftRightImm8(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getShiftRightImm16(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getShiftRightImm32(const MCInst &MI, unsigned Op, + 
SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getShiftRightImm64(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + + unsigned getShiftLeftImm8(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getShiftLeftImm16(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getShiftLeftImm32(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + unsigned getShiftLeftImm64(const MCInst &MI, unsigned Op, + SmallVectorImpl<MCFixup> &Fixups) const; + + // Labels are handled mostly the same way: a symbol is needed, and + // just gets some fixup attached. + template<AArch64::Fixups fixupDesired> + unsigned getLabelOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + unsigned getLoadLitLabelOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + + unsigned getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const; + + + unsigned getAddressWithFixup(const MCOperand &MO, + unsigned FixupKind, + SmallVectorImpl<MCFixup> &Fixups) const; + + + // getBinaryCodeForInstr - TableGen'erated function for getting the + // binary encoding for an instruction. + uint64_t getBinaryCodeForInstr(const MCInst &MI, + SmallVectorImpl<MCFixup> &Fixups) const; + + /// getMachineOpValue - Return binary encoding of operand. If the machine + /// operand requires relocation, record the relocation and return zero. + unsigned getMachineOpValue(const MCInst &MI,const MCOperand &MO, + SmallVectorImpl<MCFixup> &Fixups) const; + + + void EmitByte(unsigned char C, raw_ostream &OS) const { + OS << (char)C; + } + + void EmitInstruction(uint32_t Val, raw_ostream &OS) const { + // Output the constant in little endian byte order. 
+ for (unsigned i = 0; i != 4; ++i) { + EmitByte(Val & 0xff, OS); + Val >>= 8; + } + } + + + void EncodeInstruction(const MCInst &MI, raw_ostream &OS, + SmallVectorImpl<MCFixup> &Fixups) const; + + template<int hasRs, int hasRt2> unsigned + fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue) const; + + unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue) const; + + unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue) const; + + +}; + +} // end anonymous namespace + +unsigned AArch64MCCodeEmitter::getAddressWithFixup(const MCOperand &MO, + unsigned FixupKind, + SmallVectorImpl<MCFixup> &Fixups) const { + if (!MO.isExpr()) { + // This can occur for manually decoded or constructed MCInsts, but neither + // the assembly-parser nor instruction selection will currently produce an + // MCInst that's not a symbol reference. + assert(MO.isImm() && "Unexpected address requested"); + return MO.getImm(); + } + + const MCExpr *Expr = MO.getExpr(); + MCFixupKind Kind = MCFixupKind(FixupKind); + Fixups.push_back(MCFixup::Create(0, Expr, Kind)); + + return 0; +} + +unsigned AArch64MCCodeEmitter:: +getOffsetUImm12OpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups, + int MemSize) const { + const MCOperand &ImmOp = MI.getOperand(OpIdx); + if (ImmOp.isImm()) + return ImmOp.getImm(); + + assert(ImmOp.isExpr() && "Unexpected operand type"); + const AArch64MCExpr *Expr = cast<AArch64MCExpr>(ImmOp.getExpr()); + unsigned FixupKind; + + + switch (Expr->getKind()) { + default: llvm_unreachable("Unexpected operand modifier"); + case AArch64MCExpr::VK_AARCH64_LO12: { + static const unsigned FixupsBySize[] = { AArch64::fixup_a64_ldst8_lo12, + AArch64::fixup_a64_ldst16_lo12, + AArch64::fixup_a64_ldst32_lo12, + AArch64::fixup_a64_ldst64_lo12, + AArch64::fixup_a64_ldst128_lo12 }; + assert(MemSize <= 16 && "Invalid fixup for operation"); + FixupKind = FixupsBySize[Log2_32(MemSize)]; + break; + } + case AArch64MCExpr::VK_AARCH64_GOT_LO12: + 
assert(MemSize == 8 && "Invalid fixup for operation"); + FixupKind = AArch64::fixup_a64_ld64_got_lo12_nc; + break; + case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: { + static const unsigned FixupsBySize[] = { + AArch64::fixup_a64_ldst8_dtprel_lo12, + AArch64::fixup_a64_ldst16_dtprel_lo12, + AArch64::fixup_a64_ldst32_dtprel_lo12, + AArch64::fixup_a64_ldst64_dtprel_lo12 + }; + assert(MemSize <= 8 && "Invalid fixup for operation"); + FixupKind = FixupsBySize[Log2_32(MemSize)]; + break; + } + case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: { + static const unsigned FixupsBySize[] = { + AArch64::fixup_a64_ldst8_dtprel_lo12_nc, + AArch64::fixup_a64_ldst16_dtprel_lo12_nc, + AArch64::fixup_a64_ldst32_dtprel_lo12_nc, + AArch64::fixup_a64_ldst64_dtprel_lo12_nc + }; + assert(MemSize <= 8 && "Invalid fixup for operation"); + FixupKind = FixupsBySize[Log2_32(MemSize)]; + break; + } + case AArch64MCExpr::VK_AARCH64_GOTTPREL_LO12: + assert(MemSize == 8 && "Invalid fixup for operation"); + FixupKind = AArch64::fixup_a64_ld64_gottprel_lo12_nc; + break; + case AArch64MCExpr::VK_AARCH64_TPREL_LO12:{ + static const unsigned FixupsBySize[] = { + AArch64::fixup_a64_ldst8_tprel_lo12, + AArch64::fixup_a64_ldst16_tprel_lo12, + AArch64::fixup_a64_ldst32_tprel_lo12, + AArch64::fixup_a64_ldst64_tprel_lo12 + }; + assert(MemSize <= 8 && "Invalid fixup for operation"); + FixupKind = FixupsBySize[Log2_32(MemSize)]; + break; + } + case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: { + static const unsigned FixupsBySize[] = { + AArch64::fixup_a64_ldst8_tprel_lo12_nc, + AArch64::fixup_a64_ldst16_tprel_lo12_nc, + AArch64::fixup_a64_ldst32_tprel_lo12_nc, + AArch64::fixup_a64_ldst64_tprel_lo12_nc + }; + assert(MemSize <= 8 && "Invalid fixup for operation"); + FixupKind = FixupsBySize[Log2_32(MemSize)]; + break; + } + case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12: + assert(MemSize == 8 && "Invalid fixup for operation"); + FixupKind = AArch64::fixup_a64_tlsdesc_ld64_lo12_nc; + break; + } + + return 
getAddressWithFixup(ImmOp, FixupKind, Fixups); +} + +unsigned +AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &MO = MI.getOperand(OpIdx); + if (MO.isImm()) + return static_cast<unsigned>(MO.getImm()); + + assert(MO.isExpr()); + + unsigned FixupKind = 0; + switch(cast<AArch64MCExpr>(MO.getExpr())->getKind()) { + default: llvm_unreachable("Invalid expression modifier"); + case AArch64MCExpr::VK_AARCH64_LO12: + FixupKind = AArch64::fixup_a64_add_lo12; break; + case AArch64MCExpr::VK_AARCH64_DTPREL_HI12: + FixupKind = AArch64::fixup_a64_add_dtprel_hi12; break; + case AArch64MCExpr::VK_AARCH64_DTPREL_LO12: + FixupKind = AArch64::fixup_a64_add_dtprel_lo12; break; + case AArch64MCExpr::VK_AARCH64_DTPREL_LO12_NC: + FixupKind = AArch64::fixup_a64_add_dtprel_lo12_nc; break; + case AArch64MCExpr::VK_AARCH64_TPREL_HI12: + FixupKind = AArch64::fixup_a64_add_tprel_hi12; break; + case AArch64MCExpr::VK_AARCH64_TPREL_LO12: + FixupKind = AArch64::fixup_a64_add_tprel_lo12; break; + case AArch64MCExpr::VK_AARCH64_TPREL_LO12_NC: + FixupKind = AArch64::fixup_a64_add_tprel_lo12_nc; break; + case AArch64MCExpr::VK_AARCH64_TLSDESC_LO12: + FixupKind = AArch64::fixup_a64_tlsdesc_add_lo12_nc; break; + } + + return getAddressWithFixup(MO, FixupKind, Fixups); +} + +unsigned +AArch64MCCodeEmitter::getAdrpLabelOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + + const MCOperand &MO = MI.getOperand(OpIdx); + if (MO.isImm()) + return static_cast<unsigned>(MO.getImm()); + + assert(MO.isExpr()); + + unsigned Modifier = AArch64MCExpr::VK_AARCH64_None; + if (const AArch64MCExpr *Expr = dyn_cast<AArch64MCExpr>(MO.getExpr())) + Modifier = Expr->getKind(); + + unsigned FixupKind = 0; + switch(Modifier) { + case AArch64MCExpr::VK_AARCH64_None: + FixupKind = AArch64::fixup_a64_adr_prel_page; + break; + case AArch64MCExpr::VK_AARCH64_GOT: + FixupKind = 
AArch64::fixup_a64_adr_prel_got_page; + break; + case AArch64MCExpr::VK_AARCH64_GOTTPREL: + FixupKind = AArch64::fixup_a64_adr_gottprel_page; + break; + case AArch64MCExpr::VK_AARCH64_TLSDESC: + FixupKind = AArch64::fixup_a64_tlsdesc_adr_page; + break; + default: + llvm_unreachable("Unknown symbol reference kind for ADRP instruction"); + } + + return getAddressWithFixup(MO, FixupKind, Fixups); +} + +unsigned +AArch64MCCodeEmitter::getBitfield32LSLOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Only immediate expected for shift"); + + return ((32 - MO.getImm()) & 0x1f) | (31 - MO.getImm()) << 6; +} + +unsigned +AArch64MCCodeEmitter::getBitfield64LSLOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + + const MCOperand &MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Only immediate expected for shift"); + + return ((64 - MO.getImm()) & 0x3f) | (63 - MO.getImm()) << 6; +} + +unsigned AArch64MCCodeEmitter::getShiftRightImm8( + const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const { + return 8 - MI.getOperand(Op).getImm(); +} + +unsigned AArch64MCCodeEmitter::getShiftRightImm16( + const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const { + return 16 - MI.getOperand(Op).getImm(); +} + +unsigned AArch64MCCodeEmitter::getShiftRightImm32( + const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const { + return 32 - MI.getOperand(Op).getImm(); +} + +unsigned AArch64MCCodeEmitter::getShiftRightImm64( + const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const { + return 64 - MI.getOperand(Op).getImm(); +} + +unsigned AArch64MCCodeEmitter::getShiftLeftImm8( + const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const { + return MI.getOperand(Op).getImm() - 8; +} + +unsigned AArch64MCCodeEmitter::getShiftLeftImm16( + const MCInst &MI, unsigned Op, 
SmallVectorImpl<MCFixup> &Fixups) const { + return MI.getOperand(Op).getImm() - 16; +} + +unsigned AArch64MCCodeEmitter::getShiftLeftImm32( + const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const { + return MI.getOperand(Op).getImm() - 32; +} + +unsigned AArch64MCCodeEmitter::getShiftLeftImm64( + const MCInst &MI, unsigned Op, SmallVectorImpl<MCFixup> &Fixups) const { + return MI.getOperand(Op).getImm() - 64; +} + +template<AArch64::Fixups fixupDesired> unsigned +AArch64MCCodeEmitter::getLabelOpValue(const MCInst &MI, + unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &MO = MI.getOperand(OpIdx); + + if (MO.isExpr()) + return getAddressWithFixup(MO, fixupDesired, Fixups); + + assert(MO.isImm()); + return MO.getImm(); +} + +unsigned +AArch64MCCodeEmitter::getLoadLitLabelOpValue(const MCInst &MI, + unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &MO = MI.getOperand(OpIdx); + + if (MO.isImm()) + return MO.getImm(); + + assert(MO.isExpr()); + + unsigned FixupKind; + if (isa<AArch64MCExpr>(MO.getExpr())) { + assert(dyn_cast<AArch64MCExpr>(MO.getExpr())->getKind() + == AArch64MCExpr::VK_AARCH64_GOTTPREL + && "Invalid symbol modifier for literal load"); + FixupKind = AArch64::fixup_a64_ld_gottprel_prel19; + } else { + FixupKind = AArch64::fixup_a64_ld_prel; + } + + return getAddressWithFixup(MO, FixupKind, Fixups); +} + + +unsigned +AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, + const MCOperand &MO, + SmallVectorImpl<MCFixup> &Fixups) const { + if (MO.isReg()) { + return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()); + } else if (MO.isImm()) { + return static_cast<unsigned>(MO.getImm()); + } + + llvm_unreachable("Unable to encode MCOperand!"); + return 0; +} + +unsigned +AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl<MCFixup> &Fixups) const { + const MCOperand &UImm16MO = MI.getOperand(OpIdx); + const MCOperand &ShiftMO = 
MI.getOperand(OpIdx + 1); + + unsigned Result = static_cast<unsigned>(ShiftMO.getImm()) << 16; + + if (UImm16MO.isImm()) { + Result |= UImm16MO.getImm(); + return Result; + } + + const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr()); + AArch64::Fixups requestedFixup; + switch (A64E->getKind()) { + default: llvm_unreachable("unexpected expression modifier"); + case AArch64MCExpr::VK_AARCH64_ABS_G0: + requestedFixup = AArch64::fixup_a64_movw_uabs_g0; break; + case AArch64MCExpr::VK_AARCH64_ABS_G0_NC: + requestedFixup = AArch64::fixup_a64_movw_uabs_g0_nc; break; + case AArch64MCExpr::VK_AARCH64_ABS_G1: + requestedFixup = AArch64::fixup_a64_movw_uabs_g1; break; + case AArch64MCExpr::VK_AARCH64_ABS_G1_NC: + requestedFixup = AArch64::fixup_a64_movw_uabs_g1_nc; break; + case AArch64MCExpr::VK_AARCH64_ABS_G2: + requestedFixup = AArch64::fixup_a64_movw_uabs_g2; break; + case AArch64MCExpr::VK_AARCH64_ABS_G2_NC: + requestedFixup = AArch64::fixup_a64_movw_uabs_g2_nc; break; + case AArch64MCExpr::VK_AARCH64_ABS_G3: + requestedFixup = AArch64::fixup_a64_movw_uabs_g3; break; + case AArch64MCExpr::VK_AARCH64_SABS_G0: + requestedFixup = AArch64::fixup_a64_movw_sabs_g0; break; + case AArch64MCExpr::VK_AARCH64_SABS_G1: + requestedFixup = AArch64::fixup_a64_movw_sabs_g1; break; + case AArch64MCExpr::VK_AARCH64_SABS_G2: + requestedFixup = AArch64::fixup_a64_movw_sabs_g2; break; + case AArch64MCExpr::VK_AARCH64_DTPREL_G2: + requestedFixup = AArch64::fixup_a64_movw_dtprel_g2; break; + case AArch64MCExpr::VK_AARCH64_DTPREL_G1: + requestedFixup = AArch64::fixup_a64_movw_dtprel_g1; break; + case AArch64MCExpr::VK_AARCH64_DTPREL_G1_NC: + requestedFixup = AArch64::fixup_a64_movw_dtprel_g1_nc; break; + case AArch64MCExpr::VK_AARCH64_DTPREL_G0: + requestedFixup = AArch64::fixup_a64_movw_dtprel_g0; break; + case AArch64MCExpr::VK_AARCH64_DTPREL_G0_NC: + requestedFixup = AArch64::fixup_a64_movw_dtprel_g0_nc; break; + case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1: + requestedFixup = 
AArch64::fixup_a64_movw_gottprel_g1; break; + case AArch64MCExpr::VK_AARCH64_GOTTPREL_G0_NC: + requestedFixup = AArch64::fixup_a64_movw_gottprel_g0_nc; break; + case AArch64MCExpr::VK_AARCH64_TPREL_G2: + requestedFixup = AArch64::fixup_a64_movw_tprel_g2; break; + case AArch64MCExpr::VK_AARCH64_TPREL_G1: + requestedFixup = AArch64::fixup_a64_movw_tprel_g1; break; + case AArch64MCExpr::VK_AARCH64_TPREL_G1_NC: + requestedFixup = AArch64::fixup_a64_movw_tprel_g1_nc; break; + case AArch64MCExpr::VK_AARCH64_TPREL_G0: + requestedFixup = AArch64::fixup_a64_movw_tprel_g0; break; + case AArch64MCExpr::VK_AARCH64_TPREL_G0_NC: + requestedFixup = AArch64::fixup_a64_movw_tprel_g0_nc; break; + } + + return Result | getAddressWithFixup(UImm16MO, requestedFixup, Fixups); +} + +template<int hasRs, int hasRt2> unsigned +AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI, + unsigned EncodedValue) const { + if (!hasRs) EncodedValue |= 0x001F0000; + if (!hasRt2) EncodedValue |= 0x00007C00; + + return EncodedValue; +} + +unsigned +AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue) const { + // If one of the signed fixup kinds is applied to a MOVZ instruction, the + // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's + // job to ensure that any bits possibly affected by this are 0. This means we + // must zero out bit 30 (essentially emitting a MOVN). + MCOperand UImm16MO = MI.getOperand(1); + + // Nothing to do if there's no fixup. 
+ if (UImm16MO.isImm()) + return EncodedValue; + + const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr()); + switch (A64E->getKind()) { + case AArch64MCExpr::VK_AARCH64_SABS_G0: + case AArch64MCExpr::VK_AARCH64_SABS_G1: + case AArch64MCExpr::VK_AARCH64_SABS_G2: + case AArch64MCExpr::VK_AARCH64_DTPREL_G2: + case AArch64MCExpr::VK_AARCH64_DTPREL_G1: + case AArch64MCExpr::VK_AARCH64_DTPREL_G0: + case AArch64MCExpr::VK_AARCH64_GOTTPREL_G1: + case AArch64MCExpr::VK_AARCH64_TPREL_G2: + case AArch64MCExpr::VK_AARCH64_TPREL_G1: + case AArch64MCExpr::VK_AARCH64_TPREL_G0: + return EncodedValue & ~(1u << 30); + default: + // Nothing to do for an unsigned fixup. + return EncodedValue; + } + + llvm_unreachable("Should have returned by now"); +} + +unsigned +AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI, + unsigned EncodedValue) const { + // The Ra field of SMULH and UMULH is unused: it should be assembled as 31 + // (i.e. all bits 1) but is ignored by the processor. + EncodedValue |= 0x1f << 10; + return EncodedValue; +} + +MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII, + const MCRegisterInfo &MRI, + const MCSubtargetInfo &STI, + MCContext &Ctx) { + return new AArch64MCCodeEmitter(Ctx); +} + +void AArch64MCCodeEmitter:: +EncodeInstruction(const MCInst &MI, raw_ostream &OS, + SmallVectorImpl<MCFixup> &Fixups) const { + if (MI.getOpcode() == AArch64::TLSDESCCALL) { + // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the + // following (BLR) instruction. It doesn't emit any code itself so it + // doesn't go through the normal TableGenerated channels. 
+ MCFixupKind Fixup = MCFixupKind(AArch64::fixup_a64_tlsdesc_call); + const MCExpr *Expr; + Expr = AArch64MCExpr::CreateTLSDesc(MI.getOperand(0).getExpr(), Ctx); + Fixups.push_back(MCFixup::Create(0, Expr, Fixup)); + return; + } + + uint32_t Binary = getBinaryCodeForInstr(MI, Fixups); + + EmitInstruction(Binary, OS); +} + + +#include "AArch64GenMCCodeEmitter.inc" diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp new file mode 100644 index 000000000000..c1abfe74dfdd --- /dev/null +++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp @@ -0,0 +1,178 @@ +//===-- AArch64MCExpr.cpp - AArch64 specific MC expression classes --------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file contains the implementation of the assembly expression modifiers +// accepted by the AArch64 architecture (e.g. ":lo12:", ":gottprel_g1:", ...). 
+// +//===----------------------------------------------------------------------===// + +#define DEBUG_TYPE "aarch64mcexpr" +#include "AArch64MCExpr.h" +#include "llvm/MC/MCContext.h" +#include "llvm/MC/MCAssembler.h" +#include "llvm/MC/MCELF.h" +#include "llvm/Object/ELF.h" + +using namespace llvm; + +const AArch64MCExpr* +AArch64MCExpr::Create(VariantKind Kind, const MCExpr *Expr, + MCContext &Ctx) { + return new (Ctx) AArch64MCExpr(Kind, Expr); +} + +void AArch64MCExpr::PrintImpl(raw_ostream &OS) const { + switch (Kind) { + default: llvm_unreachable("Invalid kind!"); + case VK_AARCH64_GOT: OS << ":got:"; break; + case VK_AARCH64_GOT_LO12: OS << ":got_lo12:"; break; + case VK_AARCH64_LO12: OS << ":lo12:"; break; + case VK_AARCH64_ABS_G0: OS << ":abs_g0:"; break; + case VK_AARCH64_ABS_G0_NC: OS << ":abs_g0_nc:"; break; + case VK_AARCH64_ABS_G1: OS << ":abs_g1:"; break; + case VK_AARCH64_ABS_G1_NC: OS << ":abs_g1_nc:"; break; + case VK_AARCH64_ABS_G2: OS << ":abs_g2:"; break; + case VK_AARCH64_ABS_G2_NC: OS << ":abs_g2_nc:"; break; + case VK_AARCH64_ABS_G3: OS << ":abs_g3:"; break; + case VK_AARCH64_SABS_G0: OS << ":abs_g0_s:"; break; + case VK_AARCH64_SABS_G1: OS << ":abs_g1_s:"; break; + case VK_AARCH64_SABS_G2: OS << ":abs_g2_s:"; break; + case VK_AARCH64_DTPREL_G2: OS << ":dtprel_g2:"; break; + case VK_AARCH64_DTPREL_G1: OS << ":dtprel_g1:"; break; + case VK_AARCH64_DTPREL_G1_NC: OS << ":dtprel_g1_nc:"; break; + case VK_AARCH64_DTPREL_G0: OS << ":dtprel_g0:"; break; + case VK_AARCH64_DTPREL_G0_NC: OS << ":dtprel_g0_nc:"; break; + case VK_AARCH64_DTPREL_HI12: OS << ":dtprel_hi12:"; break; + case VK_AARCH64_DTPREL_LO12: OS << ":dtprel_lo12:"; break; + case VK_AARCH64_DTPREL_LO12_NC: OS << ":dtprel_lo12_nc:"; break; + case VK_AARCH64_GOTTPREL_G1: OS << ":gottprel_g1:"; break; + case VK_AARCH64_GOTTPREL_G0_NC: OS << ":gottprel_g0_nc:"; break; + case VK_AARCH64_GOTTPREL: OS << ":gottprel:"; break; + case VK_AARCH64_GOTTPREL_LO12: OS << ":gottprel_lo12:"; break; + 
case VK_AARCH64_TPREL_G2: OS << ":tprel_g2:"; break; + case VK_AARCH64_TPREL_G1: OS << ":tprel_g1:"; break; + case VK_AARCH64_TPREL_G1_NC: OS << ":tprel_g1_nc:"; break; + case VK_AARCH64_TPREL_G0: OS << ":tprel_g0:"; break; + case VK_AARCH64_TPREL_G0_NC: OS << ":tprel_g0_nc:"; break; + case VK_AARCH64_TPREL_HI12: OS << ":tprel_hi12:"; break; + case VK_AARCH64_TPREL_LO12: OS << ":tprel_lo12:"; break; + case VK_AARCH64_TPREL_LO12_NC: OS << ":tprel_lo12_nc:"; break; + case VK_AARCH64_TLSDESC: OS << ":tlsdesc:"; break; + case VK_AARCH64_TLSDESC_LO12: OS << ":tlsdesc_lo12:"; break; + + } + + const MCExpr *Expr = getSubExpr(); + if (Expr->getKind() != MCExpr::SymbolRef) + OS << '('; + Expr->print(OS); + if (Expr->getKind() != MCExpr::SymbolRef) + OS << ')'; +} + +bool +AArch64MCExpr::EvaluateAsRelocatableImpl(MCValue &Res, + const MCAsmLayout *Layout) const { + return getSubExpr()->EvaluateAsRelocatable(Res, *Layout); +} + +static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) { + switch (Expr->getKind()) { + case MCExpr::Target: + llvm_unreachable("Can't handle nested target expression"); + break; + case MCExpr::Constant: + break; + + case MCExpr::Binary: { + const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr); + fixELFSymbolsInTLSFixupsImpl(BE->getLHS(), Asm); + fixELFSymbolsInTLSFixupsImpl(BE->getRHS(), Asm); + break; + } + + case MCExpr::SymbolRef: { + // We're known to be under a TLS fixup, so any symbol should be + // modified. There should be only one. 
+ const MCSymbolRefExpr &SymRef = *cast<MCSymbolRefExpr>(Expr); + MCSymbolData &SD = Asm.getOrCreateSymbolData(SymRef.getSymbol()); + MCELF::SetType(SD, ELF::STT_TLS); + break; + } + + case MCExpr::Unary: + fixELFSymbolsInTLSFixupsImpl(cast<MCUnaryExpr>(Expr)->getSubExpr(), Asm); + break; + } +} + +void AArch64MCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const { + switch (getKind()) { + default: + return; + case VK_AARCH64_DTPREL_G2: + case VK_AARCH64_DTPREL_G1: + case VK_AARCH64_DTPREL_G1_NC: + case VK_AARCH64_DTPREL_G0: + case VK_AARCH64_DTPREL_G0_NC: + case VK_AARCH64_DTPREL_HI12: + case VK_AARCH64_DTPREL_LO12: + case VK_AARCH64_DTPREL_LO12_NC: + case VK_AARCH64_GOTTPREL_G1: + case VK_AARCH64_GOTTPREL_G0_NC: + case VK_AARCH64_GOTTPREL: + case VK_AARCH64_GOTTPREL_LO12: + case VK_AARCH64_TPREL_G2: + case VK_AARCH64_TPREL_G1: + case VK_AARCH64_TPREL_G1_NC: + case VK_AARCH64_TPREL_G0: + case VK_AARCH64_TPREL_G0_NC: + case VK_AARCH64_TPREL_HI12: + case VK_AARCH64_TPREL_LO12: + case VK_AARCH64_TPREL_LO12_NC: + case VK_AARCH64_TLSDESC: + case VK_AARCH64_TLSDESC_LO12: + break; + } + + fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm); +} + +// FIXME: This basically copies MCObjectStreamer::AddValueSymbols. Perhaps +// that method should be made public? +// FIXME: really do above: now that two backends are using it. 
+static void AddValueSymbolsImpl(const MCExpr *Value, MCAssembler *Asm) { + switch (Value->getKind()) { + case MCExpr::Target: + llvm_unreachable("Can't handle nested target expr!"); + break; + + case MCExpr::Constant: + break; + + case MCExpr::Binary: { + const MCBinaryExpr *BE = cast<MCBinaryExpr>(Value); + AddValueSymbolsImpl(BE->getLHS(), Asm); + AddValueSymbolsImpl(BE->getRHS(), Asm); + break; + } + + case MCExpr::SymbolRef: + Asm->getOrCreateSymbolData(cast<MCSymbolRefExpr>(Value)->getSymbol()); + break; + + case MCExpr::Unary: + AddValueSymbolsImpl(cast<MCUnaryExpr>(Value)->getSubExpr(), Asm); + break; + } +} + +void AArch64MCExpr::AddValueSymbols(MCAssembler *Asm) const { + AddValueSymbolsImpl(getSubExpr(), Asm); +} diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h new file mode 100644 index 000000000000..d9798ae99078 --- /dev/null +++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h @@ -0,0 +1,187 @@ +//==- AArch64MCExpr.h - AArch64 specific MC expression classes --*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes AArch64-specific MCExprs, used for modifiers like +// ":lo12:" or ":gottprel_g1:". 
+// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AARCH64MCEXPR_H +#define LLVM_AARCH64MCEXPR_H + +#include "llvm/MC/MCExpr.h" + +namespace llvm { + +class AArch64MCExpr : public MCTargetExpr { +public: + enum VariantKind { + VK_AARCH64_None, + VK_AARCH64_GOT, // :got: modifier in assembly + VK_AARCH64_GOT_LO12, // :got_lo12: + VK_AARCH64_LO12, // :lo12: + + VK_AARCH64_ABS_G0, // :abs_g0: + VK_AARCH64_ABS_G0_NC, // :abs_g0_nc: + VK_AARCH64_ABS_G1, + VK_AARCH64_ABS_G1_NC, + VK_AARCH64_ABS_G2, + VK_AARCH64_ABS_G2_NC, + VK_AARCH64_ABS_G3, + + VK_AARCH64_SABS_G0, // :abs_g0_s: + VK_AARCH64_SABS_G1, + VK_AARCH64_SABS_G2, + + VK_AARCH64_DTPREL_G2, // :dtprel_g2: + VK_AARCH64_DTPREL_G1, + VK_AARCH64_DTPREL_G1_NC, + VK_AARCH64_DTPREL_G0, + VK_AARCH64_DTPREL_G0_NC, + VK_AARCH64_DTPREL_HI12, + VK_AARCH64_DTPREL_LO12, + VK_AARCH64_DTPREL_LO12_NC, + + VK_AARCH64_GOTTPREL_G1, // :gottprel: + VK_AARCH64_GOTTPREL_G0_NC, + VK_AARCH64_GOTTPREL, + VK_AARCH64_GOTTPREL_LO12, + + VK_AARCH64_TPREL_G2, // :tprel: + VK_AARCH64_TPREL_G1, + VK_AARCH64_TPREL_G1_NC, + VK_AARCH64_TPREL_G0, + VK_AARCH64_TPREL_G0_NC, + VK_AARCH64_TPREL_HI12, + VK_AARCH64_TPREL_LO12, + VK_AARCH64_TPREL_LO12_NC, + + VK_AARCH64_TLSDESC, // :tlsdesc: + VK_AARCH64_TLSDESC_LO12 + }; + +private: + const VariantKind Kind; + const MCExpr *Expr; + + explicit AArch64MCExpr(VariantKind _Kind, const MCExpr *_Expr) + : Kind(_Kind), Expr(_Expr) {} + +public: + /// @name Construction + /// @{ + + static const AArch64MCExpr *Create(VariantKind Kind, const MCExpr *Expr, + MCContext &Ctx); + + static const AArch64MCExpr *CreateLo12(const MCExpr *Expr, MCContext &Ctx) { + return Create(VK_AARCH64_LO12, Expr, Ctx); + } + + static const AArch64MCExpr *CreateGOT(const MCExpr *Expr, MCContext &Ctx) { + return Create(VK_AARCH64_GOT, Expr, Ctx); + } + + static const AArch64MCExpr *CreateGOTLo12(const MCExpr *Expr, + MCContext &Ctx) { + return Create(VK_AARCH64_GOT_LO12, Expr, Ctx); + } 
+ + static const AArch64MCExpr *CreateDTPREL_G1(const MCExpr *Expr, + MCContext &Ctx) { + return Create(VK_AARCH64_DTPREL_G1, Expr, Ctx); + } + + static const AArch64MCExpr *CreateDTPREL_G0_NC(const MCExpr *Expr, + MCContext &Ctx) { + return Create(VK_AARCH64_DTPREL_G0_NC, Expr, Ctx); + } + + static const AArch64MCExpr *CreateGOTTPREL(const MCExpr *Expr, + MCContext &Ctx) { + return Create(VK_AARCH64_GOTTPREL, Expr, Ctx); + } + + static const AArch64MCExpr *CreateGOTTPRELLo12(const MCExpr *Expr, + MCContext &Ctx) { + return Create(VK_AARCH64_GOTTPREL_LO12, Expr, Ctx); + } + + static const AArch64MCExpr *CreateTLSDesc(const MCExpr *Expr, + MCContext &Ctx) { + return Create(VK_AARCH64_TLSDESC, Expr, Ctx); + } + + static const AArch64MCExpr *CreateTLSDescLo12(const MCExpr *Expr, + MCContext &Ctx) { + return Create(VK_AARCH64_TLSDESC_LO12, Expr, Ctx); + } + + static const AArch64MCExpr *CreateTPREL_G1(const MCExpr *Expr, + MCContext &Ctx) { + return Create(VK_AARCH64_TPREL_G1, Expr, Ctx); + } + + static const AArch64MCExpr *CreateTPREL_G0_NC(const MCExpr *Expr, + MCContext &Ctx) { + return Create(VK_AARCH64_TPREL_G0_NC, Expr, Ctx); + } + + static const AArch64MCExpr *CreateABS_G3(const MCExpr *Expr, + MCContext &Ctx) { + return Create(VK_AARCH64_ABS_G3, Expr, Ctx); + } + + static const AArch64MCExpr *CreateABS_G2_NC(const MCExpr *Expr, + MCContext &Ctx) { + return Create(VK_AARCH64_ABS_G2_NC, Expr, Ctx); + } + + static const AArch64MCExpr *CreateABS_G1_NC(const MCExpr *Expr, + MCContext &Ctx) { + return Create(VK_AARCH64_ABS_G1_NC, Expr, Ctx); + } + + static const AArch64MCExpr *CreateABS_G0_NC(const MCExpr *Expr, + MCContext &Ctx) { + return Create(VK_AARCH64_ABS_G0_NC, Expr, Ctx); + } + + /// @} + /// @name Accessors + /// @{ + + /// getOpcode - Get the kind of this expression. + VariantKind getKind() const { return Kind; } + + /// getSubExpr - Get the child of this expression. 
+ const MCExpr *getSubExpr() const { return Expr; } + + /// @} + + void PrintImpl(raw_ostream &OS) const; + bool EvaluateAsRelocatableImpl(MCValue &Res, + const MCAsmLayout *Layout) const; + void AddValueSymbols(MCAssembler *) const; + const MCSection *FindAssociatedSection() const { + return getSubExpr()->FindAssociatedSection(); + } + + void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const; + + static bool classof(const MCExpr *E) { + return E->getKind() == MCExpr::Target; + } + + static bool classof(const AArch64MCExpr *) { return true; } + +}; +} // end namespace llvm + +#endif diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp new file mode 100644 index 000000000000..58fc95c2eaf6 --- /dev/null +++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp @@ -0,0 +1,201 @@ +//===-- AArch64MCTargetDesc.cpp - AArch64 Target Descriptions -------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides AArch64 specific target descriptions. 
+// +//===----------------------------------------------------------------------===// + +#include "AArch64MCTargetDesc.h" +#include "AArch64ELFStreamer.h" +#include "AArch64MCAsmInfo.h" +#include "InstPrinter/AArch64InstPrinter.h" +#include "llvm/ADT/APInt.h" +#include "llvm/MC/MCCodeGenInfo.h" +#include "llvm/MC/MCInstrAnalysis.h" +#include "llvm/MC/MCInstrInfo.h" +#include "llvm/MC/MCRegisterInfo.h" +#include "llvm/MC/MCStreamer.h" +#include "llvm/MC/MCSubtargetInfo.h" +#include "llvm/Support/TargetRegistry.h" +#include "llvm/Support/ErrorHandling.h" + +#define GET_REGINFO_MC_DESC +#include "AArch64GenRegisterInfo.inc" + +#define GET_INSTRINFO_MC_DESC +#include "AArch64GenInstrInfo.inc" + +#define GET_SUBTARGETINFO_MC_DESC +#include "AArch64GenSubtargetInfo.inc" + +using namespace llvm; + +MCSubtargetInfo *AArch64_MC::createAArch64MCSubtargetInfo(StringRef TT, + StringRef CPU, + StringRef FS) { + MCSubtargetInfo *X = new MCSubtargetInfo(); + InitAArch64MCSubtargetInfo(X, TT, CPU, FS); + return X; +} + + +static MCInstrInfo *createAArch64MCInstrInfo() { + MCInstrInfo *X = new MCInstrInfo(); + InitAArch64MCInstrInfo(X); + return X; +} + +static MCRegisterInfo *createAArch64MCRegisterInfo(StringRef Triple) { + MCRegisterInfo *X = new MCRegisterInfo(); + InitAArch64MCRegisterInfo(X, AArch64::X30); + return X; +} + +static MCAsmInfo *createAArch64MCAsmInfo(const MCRegisterInfo &MRI, + StringRef TT) { + Triple TheTriple(TT); + + MCAsmInfo *MAI = new AArch64ELFMCAsmInfo(); + unsigned Reg = MRI.getDwarfRegNum(AArch64::XSP, true); + MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(0, Reg, 0); + MAI->addInitialFrameState(Inst); + + return MAI; +} + +static MCCodeGenInfo *createAArch64MCCodeGenInfo(StringRef TT, Reloc::Model RM, + CodeModel::Model CM, + CodeGenOpt::Level OL) { + MCCodeGenInfo *X = new MCCodeGenInfo(); + if (RM == Reloc::Default || RM == Reloc::DynamicNoPIC) { + // On ELF platforms the default static relocation model has a smart enough + // linker to 
cope with referencing external symbols defined in a shared + // library. Hence DynamicNoPIC doesn't need to be promoted to PIC. + RM = Reloc::Static; + } + + if (CM == CodeModel::Default) + CM = CodeModel::Small; + else if (CM == CodeModel::JITDefault) { + // The default MCJIT memory managers make no guarantees about where they can + // find an executable page; JITed code needs to be able to refer to globals + // no matter how far away they are. + CM = CodeModel::Large; + } + + X->InitMCCodeGenInfo(RM, CM, OL); + return X; +} + +static MCStreamer *createMCStreamer(const Target &T, StringRef TT, + MCContext &Ctx, MCAsmBackend &MAB, + raw_ostream &OS, + MCCodeEmitter *Emitter, + bool RelaxAll, + bool NoExecStack) { + Triple TheTriple(TT); + + return createAArch64ELFStreamer(Ctx, MAB, OS, Emitter, RelaxAll, NoExecStack); +} + + +static MCInstPrinter *createAArch64MCInstPrinter(const Target &T, + unsigned SyntaxVariant, + const MCAsmInfo &MAI, + const MCInstrInfo &MII, + const MCRegisterInfo &MRI, + const MCSubtargetInfo &STI) { + if (SyntaxVariant == 0) + return new AArch64InstPrinter(MAI, MII, MRI, STI); + return 0; +} + +namespace { + +class AArch64MCInstrAnalysis : public MCInstrAnalysis { +public: + AArch64MCInstrAnalysis(const MCInstrInfo *Info) : MCInstrAnalysis(Info) {} + + virtual bool isUnconditionalBranch(const MCInst &Inst) const { + if (Inst.getOpcode() == AArch64::Bcc + && Inst.getOperand(0).getImm() == A64CC::AL) + return true; + return MCInstrAnalysis::isUnconditionalBranch(Inst); + } + + virtual bool isConditionalBranch(const MCInst &Inst) const { + if (Inst.getOpcode() == AArch64::Bcc + && Inst.getOperand(0).getImm() == A64CC::AL) + return false; + return MCInstrAnalysis::isConditionalBranch(Inst); + } + + bool evaluateBranch(const MCInst &Inst, uint64_t Addr, + uint64_t Size, uint64_t &Target) const { + unsigned LblOperand = Inst.getOpcode() == AArch64::Bcc ? 1 : 0; + // FIXME: We only handle PCRel branches for now. 
+ if (Info->get(Inst.getOpcode()).OpInfo[LblOperand].OperandType + != MCOI::OPERAND_PCREL) + return false; + + int64_t Imm = Inst.getOperand(LblOperand).getImm(); + Target = Addr + Imm; + return true; + } +}; + +} + +static MCInstrAnalysis *createAArch64MCInstrAnalysis(const MCInstrInfo *Info) { + return new AArch64MCInstrAnalysis(Info); +} + + + +extern "C" void LLVMInitializeAArch64TargetMC() { + // Register the MC asm info. + RegisterMCAsmInfoFn A(TheAArch64Target, createAArch64MCAsmInfo); + + // Register the MC codegen info. + TargetRegistry::RegisterMCCodeGenInfo(TheAArch64Target, + createAArch64MCCodeGenInfo); + + // Register the MC instruction info. + TargetRegistry::RegisterMCInstrInfo(TheAArch64Target, + createAArch64MCInstrInfo); + + // Register the MC register info. + TargetRegistry::RegisterMCRegInfo(TheAArch64Target, + createAArch64MCRegisterInfo); + + // Register the MC subtarget info. + using AArch64_MC::createAArch64MCSubtargetInfo; + TargetRegistry::RegisterMCSubtargetInfo(TheAArch64Target, + createAArch64MCSubtargetInfo); + + // Register the MC instruction analyzer. + TargetRegistry::RegisterMCInstrAnalysis(TheAArch64Target, + createAArch64MCInstrAnalysis); + + // Register the MC Code Emitter + TargetRegistry::RegisterMCCodeEmitter(TheAArch64Target, + createAArch64MCCodeEmitter); + + // Register the asm backend. + TargetRegistry::RegisterMCAsmBackend(TheAArch64Target, + createAArch64AsmBackend); + + // Register the object streamer. + TargetRegistry::RegisterMCObjectStreamer(TheAArch64Target, + createMCStreamer); + + // Register the MCInstPrinter. 
+ TargetRegistry::RegisterMCInstPrinter(TheAArch64Target, + createAArch64MCInstPrinter); +} diff --git a/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h new file mode 100644 index 000000000000..670e657ec73c --- /dev/null +++ b/contrib/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.h @@ -0,0 +1,66 @@ +//===-- AArch64MCTargetDesc.h - AArch64 Target Descriptions -----*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file provides AArch64 specific target descriptions. +// +//===----------------------------------------------------------------------===// + +#ifndef LLVM_AARCH64MCTARGETDESC_H +#define LLVM_AARCH64MCTARGETDESC_H + +#include "llvm/Support/DataTypes.h" + +namespace llvm { +class MCAsmBackend; +class MCCodeEmitter; +class MCContext; +class MCInstrInfo; +class MCObjectWriter; +class MCRegisterInfo; +class MCSubtargetInfo; +class StringRef; +class Target; +class raw_ostream; + +extern Target TheAArch64Target; + +namespace AArch64_MC { + MCSubtargetInfo *createAArch64MCSubtargetInfo(StringRef TT, StringRef CPU, + StringRef FS); +} + +MCCodeEmitter *createAArch64MCCodeEmitter(const MCInstrInfo &MCII, + const MCRegisterInfo &MRI, + const MCSubtargetInfo &STI, + MCContext &Ctx); + +MCObjectWriter *createAArch64ELFObjectWriter(raw_ostream &OS, + uint8_t OSABI); + +MCAsmBackend *createAArch64AsmBackend(const Target &T, + const MCRegisterInfo &MRI, + StringRef TT, StringRef CPU); + +} // End llvm namespace + +// Defines symbolic names for AArch64 registers. This defines a mapping from +// register name to register number. +// +#define GET_REGINFO_ENUM +#include "AArch64GenRegisterInfo.inc" + +// Defines symbolic names for the AArch64 instructions. 
+// +#define GET_INSTRINFO_ENUM +#include "AArch64GenInstrInfo.inc" + +#define GET_SUBTARGETINFO_ENUM +#include "AArch64GenSubtargetInfo.inc" + +#endif |