about summary refs log tree commit diff
path: root/contrib/llvm/lib/Target/X86/MCTargetDesc
diff options
context:
space:
mode:
Diffstat (limited to 'contrib/llvm/lib/Target/X86/MCTargetDesc')
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp          | 114
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h              |  31
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp     |  75
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp   | 141
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h            |   6
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp           |   2
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp       | 618
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp        | 101
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h          |  10
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp | 119
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp    |   7
-rw-r--r--  contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp |   8
12 files changed, 500 insertions, 732 deletions
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
index 135c32bf8c3b..e77a0dc9bc27 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86AsmBackend.cpp
@@ -21,7 +21,7 @@
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
-#include "llvm/Support/CommandLine.h"
+#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachO.h"
@@ -43,8 +43,11 @@ static unsigned getFixupKindLog2Size(unsigned Kind) {
return 1;
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_relax:
+ case X86::reloc_riprel_4byte_relax_rex:
case X86::reloc_riprel_4byte_movq_load:
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
case X86::reloc_global_offset_table:
case FK_SecRel_4:
case FK_Data_4:
@@ -72,7 +75,8 @@ class X86AsmBackend : public MCAsmBackend {
const uint64_t MaxNopLength;
public:
X86AsmBackend(const Target &T, StringRef CPU)
- : MCAsmBackend(), CPU(CPU), MaxNopLength(CPU == "slm" ? 7 : 15) {
+ : MCAsmBackend(), CPU(CPU),
+ MaxNopLength((CPU == "slm" || CPU == "lakemont") ? 7 : 15) {
HasNopl = CPU != "generic" && CPU != "i386" && CPU != "i486" &&
CPU != "i586" && CPU != "pentium" && CPU != "pentium-mmx" &&
CPU != "i686" && CPU != "k6" && CPU != "k6-2" && CPU != "k6-3" &&
@@ -86,10 +90,14 @@ public:
const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
- { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
- { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel},
- { "reloc_signed_4byte", 0, 4 * 8, 0},
- { "reloc_global_offset_table", 0, 4 * 8, 0}
+ {"reloc_riprel_4byte", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_riprel_4byte_movq_load", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_riprel_4byte_relax", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_riprel_4byte_relax_rex", 0, 32, MCFixupKindInfo::FKF_IsPCRel},
+ {"reloc_signed_4byte", 0, 32, 0},
+ {"reloc_signed_4byte_relax", 0, 32, 0},
+ {"reloc_global_offset_table", 0, 32, 0},
+ {"reloc_global_offset_table8", 0, 64, 0},
};
if (Kind < FirstTargetFixupKind)
@@ -124,38 +132,57 @@ public:
const MCRelaxableFragment *DF,
const MCAsmLayout &Layout) const override;
- void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override;
bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
};
} // end anonymous namespace
-static unsigned getRelaxedOpcodeBranch(unsigned Op) {
+static unsigned getRelaxedOpcodeBranch(const MCInst &Inst, bool is16BitMode) {
+ unsigned Op = Inst.getOpcode();
switch (Op) {
default:
return Op;
-
- case X86::JAE_1: return X86::JAE_4;
- case X86::JA_1: return X86::JA_4;
- case X86::JBE_1: return X86::JBE_4;
- case X86::JB_1: return X86::JB_4;
- case X86::JE_1: return X86::JE_4;
- case X86::JGE_1: return X86::JGE_4;
- case X86::JG_1: return X86::JG_4;
- case X86::JLE_1: return X86::JLE_4;
- case X86::JL_1: return X86::JL_4;
- case X86::JMP_1: return X86::JMP_4;
- case X86::JNE_1: return X86::JNE_4;
- case X86::JNO_1: return X86::JNO_4;
- case X86::JNP_1: return X86::JNP_4;
- case X86::JNS_1: return X86::JNS_4;
- case X86::JO_1: return X86::JO_4;
- case X86::JP_1: return X86::JP_4;
- case X86::JS_1: return X86::JS_4;
+ case X86::JAE_1:
+ return (is16BitMode) ? X86::JAE_2 : X86::JAE_4;
+ case X86::JA_1:
+ return (is16BitMode) ? X86::JA_2 : X86::JA_4;
+ case X86::JBE_1:
+ return (is16BitMode) ? X86::JBE_2 : X86::JBE_4;
+ case X86::JB_1:
+ return (is16BitMode) ? X86::JB_2 : X86::JB_4;
+ case X86::JE_1:
+ return (is16BitMode) ? X86::JE_2 : X86::JE_4;
+ case X86::JGE_1:
+ return (is16BitMode) ? X86::JGE_2 : X86::JGE_4;
+ case X86::JG_1:
+ return (is16BitMode) ? X86::JG_2 : X86::JG_4;
+ case X86::JLE_1:
+ return (is16BitMode) ? X86::JLE_2 : X86::JLE_4;
+ case X86::JL_1:
+ return (is16BitMode) ? X86::JL_2 : X86::JL_4;
+ case X86::JMP_1:
+ return (is16BitMode) ? X86::JMP_2 : X86::JMP_4;
+ case X86::JNE_1:
+ return (is16BitMode) ? X86::JNE_2 : X86::JNE_4;
+ case X86::JNO_1:
+ return (is16BitMode) ? X86::JNO_2 : X86::JNO_4;
+ case X86::JNP_1:
+ return (is16BitMode) ? X86::JNP_2 : X86::JNP_4;
+ case X86::JNS_1:
+ return (is16BitMode) ? X86::JNS_2 : X86::JNS_4;
+ case X86::JO_1:
+ return (is16BitMode) ? X86::JO_2 : X86::JO_4;
+ case X86::JP_1:
+ return (is16BitMode) ? X86::JP_2 : X86::JP_4;
+ case X86::JS_1:
+ return (is16BitMode) ? X86::JS_2 : X86::JS_4;
}
}
-static unsigned getRelaxedOpcodeArith(unsigned Op) {
+static unsigned getRelaxedOpcodeArith(const MCInst &Inst) {
+ unsigned Op = Inst.getOpcode();
switch (Op) {
default:
return Op;
@@ -239,20 +266,20 @@ static unsigned getRelaxedOpcodeArith(unsigned Op) {
}
}
-static unsigned getRelaxedOpcode(unsigned Op) {
- unsigned R = getRelaxedOpcodeArith(Op);
- if (R != Op)
+static unsigned getRelaxedOpcode(const MCInst &Inst, bool is16BitMode) {
+ unsigned R = getRelaxedOpcodeArith(Inst);
+ if (R != Inst.getOpcode())
return R;
- return getRelaxedOpcodeBranch(Op);
+ return getRelaxedOpcodeBranch(Inst, is16BitMode);
}
bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
- // Branches can always be relaxed.
- if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode())
+ // Branches can always be relaxed in either mode.
+ if (getRelaxedOpcodeBranch(Inst, false) != Inst.getOpcode())
return true;
// Check if this instruction is ever relaxable.
- if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode())
+ if (getRelaxedOpcodeArith(Inst) == Inst.getOpcode())
return false;
@@ -275,9 +302,12 @@ bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
-void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
+void X86AsmBackend::relaxInstruction(const MCInst &Inst,
+ const MCSubtargetInfo &STI,
+ MCInst &Res) const {
// The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel.
- unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());
+ bool is16BitMode = STI.getFeatureBits()[X86::Mode16Bit];
+ unsigned RelaxedOp = getRelaxedOpcode(Inst, is16BitMode);
if (RelaxedOp == Inst.getOpcode()) {
SmallString<256> Tmp;
@@ -405,6 +435,14 @@ public:
, Is64Bit(is64Bit) {
}
+ Optional<MCFixupKind> getFixupKind(StringRef Name) const override {
+ return StringSwitch<Optional<MCFixupKind>>(Name)
+ .Case("dir32", FK_Data_4)
+ .Case("secrel32", FK_SecRel_4)
+ .Case("secidx", FK_SecRel_2)
+ .Default(MCAsmBackend::getFixupKind(Name));
+ }
+
MCObjectWriter *createObjectWriter(raw_pwrite_stream &OS) const override {
return createX86WinCOFFObjectWriter(OS, Is64Bit);
}
@@ -803,7 +841,7 @@ MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
if (TheTriple.isOSBinFormatMachO())
return new DarwinX86_32AsmBackend(T, MRI, CPU);
- if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
+ if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
return new WindowsX86AsmBackend(T, false, CPU);
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
@@ -826,7 +864,7 @@ MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
return new DarwinX86_64AsmBackend(T, MRI, CPU, CS);
}
- if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
+ if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
return new WindowsX86AsmBackend(T, true, CPU);
uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
index 9ff85b9154f8..b4195176f904 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -186,11 +186,6 @@ namespace X86II {
/// dllimport linkage on windows.
MO_DLLIMPORT,
- /// MO_DARWIN_STUB - On a symbol operand "FOO", this indicates that the
- /// reference is actually to the "FOO$stub" symbol. This is used for calls
- /// and jumps to external functions on Tiger and earlier.
- MO_DARWIN_STUB,
-
/// MO_DARWIN_NONLAZY - On a symbol operand "FOO", this indicates that the
/// reference is actually to the "FOO$non_lazy_ptr" symbol, which is a
/// non-PIC-base-relative reference to a non-hidden dyld lazy pointer stub.
@@ -201,12 +196,6 @@ namespace X86II {
/// a PIC-base-relative reference to a non-hidden dyld lazy pointer stub.
MO_DARWIN_NONLAZY_PIC_BASE,
- /// MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE - On a symbol operand "FOO", this
- /// indicates that the reference is actually to "FOO$non_lazy_ptr -PICBASE",
- /// which is a PIC-base-relative reference to a hidden dyld lazy pointer
- /// stub.
- MO_DARWIN_HIDDEN_NONLAZY_PIC_BASE,
-
/// MO_TLVP - On a symbol operand this indicates that the immediate is
/// some TLS offset.
///
@@ -667,7 +656,7 @@ namespace X86II {
/// is duplicated in the MCInst (e.g. "EAX = addl EAX, [mem]") it is only
/// counted as one operand.
///
- inline int getMemoryOperandNo(uint64_t TSFlags, unsigned Opcode) {
+ inline int getMemoryOperandNo(uint64_t TSFlags) {
bool HasVEX_4V = TSFlags & X86II::VEX_4V;
bool HasMemOp4 = TSFlags & X86II::MemOp4;
bool HasEVEX_K = TSFlags & X86II::EVEX_K;
@@ -734,12 +723,12 @@ namespace X86II {
/// isX86_64ExtendedReg - Is the MachineOperand a x86-64 extended (r8 or
/// higher) register? e.g. r8, xmm8, xmm13, etc.
inline bool isX86_64ExtendedReg(unsigned RegNo) {
- if ((RegNo > X86::XMM7 && RegNo <= X86::XMM15) ||
- (RegNo > X86::XMM23 && RegNo <= X86::XMM31) ||
- (RegNo > X86::YMM7 && RegNo <= X86::YMM15) ||
- (RegNo > X86::YMM23 && RegNo <= X86::YMM31) ||
- (RegNo > X86::ZMM7 && RegNo <= X86::ZMM15) ||
- (RegNo > X86::ZMM23 && RegNo <= X86::ZMM31))
+ if ((RegNo >= X86::XMM8 && RegNo <= X86::XMM15) ||
+ (RegNo >= X86::XMM24 && RegNo <= X86::XMM31) ||
+ (RegNo >= X86::YMM8 && RegNo <= X86::YMM15) ||
+ (RegNo >= X86::YMM24 && RegNo <= X86::YMM31) ||
+ (RegNo >= X86::ZMM8 && RegNo <= X86::ZMM15) ||
+ (RegNo >= X86::ZMM24 && RegNo <= X86::ZMM31))
return true;
switch (RegNo) {
@@ -762,9 +751,9 @@ namespace X86II {
/// is32ExtendedReg - Is the MemoryOperand a 32 extended (zmm16 or higher)
/// registers? e.g. zmm21, etc.
static inline bool is32ExtendedReg(unsigned RegNo) {
- return ((RegNo > X86::XMM15 && RegNo <= X86::XMM31) ||
- (RegNo > X86::YMM15 && RegNo <= X86::YMM31) ||
- (RegNo > X86::ZMM15 && RegNo <= X86::ZMM31));
+ return ((RegNo >= X86::XMM16 && RegNo <= X86::XMM31) ||
+ (RegNo >= X86::YMM16 && RegNo <= X86::YMM31) ||
+ (RegNo >= X86::ZMM16 && RegNo <= X86::ZMM31));
}
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
index 736c39dfb6f1..da69da51df10 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -9,6 +9,8 @@
#include "MCTargetDesc/X86FixupKinds.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCValue.h"
@@ -25,8 +27,8 @@ namespace {
~X86ELFObjectWriter() override;
protected:
- unsigned GetRelocType(const MCValue &Target, const MCFixup &Fixup,
- bool IsPCRel) const override;
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
};
}
@@ -56,6 +58,7 @@ static X86_64RelType getType64(unsigned Kind,
case FK_Data_8:
return RT64_64;
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
if (Modifier == MCSymbolRefExpr::VK_None && !IsPCRel)
return RT64_32S;
return RT64_32;
@@ -66,6 +69,8 @@ static X86_64RelType getType64(unsigned Kind,
case FK_Data_4:
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_relax:
+ case X86::reloc_riprel_4byte_relax_rex:
case X86::reloc_riprel_4byte_movq_load:
return RT64_32;
case FK_PCRel_2:
@@ -77,8 +82,16 @@ static X86_64RelType getType64(unsigned Kind,
}
}
-static unsigned getRelocType64(MCSymbolRefExpr::VariantKind Modifier,
- X86_64RelType Type, bool IsPCRel) {
+static void checkIs32(MCContext &Ctx, SMLoc Loc, X86_64RelType Type) {
+ if (Type != RT64_32)
+ Ctx.reportError(Loc,
+ "32 bit reloc applied to a field with a different size");
+}
+
+static unsigned getRelocType64(MCContext &Ctx, SMLoc Loc,
+ MCSymbolRefExpr::VariantKind Modifier,
+ X86_64RelType Type, bool IsPCRel,
+ unsigned Kind) {
switch (Modifier) {
default:
llvm_unreachable("Unimplemented");
@@ -146,21 +159,38 @@ static unsigned getRelocType64(MCSymbolRefExpr::VariantKind Modifier,
case RT64_8:
llvm_unreachable("Unimplemented");
}
+ case MCSymbolRefExpr::VK_TLSCALL:
+ return ELF::R_X86_64_TLSDESC_CALL;
+ case MCSymbolRefExpr::VK_TLSDESC:
+ return ELF::R_X86_64_GOTPC32_TLSDESC;
case MCSymbolRefExpr::VK_TLSGD:
- assert(Type == RT64_32);
+ checkIs32(Ctx, Loc, Type);
return ELF::R_X86_64_TLSGD;
case MCSymbolRefExpr::VK_GOTTPOFF:
- assert(Type == RT64_32);
+ checkIs32(Ctx, Loc, Type);
return ELF::R_X86_64_GOTTPOFF;
case MCSymbolRefExpr::VK_TLSLD:
- assert(Type == RT64_32);
+ checkIs32(Ctx, Loc, Type);
return ELF::R_X86_64_TLSLD;
case MCSymbolRefExpr::VK_PLT:
- assert(Type == RT64_32);
+ checkIs32(Ctx, Loc, Type);
return ELF::R_X86_64_PLT32;
case MCSymbolRefExpr::VK_GOTPCREL:
- assert(Type == RT64_32);
- return ELF::R_X86_64_GOTPCREL;
+ checkIs32(Ctx, Loc, Type);
+ // Older versions of ld.bfd/ld.gold/lld
+ // do not support GOTPCRELX/REX_GOTPCRELX,
+ // and we want to keep back-compatibility.
+ if (!Ctx.getAsmInfo()->canRelaxRelocations())
+ return ELF::R_X86_64_GOTPCREL;
+ switch (Kind) {
+ default:
+ return ELF::R_X86_64_GOTPCREL;
+ case X86::reloc_riprel_4byte_relax:
+ return ELF::R_X86_64_GOTPCRELX;
+ case X86::reloc_riprel_4byte_relax_rex:
+ case X86::reloc_riprel_4byte_movq_load:
+ return ELF::R_X86_64_REX_GOTPCRELX;
+ }
}
}
@@ -181,8 +211,10 @@ static X86_32RelType getType32(X86_64RelType T) {
llvm_unreachable("unexpected relocation type!");
}
-static unsigned getRelocType32(MCSymbolRefExpr::VariantKind Modifier,
- X86_32RelType Type, bool IsPCRel) {
+static unsigned getRelocType32(MCContext &Ctx,
+ MCSymbolRefExpr::VariantKind Modifier,
+ X86_32RelType Type, bool IsPCRel,
+ unsigned Kind) {
switch (Modifier) {
default:
llvm_unreachable("Unimplemented");
@@ -197,7 +229,15 @@ static unsigned getRelocType32(MCSymbolRefExpr::VariantKind Modifier,
}
case MCSymbolRefExpr::VK_GOT:
assert(Type == RT32_32);
- return IsPCRel ? ELF::R_386_GOTPC : ELF::R_386_GOT32;
+ if (IsPCRel)
+ return ELF::R_386_GOTPC;
+ // Older versions of ld.bfd/ld.gold/lld do not support R_386_GOT32X and we
+ // want to maintain compatibility.
+ if (!Ctx.getAsmInfo()->canRelaxRelocations())
+ return ELF::R_386_GOT32;
+
+ return Kind == X86::reloc_signed_4byte_relax ? ELF::R_386_GOT32X
+ : ELF::R_386_GOT32;
case MCSymbolRefExpr::VK_GOTOFF:
assert(Type == RT32_32);
assert(!IsPCRel);
@@ -240,17 +280,18 @@ static unsigned getRelocType32(MCSymbolRefExpr::VariantKind Modifier,
}
}
-unsigned X86ELFObjectWriter::GetRelocType(const MCValue &Target,
+unsigned X86ELFObjectWriter::getRelocType(MCContext &Ctx, const MCValue &Target,
const MCFixup &Fixup,
bool IsPCRel) const {
MCSymbolRefExpr::VariantKind Modifier = Target.getAccessVariant();
- X86_64RelType Type = getType64(Fixup.getKind(), Modifier, IsPCRel);
+ unsigned Kind = Fixup.getKind();
+ X86_64RelType Type = getType64(Kind, Modifier, IsPCRel);
if (getEMachine() == ELF::EM_X86_64)
- return getRelocType64(Modifier, Type, IsPCRel);
+ return getRelocType64(Ctx, Fixup.getLoc(), Modifier, Type, IsPCRel, Kind);
assert((getEMachine() == ELF::EM_386 || getEMachine() == ELF::EM_IAMCU) &&
"Unsupported ELF machine type.");
- return getRelocType32(Modifier, getType32(Type), IsPCRel);
+ return getRelocType32(Ctx, Modifier, getType32(Type), IsPCRel, Kind);
}
MCObjectWriter *llvm::createX86ELFObjectWriter(raw_pwrite_stream &OS,
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
deleted file mode 100644
index ddb764facdbf..000000000000
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86ELFRelocationInfo.cpp
+++ /dev/null
@@ -1,141 +0,0 @@
-//===-- X86ELFRelocationInfo.cpp ----------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/X86MCTargetDesc.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCRelocationInfo.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Object/ELFObjectFile.h"
-#include "llvm/Support/ELF.h"
-
-using namespace llvm;
-using namespace object;
-using namespace ELF;
-
-namespace {
-class X86_64ELFRelocationInfo : public MCRelocationInfo {
-public:
- X86_64ELFRelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {}
-
- const MCExpr *createExprForRelocation(RelocationRef Rel) override {
- uint64_t RelType = Rel.getType();
- elf_symbol_iterator SymI = Rel.getSymbol();
-
- ErrorOr<StringRef> SymNameOrErr = SymI->getName();
- if (std::error_code EC = SymNameOrErr.getError())
- report_fatal_error(EC.message());
- StringRef SymName = *SymNameOrErr;
-
- ErrorOr<uint64_t> SymAddr = SymI->getAddress();
- if (std::error_code EC = SymAddr.getError())
- report_fatal_error(EC.message());
- uint64_t SymSize = SymI->getSize();
- int64_t Addend = *ELFRelocationRef(Rel).getAddend();
-
- MCSymbol *Sym = Ctx.getOrCreateSymbol(SymName);
- // FIXME: check that the value is actually the same.
- if (!Sym->isVariable())
- Sym->setVariableValue(MCConstantExpr::create(*SymAddr, Ctx));
-
- const MCExpr *Expr = nullptr;
- // If hasAddend is true, then we need to add Addend (r_addend) to Expr.
- bool hasAddend = false;
-
- // The AMD64 SysV ABI says:
- // A: the addend used to compute the value of the relocatable field.
- // B: the base address at which a shared object has been loaded into memory
- // during execution. Generally, a shared object is built with a 0 base
- // virtual address, but the execution address will be different.
- // G: the offset into the global offset table at which the relocation
- // entry's symbol will reside during execution.
- // GOT: the address of the global offset table.
- // L: the place (section offset or address) of the Procedure Linkage Table
- // entry for a symbol.
- // P: the place (section offset or address) of the storage unit being
- // relocated (computed using r_offset).
- // S: the value of the symbol whose index resides in the relocation entry.
- // Z: the size of the symbol whose index resides in the relocation entry.
-
- switch(RelType) {
- case R_X86_64_NONE:
- case R_X86_64_COPY:
- // none
- break;
- case R_X86_64_64:
- case R_X86_64_16:
- case R_X86_64_8:
- // S + A
- case R_X86_64_32:
- case R_X86_64_32S:
- // S + A (We don't care about the result not fitting in 32 bits.)
- case R_X86_64_PC32:
- case R_X86_64_PC16:
- case R_X86_64_PC8:
- case R_X86_64_PC64:
- // S + A - P (P/pcrel is implicit)
- hasAddend = true;
- Expr = MCSymbolRefExpr::create(Sym, Ctx);
- break;
- case R_X86_64_GOT32:
- case R_X86_64_GOT64:
- case R_X86_64_GOTPC32:
- case R_X86_64_GOTPC64:
- case R_X86_64_GOTPLT64:
- // G + A
- hasAddend = true;
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOT, Ctx);
- break;
- case R_X86_64_PLT32:
- // L + A - P -> S@PLT + A
- hasAddend = true;
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_PLT, Ctx);
- break;
- case R_X86_64_GLOB_DAT:
- case R_X86_64_JUMP_SLOT:
- // S
- Expr = MCSymbolRefExpr::create(Sym, Ctx);
- break;
- case R_X86_64_GOTPCREL:
- case R_X86_64_GOTPCREL64:
- // G + GOT + A - P -> S@GOTPCREL + A
- hasAddend = true;
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOTPCREL, Ctx);
- break;
- case R_X86_64_GOTOFF64:
- // S + A - GOT
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOTOFF, Ctx);
- break;
- case R_X86_64_PLTOFF64:
- // L + A - GOT
- break;
- case R_X86_64_SIZE32:
- case R_X86_64_SIZE64:
- // Z + A
- Expr = MCConstantExpr::create(SymSize, Ctx);
- break;
- default:
- Expr = MCSymbolRefExpr::create(Sym, Ctx);
- break;
- }
- if (Expr && hasAddend && Addend != 0)
- Expr = MCBinaryExpr::createAdd(Expr,
- MCConstantExpr::create(Addend, Ctx),
- Ctx);
- return Expr;
- }
-};
-} // End unnamed namespace
-
-/// createX86ELFRelocationInfo - Construct an X86 Mach-O RelocationInfo.
-MCRelocationInfo *llvm::createX86_64ELFRelocationInfo(MCContext &Ctx) {
- // We only handle x86-64 for now.
- return new X86_64ELFRelocationInfo(Ctx);
-}
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
index 4899900dcef9..dfdc9ec29aec 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86FixupKinds.h
@@ -17,9 +17,15 @@ namespace X86 {
enum Fixups {
reloc_riprel_4byte = FirstTargetFixupKind, // 32-bit rip-relative
reloc_riprel_4byte_movq_load, // 32-bit rip-relative in movq
+ reloc_riprel_4byte_relax, // 32-bit rip-relative in relaxable
+ // instruction
+ reloc_riprel_4byte_relax_rex, // 32-bit rip-relative in relaxable
+ // instruction with rex prefix
reloc_signed_4byte, // 32-bit signed. Unlike FK_Data_4
// this will be sign extended at
// runtime.
+ reloc_signed_4byte_relax, // like reloc_signed_4byte, but
+ // in a relaxable instruction.
reloc_global_offset_table, // 32-bit, relative to the start
// of the instruction. Used only
// for _GLOBAL_OFFSET_TABLE_.
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
index fc0b0f89e23d..b7c56cec2db8 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCAsmInfo.cpp
@@ -35,7 +35,7 @@ AsmWriterFlavor("x86-asm-syntax", cl::init(ATT),
clEnumValEnd));
static cl::opt<bool>
-MarkedJTDataRegions("mark-data-regions", cl::init(false),
+MarkedJTDataRegions("mark-data-regions", cl::init(true),
cl::desc("Mark code section jump table data regions."),
cl::Hidden);
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
index dfab6ec10775..96c2e81c332a 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCCodeEmitter.cpp
@@ -76,36 +76,16 @@ public:
return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg()) & 0x7;
}
- // On regular x86, both XMM0-XMM7 and XMM8-XMM15 are encoded in the range
- // 0-7 and the difference between the 2 groups is given by the REX prefix.
- // In the VEX prefix, registers are seen sequencially from 0-15 and encoded
- // in 1's complement form, example:
- //
- // ModRM field => XMM9 => 1
- // VEX.VVVV => XMM9 => ~9
- //
- // See table 4-35 of Intel AVX Programming Reference for details.
- unsigned char getVEXRegisterEncoding(const MCInst &MI,
- unsigned OpNum) const {
- unsigned SrcReg = MI.getOperand(OpNum).getReg();
- unsigned SrcRegNum = GetX86RegNum(MI.getOperand(OpNum));
- if (X86II::isX86_64ExtendedReg(SrcReg))
- SrcRegNum |= 8;
-
- // The registers represented through VEX_VVVV should
- // be encoded in 1's complement form.
- return (~SrcRegNum) & 0xf;
+ unsigned getX86RegEncoding(const MCInst &MI, unsigned OpNum) const {
+ return Ctx.getRegisterInfo()->getEncodingValue(
+ MI.getOperand(OpNum).getReg());
}
- unsigned char getWriteMaskRegisterEncoding(const MCInst &MI,
- unsigned OpNum) const {
- assert(X86::K0 != MI.getOperand(OpNum).getReg() &&
- "Invalid mask register as write-mask!");
- unsigned MaskRegNum = GetX86RegNum(MI.getOperand(OpNum));
- return MaskRegNum;
+ bool isX86_64ExtendedReg(const MCInst &MI, unsigned OpNum) const {
+ return (getX86RegEncoding(MI, OpNum) >> 3) & 1;
}
- void EmitByte(unsigned char C, unsigned &CurByte, raw_ostream &OS) const {
+ void EmitByte(uint8_t C, unsigned &CurByte, raw_ostream &OS) const {
OS << (char)C;
++CurByte;
}
@@ -125,8 +105,8 @@ public:
SmallVectorImpl<MCFixup> &Fixups,
int ImmOffset = 0) const;
- inline static unsigned char ModRMByte(unsigned Mod, unsigned RegOpcode,
- unsigned RM) {
+ inline static uint8_t ModRMByte(unsigned Mod, unsigned RegOpcode,
+ unsigned RM) {
assert(Mod < 4 && RegOpcode < 8 && RM < 8 && "ModRM Fields out of range!");
return RM | (RegOpcode << 3) | (Mod << 6);
}
@@ -142,11 +122,9 @@ public:
EmitByte(ModRMByte(SS, Index, Base), CurByte, OS);
}
-
- void EmitMemModRMByte(const MCInst &MI, unsigned Op,
- unsigned RegOpcodeField,
- uint64_t TSFlags, unsigned &CurByte, raw_ostream &OS,
- SmallVectorImpl<MCFixup> &Fixups,
+ void emitMemModRMByte(const MCInst &MI, unsigned Op, unsigned RegOpcodeField,
+ uint64_t TSFlags, bool Rex, unsigned &CurByte,
+ raw_ostream &OS, SmallVectorImpl<MCFixup> &Fixups,
const MCSubtargetInfo &STI) const;
void encodeInstruction(const MCInst &MI, raw_ostream &OS,
@@ -160,10 +138,12 @@ public:
void EmitSegmentOverridePrefix(unsigned &CurByte, unsigned SegOperand,
const MCInst &MI, raw_ostream &OS) const;
- void EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
+ bool emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, int MemOperand,
const MCInst &MI, const MCInstrDesc &Desc,
- const MCSubtargetInfo &STI,
- raw_ostream &OS) const;
+ const MCSubtargetInfo &STI, raw_ostream &OS) const;
+
+ uint8_t DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
+ int MemOperand, const MCInstrDesc &Desc) const;
};
} // end anonymous namespace
@@ -177,7 +157,7 @@ MCCodeEmitter *llvm::createX86MCCodeEmitter(const MCInstrInfo &MCII,
/// isDisp8 - Return true if this signed displacement fits in a 8-bit
/// sign-extended field.
static bool isDisp8(int Value) {
- return Value == (signed char)Value;
+ return Value == (int8_t)Value;
}
/// isCDisp8 - Return true if this signed displacement fits in a 8-bit
@@ -198,7 +178,7 @@ static bool isCDisp8(uint64_t TSFlags, int Value, int& CValue) {
if (Value & Mask) // Unaligned offset
return false;
Value /= (int)CD8_Scale;
- bool Ret = (Value == (signed char)Value);
+ bool Ret = (Value == (int8_t)Value);
if (Ret)
CValue = Value;
@@ -231,6 +211,10 @@ static bool Is32BitMemOperand(const MCInst &MI, unsigned Op) {
(IndexReg.getReg() != 0 &&
X86MCRegisterClasses[X86::GR32RegClassID].contains(IndexReg.getReg())))
return true;
+ if (BaseReg.getReg() == X86::EIP) {
+ assert(IndexReg.getReg() == 0 && "Invalid eip-based address.");
+ return true;
+ }
return false;
}
@@ -343,7 +327,9 @@ EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
// the start of the field, not the end of the field.
if (FixupKind == FK_PCRel_4 ||
FixupKind == MCFixupKind(X86::reloc_riprel_4byte) ||
- FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load))
+ FixupKind == MCFixupKind(X86::reloc_riprel_4byte_movq_load) ||
+ FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax) ||
+ FixupKind == MCFixupKind(X86::reloc_riprel_4byte_relax_rex))
ImmOffset -= 4;
if (FixupKind == FK_PCRel_2)
ImmOffset -= 2;
@@ -359,12 +345,12 @@ EmitImmediate(const MCOperand &DispOp, SMLoc Loc, unsigned Size,
EmitConstant(0, Size, CurByte, OS);
}
-void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
+void X86MCCodeEmitter::emitMemModRMByte(const MCInst &MI, unsigned Op,
unsigned RegOpcodeField,
- uint64_t TSFlags, unsigned &CurByte,
- raw_ostream &OS,
+ uint64_t TSFlags, bool Rex,
+ unsigned &CurByte, raw_ostream &OS,
SmallVectorImpl<MCFixup> &Fixups,
- const MCSubtargetInfo &STI) const{
+ const MCSubtargetInfo &STI) const {
const MCOperand &Disp = MI.getOperand(Op+X86::AddrDisp);
const MCOperand &Base = MI.getOperand(Op+X86::AddrBaseReg);
const MCOperand &Scale = MI.getOperand(Op+X86::AddrScaleAmt);
@@ -373,18 +359,38 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
bool HasEVEX = (TSFlags & X86II::EncodingMask) == X86II::EVEX;
// Handle %rip relative addressing.
- if (BaseReg == X86::RIP) { // [disp32+RIP] in X86-64 mode
+ if (BaseReg == X86::RIP ||
+ BaseReg == X86::EIP) { // [disp32+rIP] in X86-64 mode
assert(is64BitMode(STI) && "Rip-relative addressing requires 64-bit mode");
assert(IndexReg.getReg() == 0 && "Invalid rip-relative address");
EmitByte(ModRMByte(0, RegOpcodeField, 5), CurByte, OS);
- unsigned FixupKind = X86::reloc_riprel_4byte;
-
+ unsigned Opcode = MI.getOpcode();
// movq loads are handled with a special relocation form which allows the
// linker to eliminate some loads for GOT references which end up in the
// same linkage unit.
- if (MI.getOpcode() == X86::MOV64rm)
- FixupKind = X86::reloc_riprel_4byte_movq_load;
+ unsigned FixupKind = [=]() {
+ switch (Opcode) {
+ default:
+ return X86::reloc_riprel_4byte;
+ case X86::MOV64rm:
+ assert(Rex);
+ return X86::reloc_riprel_4byte_movq_load;
+ case X86::CALL64m:
+ case X86::JMP64m:
+ case X86::TEST64rm:
+ case X86::ADC64rm:
+ case X86::ADD64rm:
+ case X86::AND64rm:
+ case X86::CMP64rm:
+ case X86::OR64rm:
+ case X86::SBB64rm:
+ case X86::SUB64rm:
+ case X86::XOR64rm:
+ return Rex ? X86::reloc_riprel_4byte_relax_rex
+ : X86::reloc_riprel_4byte_relax;
+ }
+ }();
// rip-relative addressing is actually relative to the *next* instruction.
// Since an immediate can follow the mod/rm byte for an instruction, this
@@ -510,8 +516,11 @@ void X86MCCodeEmitter::EmitMemModRMByte(const MCInst &MI, unsigned Op,
// Otherwise, emit the most general non-SIB encoding: [REG+disp32]
EmitByte(ModRMByte(2, RegOpcodeField, BaseRegNo), CurByte, OS);
- EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(X86::reloc_signed_4byte),
- CurByte, OS, Fixups);
+ unsigned Opcode = MI.getOpcode();
+ unsigned FixupKind = Opcode == X86::MOV32rm ? X86::reloc_signed_4byte_relax
+ : X86::reloc_signed_4byte;
+ EmitImmediate(Disp, MI.getLoc(), 4, MCFixupKind(FixupKind), CurByte, OS,
+ Fixups);
return;
}
@@ -603,26 +612,26 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 1: Same as REX_R=0 (must be 1 in 32-bit mode)
// 0: Same as REX_R=1 (64 bit mode only)
//
- unsigned char VEX_R = 0x1;
- unsigned char EVEX_R2 = 0x1;
+ uint8_t VEX_R = 0x1;
+ uint8_t EVEX_R2 = 0x1;
// VEX_X: equivalent to REX.X, only used when a
// register is used for index in SIB Byte.
//
// 1: Same as REX.X=0 (must be 1 in 32-bit mode)
// 0: Same as REX.X=1 (64-bit mode only)
- unsigned char VEX_X = 0x1;
+ uint8_t VEX_X = 0x1;
// VEX_B:
//
// 1: Same as REX_B=0 (ignored in 32-bit mode)
// 0: Same as REX_B=1 (64 bit mode only)
//
- unsigned char VEX_B = 0x1;
+ uint8_t VEX_B = 0x1;
// VEX_W: opcode specific (use like REX.W, or used for
// opcode extension, or ignored, depending on the opcode byte)
- unsigned char VEX_W = 0;
+ uint8_t VEX_W = (TSFlags & X86II::VEX_W) ? 1 : 0;
// VEX_5M (VEX m-mmmmm field):
//
@@ -634,20 +643,31 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 0b01000: XOP map select - 08h instructions with imm byte
// 0b01001: XOP map select - 09h instructions with no imm byte
// 0b01010: XOP map select - 0Ah instructions with imm dword
- unsigned char VEX_5M = 0;
+ uint8_t VEX_5M;
+ switch (TSFlags & X86II::OpMapMask) {
+ default: llvm_unreachable("Invalid prefix!");
+ case X86II::TB: VEX_5M = 0x1; break; // 0F
+ case X86II::T8: VEX_5M = 0x2; break; // 0F 38
+ case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
+ case X86II::XOP8: VEX_5M = 0x8; break;
+ case X86II::XOP9: VEX_5M = 0x9; break;
+ case X86II::XOPA: VEX_5M = 0xA; break;
+ }
// VEX_4V (VEX vvvv field): a register specifier
// (in 1's complement form) or 1111 if unused.
- unsigned char VEX_4V = 0xf;
- unsigned char EVEX_V2 = 0x1;
+ uint8_t VEX_4V = 0xf;
+ uint8_t EVEX_V2 = 0x1;
- // VEX_L (Vector Length):
+ // EVEX_L2/VEX_L (Vector Length):
//
- // 0: scalar or 128-bit vector
- // 1: 256-bit vector
+ // L2 L
+ // 0 0: scalar or 128-bit vector
+ // 0 1: 256-bit vector
+ // 1 0: 512-bit vector
//
- unsigned char VEX_L = 0;
- unsigned char EVEX_L2 = 0;
+ uint8_t VEX_L = (TSFlags & X86II::VEX_L) ? 1 : 0;
+ uint8_t EVEX_L2 = (TSFlags & X86II::EVEX_L2) ? 1 : 0;
// VEX_PP: opcode extension providing equivalent
// functionality of a SIMD prefix
@@ -657,56 +677,32 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// 0b10: F3
// 0b11: F2
//
- unsigned char VEX_PP = 0;
+ uint8_t VEX_PP;
+ switch (TSFlags & X86II::OpPrefixMask) {
+ default: llvm_unreachable("Invalid op prefix!");
+ case X86II::PS: VEX_PP = 0x0; break; // none
+ case X86II::PD: VEX_PP = 0x1; break; // 66
+ case X86II::XS: VEX_PP = 0x2; break; // F3
+ case X86II::XD: VEX_PP = 0x3; break; // F2
+ }
// EVEX_U
- unsigned char EVEX_U = 1; // Always '1' so far
+ uint8_t EVEX_U = 1; // Always '1' so far
// EVEX_z
- unsigned char EVEX_z = 0;
+ uint8_t EVEX_z = (HasEVEX_K && (TSFlags & X86II::EVEX_Z)) ? 1 : 0;
// EVEX_b
- unsigned char EVEX_b = 0;
+ uint8_t EVEX_b = (TSFlags & X86II::EVEX_B) ? 1 : 0;
// EVEX_rc
- unsigned char EVEX_rc = 0;
+ uint8_t EVEX_rc = 0;
// EVEX_aaa
- unsigned char EVEX_aaa = 0;
+ uint8_t EVEX_aaa = 0;
bool EncodeRC = false;
- if (TSFlags & X86II::VEX_W)
- VEX_W = 1;
-
- if (TSFlags & X86II::VEX_L)
- VEX_L = 1;
- if (TSFlags & X86II::EVEX_L2)
- EVEX_L2 = 1;
-
- if (HasEVEX_K && (TSFlags & X86II::EVEX_Z))
- EVEX_z = 1;
-
- if ((TSFlags & X86II::EVEX_B))
- EVEX_b = 1;
-
- switch (TSFlags & X86II::OpPrefixMask) {
- default: break; // VEX_PP already correct
- case X86II::PD: VEX_PP = 0x1; break; // 66
- case X86II::XS: VEX_PP = 0x2; break; // F3
- case X86II::XD: VEX_PP = 0x3; break; // F2
- }
-
- switch (TSFlags & X86II::OpMapMask) {
- default: llvm_unreachable("Invalid prefix!");
- case X86II::TB: VEX_5M = 0x1; break; // 0F
- case X86II::T8: VEX_5M = 0x2; break; // 0F 38
- case X86II::TA: VEX_5M = 0x3; break; // 0F 3A
- case X86II::XOP8: VEX_5M = 0x8; break;
- case X86II::XOP9: VEX_5M = 0x9; break;
- case X86II::XOPA: VEX_5M = 0xA; break;
- }
-
// Classify VEX_B, VEX_4V, VEX_R, VEX_X
unsigned NumOps = Desc.getNumOperands();
unsigned CurOp = X86II::getOperandBias(Desc);
@@ -721,38 +717,30 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// MemAddr, src1(VEX_4V), src2(ModR/M)
// MemAddr, src1(ModR/M), imm8
//
- if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
- X86::AddrBaseReg).getReg()))
- VEX_B = 0x0;
- if (X86II::isX86_64ExtendedReg(MI.getOperand(MemOperand +
- X86::AddrIndexReg).getReg()))
- VEX_X = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(MemOperand +
- X86::AddrIndexReg).getReg()))
- EVEX_V2 = 0x0;
+ unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
+ VEX_B = ~(BaseRegEnc >> 3) & 1;
+ unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
+ VEX_X = ~(IndexRegEnc >> 3) & 1;
+ if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
+ EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
CurOp += X86::AddrNumOperands;
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
- const MCOperand &MO = MI.getOperand(CurOp);
- if (MO.isReg()) {
- if (X86II::isX86_64ExtendedReg(MO.getReg()))
- VEX_R = 0x0;
- if (X86II::is32ExtendedReg(MO.getReg()))
- EVEX_R2 = 0x0;
- }
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+ EVEX_R2 = ~(RegEnc >> 4) & 1;
break;
}
- case X86II::MRMSrcMem:
+ case X86II::MRMSrcMem: {
// MRMSrcMem instructions forms:
// src1(ModR/M), MemAddr
// src1(ModR/M), src2(VEX_4V), MemAddr
@@ -762,31 +750,25 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// FMA4:
// dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
// dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_R = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_R2 = 0x0;
- CurOp++;
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+ EVEX_R2 = ~(RegEnc >> 4) & 1;
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
- if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
- VEX_B = 0x0;
- if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
- VEX_X = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(MemOperand +
- X86::AddrIndexReg).getReg()))
- EVEX_V2 = 0x0;
+ unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
+ VEX_B = ~(BaseRegEnc >> 3) & 1;
+ unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
+ VEX_X = ~(IndexRegEnc >> 3) & 1;
+ if (!HasVEX_4V) // Only needed with VSIB which don't use VVVV.
+ EVEX_V2 = ~(IndexRegEnc >> 4) & 1;
if (HasVEX_4VOp3)
// Instruction format for 4VOp3:
@@ -794,8 +776,9 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// CurOp points to start of the MemoryOperand,
// it skips TIED_TO operands if exist, then increments past src1.
// CurOp + X86::AddrNumOperands will point to src3.
- VEX_4V = getVEXRegisterEncoding(MI, CurOp+X86::AddrNumOperands);
+ VEX_4V = ~getX86RegEncoding(MI, CurOp + X86::AddrNumOperands) & 0xf;
break;
+ }
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
@@ -804,24 +787,21 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// MemAddr
// src1(VEX_4V), MemAddr
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
-
- if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemOperand+X86::AddrBaseReg).getReg()))
- VEX_B = 0x0;
- if (X86II::isX86_64ExtendedReg(
- MI.getOperand(MemOperand+X86::AddrIndexReg).getReg()))
- VEX_X = 0x0;
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
+
+ unsigned BaseRegEnc = getX86RegEncoding(MI, MemOperand + X86::AddrBaseReg);
+ VEX_B = ~(BaseRegEnc >> 3) & 1;
+ unsigned IndexRegEnc = getX86RegEncoding(MI, MemOperand+X86::AddrIndexReg);
+ VEX_X = ~(IndexRegEnc >> 3) & 1;
break;
}
- case X86II::MRMSrcReg:
+ case X86II::MRMSrcReg: {
// MRMSrcReg instructions forms:
// dst(ModR/M), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
// dst(ModR/M), src1(ModR/M)
@@ -830,32 +810,27 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// FMA4:
// dst(ModR/M.reg), src1(VEX_4V), src2(ModR/M), src3(VEX_I8IMM)
// dst(ModR/M.reg), src1(VEX_4V), src2(VEX_I8IMM), src3(ModR/M),
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_R = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_R2 = 0x0;
- CurOp++;
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+ EVEX_R2 = ~(RegEnc >> 4) & 1;
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
if (HasMemOp4) // Skip second register source (encoded in I8IMM)
CurOp++;
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_B = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_X = 0x0;
- CurOp++;
+ RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_B = ~(RegEnc >> 3) & 1;
+ VEX_X = ~(RegEnc >> 4) & 1;
if (HasVEX_4VOp3)
- VEX_4V = getVEXRegisterEncoding(MI, CurOp++);
+ VEX_4V = ~getX86RegEncoding(MI, CurOp++) & 0xf;
if (EVEX_b) {
if (HasEVEX_RC) {
unsigned RcOperand = NumOps-1;
@@ -865,55 +840,52 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
EncodeRC = true;
}
break;
- case X86II::MRMDestReg:
+ }
+ case X86II::MRMDestReg: {
// MRMDestReg instructions forms:
// dst(ModR/M), src(ModR/M)
// dst(ModR/M), src(ModR/M), imm8
// dst(ModR/M), src1(VEX_4V), src2(ModR/M)
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_B = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_X = 0x0;
- CurOp++;
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_B = ~(RegEnc >> 3) & 1;
+ VEX_X = ~(RegEnc >> 4) & 1;
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_R = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_R2 = 0x0;
+ RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_R = ~(RegEnc >> 3) & 1;
+ EVEX_R2 = ~(RegEnc >> 4) & 1;
if (EVEX_b)
EncodeRC = true;
break;
+ }
case X86II::MRM0r: case X86II::MRM1r:
case X86II::MRM2r: case X86II::MRM3r:
case X86II::MRM4r: case X86II::MRM5r:
- case X86II::MRM6r: case X86II::MRM7r:
+ case X86II::MRM6r: case X86II::MRM7r: {
// MRM0r-MRM7r instructions forms:
// dst(VEX_4V), src(ModR/M), imm8
if (HasVEX_4V) {
- VEX_4V = getVEXRegisterEncoding(MI, CurOp);
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- EVEX_V2 = 0x0;
- CurOp++;
+ unsigned VRegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_4V = ~VRegEnc & 0xf;
+ EVEX_V2 = ~(VRegEnc >> 4) & 1;
}
if (HasEVEX_K)
- EVEX_aaa = getWriteMaskRegisterEncoding(MI, CurOp++);
+ EVEX_aaa = getX86RegEncoding(MI, CurOp++);
- if (X86II::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_B = 0x0;
- if (X86II::is32ExtendedReg(MI.getOperand(CurOp).getReg()))
- VEX_X = 0x0;
+ unsigned RegEnc = getX86RegEncoding(MI, CurOp++);
+ VEX_B = ~(RegEnc >> 3) & 1;
+ VEX_X = ~(RegEnc >> 4) & 1;
break;
}
+ }
if (Encoding == X86II::VEX || Encoding == X86II::XOP) {
// VEX opcode prefix can have 2 or 3 bytes
@@ -931,7 +903,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// +-----+ +--------------+ +-------------------+
// | 8Fh | | RXB | m-mmmm | | W | vvvv | L | pp |
// +-----+ +--------------+ +-------------------+
- unsigned char LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
+ uint8_t LastByte = VEX_PP | (VEX_L << 2) | (VEX_4V << 3);
// Can we use the 2 byte VEX prefix?
if (Encoding == X86II::VEX && VEX_B && VEX_X && !VEX_W && (VEX_5M == 1)) {
@@ -954,8 +926,6 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
assert((VEX_5M & 0x3) == VEX_5M
&& "More than 2 significant bits in VEX.m-mmmm fields for EVEX!");
- VEX_5M &= 0x3;
-
EmitByte(0x62, CurByte, OS);
EmitByte((VEX_R << 7) |
(VEX_X << 6) |
@@ -968,26 +938,27 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
VEX_PP, CurByte, OS);
if (EncodeRC)
EmitByte((EVEX_z << 7) |
- (EVEX_rc << 5) |
- (EVEX_b << 4) |
- (EVEX_V2 << 3) |
- EVEX_aaa, CurByte, OS);
+ (EVEX_rc << 5) |
+ (EVEX_b << 4) |
+ (EVEX_V2 << 3) |
+ EVEX_aaa, CurByte, OS);
else
EmitByte((EVEX_z << 7) |
- (EVEX_L2 << 6) |
- (VEX_L << 5) |
- (EVEX_b << 4) |
- (EVEX_V2 << 3) |
- EVEX_aaa, CurByte, OS);
+ (EVEX_L2 << 6) |
+ (VEX_L << 5) |
+ (EVEX_b << 4) |
+ (EVEX_V2 << 3) |
+ EVEX_aaa, CurByte, OS);
}
}
/// DetermineREXPrefix - Determine if the MCInst has to be encoded with a X86-64
/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand
/// size, and 3) use of X86-64 extended registers.
-static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
- const MCInstrDesc &Desc) {
- unsigned REX = 0;
+uint8_t X86MCCodeEmitter::DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
+ int MemOperand,
+ const MCInstrDesc &Desc) const {
+ uint8_t REX = 0;
bool UsesHighByteReg = false;
if (TSFlags & X86II::REX_W)
@@ -996,13 +967,10 @@ static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
if (MI.getNumOperands() == 0) return REX;
unsigned NumOps = MI.getNumOperands();
- // FIXME: MCInst should explicitize the two-addrness.
- bool isTwoAddr = NumOps > 1 &&
- Desc.getOperandConstraint(1, MCOI::TIED_TO) != -1;
+ unsigned CurOp = X86II::getOperandBias(Desc);
// If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix.
- unsigned i = isTwoAddr ? 1 : 0;
- for (; i != NumOps; ++i) {
+ for (unsigned i = CurOp; i != NumOps; ++i) {
const MCOperand &MO = MI.getOperand(i);
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
@@ -1016,65 +984,44 @@ static unsigned DetermineREXPrefix(const MCInst &MI, uint64_t TSFlags,
}
switch (TSFlags & X86II::FormMask) {
+ case X86II::AddRegFrm:
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
+ break;
case X86II::MRMSrcReg:
- if (MI.getOperand(0).isReg() &&
- X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 2; // set REX.R
- i = isTwoAddr ? 2 : 1;
- for (; i != NumOps; ++i) {
- const MCOperand &MO = MI.getOperand(i);
- if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << 0; // set REX.B
- }
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
break;
case X86II::MRMSrcMem: {
- if (MI.getOperand(0).isReg() &&
- X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 2; // set REX.R
- unsigned Bit = 0;
- i = isTwoAddr ? 2 : 1;
- for (; i != NumOps; ++i) {
- const MCOperand &MO = MI.getOperand(i);
- if (MO.isReg()) {
- if (X86II::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << Bit; // set REX.B (Bit=0) and REX.X (Bit=1)
- Bit++;
- }
- }
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
+ CurOp += X86::AddrNumOperands;
break;
}
+ case X86II::MRMDestReg:
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
+ break;
+ case X86II::MRMDestMem:
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
+ CurOp += X86::AddrNumOperands;
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 2; // REX.R
+ break;
case X86II::MRMXm:
case X86II::MRM0m: case X86II::MRM1m:
case X86II::MRM2m: case X86II::MRM3m:
case X86II::MRM4m: case X86II::MRM5m:
case X86II::MRM6m: case X86II::MRM7m:
- case X86II::MRMDestMem: {
- unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands);
- i = isTwoAddr ? 1 : 0;
- if (NumOps > e && MI.getOperand(e).isReg() &&
- X86II::isX86_64ExtendedReg(MI.getOperand(e).getReg()))
- REX |= 1 << 2; // set REX.R
- unsigned Bit = 0;
- for (; i != e; ++i) {
- const MCOperand &MO = MI.getOperand(i);
- if (MO.isReg()) {
- if (X86II::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << Bit; // REX.B (Bit=0) and REX.X (Bit=1)
- Bit++;
- }
- }
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrBaseReg) << 0; // REX.B
+ REX |= isX86_64ExtendedReg(MI, MemOperand+X86::AddrIndexReg) << 1; // REX.X
break;
- }
- default:
- if (MI.getOperand(0).isReg() &&
- X86II::isX86_64ExtendedReg(MI.getOperand(0).getReg()))
- REX |= 1 << 0; // set REX.B
- i = isTwoAddr ? 2 : 1;
- for (unsigned e = NumOps; i != e; ++i) {
- const MCOperand &MO = MI.getOperand(i);
- if (MO.isReg() && X86II::isX86_64ExtendedReg(MO.getReg()))
- REX |= 1 << 2; // set REX.R
- }
+ case X86II::MRMXr:
+ case X86II::MRM0r: case X86II::MRM1r:
+ case X86II::MRM2r: case X86II::MRM3r:
+ case X86II::MRM4r: case X86II::MRM5r:
+ case X86II::MRM6r: case X86II::MRM7r:
+ REX |= isX86_64ExtendedReg(MI, CurOp++) << 0; // REX.B
break;
}
if (REX && UsesHighByteReg)
@@ -1101,16 +1048,18 @@ void X86MCCodeEmitter::EmitSegmentOverridePrefix(unsigned &CurByte,
}
}
-/// EmitOpcodePrefix - Emit all instruction prefixes prior to the opcode.
+/// Emit all instruction prefixes prior to the opcode.
///
/// MemOperand is the operand # of the start of a memory operand if present. If
/// Not present, it is -1.
-void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
+///
+/// Returns true if a REX prefix was used.
+bool X86MCCodeEmitter::emitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
int MemOperand, const MCInst &MI,
const MCInstrDesc &Desc,
const MCSubtargetInfo &STI,
raw_ostream &OS) const {
-
+ bool Ret = false;
// Emit the operand size opcode prefix as needed.
if ((TSFlags & X86II::OpSizeMask) == (is16BitMode(STI) ? X86II::OpSize32
: X86II::OpSize16))
@@ -1135,8 +1084,10 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
// Handle REX prefix.
// FIXME: Can this come before F2 etc to simplify emission?
if (is64BitMode(STI)) {
- if (unsigned REX = DetermineREXPrefix(MI, TSFlags, Desc))
+ if (uint8_t REX = DetermineREXPrefix(MI, TSFlags, MemOperand, Desc)) {
EmitByte(0x40 | REX, CurByte, OS);
+ Ret = true;
+ }
}
// 0x0F escape code must be emitted just before the opcode.
@@ -1156,6 +1107,7 @@ void X86MCCodeEmitter::EmitOpcodePrefix(uint64_t TSFlags, unsigned &CurByte,
EmitByte(0x3A, CurByte, OS);
break;
}
+ return Ret;
}
void X86MCCodeEmitter::
@@ -1183,14 +1135,18 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
bool HasVEX_4V = TSFlags & X86II::VEX_4V;
bool HasVEX_4VOp3 = TSFlags & X86II::VEX_4VOp3;
bool HasMemOp4 = TSFlags & X86II::MemOp4;
- const unsigned MemOp4_I8IMMOperand = 2;
+ bool HasVEX_I8IMM = TSFlags & X86II::VEX_I8IMM;
+ assert((!HasMemOp4 || HasVEX_I8IMM) && "MemOp4 should imply VEX_I8IMM");
// It uses the EVEX.aaa field?
bool HasEVEX_K = TSFlags & X86II::EVEX_K;
bool HasEVEX_RC = TSFlags & X86II::EVEX_RC;
+ // Used if a register is encoded in 7:4 of immediate.
+ unsigned I8RegNum = 0;
+
// Determine where the memory operand starts, if present.
- int MemoryOperand = X86II::getMemoryOperandNo(TSFlags, Opcode);
+ int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
if (MemoryOperand != -1) MemoryOperand += CurOp;
// Emit segment override opcode prefix as needed.
@@ -1226,19 +1182,20 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
if (need_address_override)
EmitByte(0x67, CurByte, OS);
+ bool Rex = false;
if (Encoding == 0)
- EmitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
+ Rex = emitOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, STI, OS);
else
EmitVEXOpcodePrefix(TSFlags, CurByte, MemoryOperand, MI, Desc, OS);
- unsigned char BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
+ uint8_t BaseOpcode = X86II::getBaseOpcodeFor(TSFlags);
if (TSFlags & X86II::Has3DNow0F0FOpcode)
BaseOpcode = 0x0F; // Weird 3DNow! encoding.
- unsigned SrcRegNum = 0;
- switch (TSFlags & X86II::FormMask) {
- default: errs() << "FORM: " << (TSFlags & X86II::FormMask) << "\n";
+ uint64_t Form = TSFlags & X86II::FormMask;
+ switch (Form) {
+ default: errs() << "FORM: " << Form << "\n";
llvm_unreachable("Unknown FormMask value in X86MCCodeEmitter!");
case X86II::Pseudo:
llvm_unreachable("Pseudo instruction shouldn't be emitted");
@@ -1315,12 +1272,12 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS);
break;
- case X86II::MRMDestReg:
+ case X86II::MRMDestReg: {
EmitByte(BaseOpcode, CurByte, OS);
- SrcRegNum = CurOp + 1;
+ unsigned SrcRegNum = CurOp + 1;
if (HasEVEX_K) // Skip writemask
- SrcRegNum++;
+ ++SrcRegNum;
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
++SrcRegNum;
@@ -1329,71 +1286,68 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
GetX86RegNum(MI.getOperand(SrcRegNum)), CurByte, OS);
CurOp = SrcRegNum + 1;
break;
-
- case X86II::MRMDestMem:
+ }
+ case X86II::MRMDestMem: {
EmitByte(BaseOpcode, CurByte, OS);
- SrcRegNum = CurOp + X86::AddrNumOperands;
+ unsigned SrcRegNum = CurOp + X86::AddrNumOperands;
if (HasEVEX_K) // Skip writemask
- SrcRegNum++;
+ ++SrcRegNum;
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
++SrcRegNum;
- EmitMemModRMByte(MI, CurOp,
- GetX86RegNum(MI.getOperand(SrcRegNum)),
- TSFlags, CurByte, OS, Fixups, STI);
+ emitMemModRMByte(MI, CurOp, GetX86RegNum(MI.getOperand(SrcRegNum)), TSFlags,
+ Rex, CurByte, OS, Fixups, STI);
CurOp = SrcRegNum + 1;
break;
-
- case X86II::MRMSrcReg:
+ }
+ case X86II::MRMSrcReg: {
EmitByte(BaseOpcode, CurByte, OS);
- SrcRegNum = CurOp + 1;
+ unsigned SrcRegNum = CurOp + 1;
if (HasEVEX_K) // Skip writemask
- SrcRegNum++;
+ ++SrcRegNum;
if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV)
++SrcRegNum;
- if (HasMemOp4) // Skip 2nd src (which is encoded in I8IMM)
- ++SrcRegNum;
+ if (HasMemOp4) // Capture 2nd src (which is encoded in I8IMM)
+ I8RegNum = getX86RegEncoding(MI, SrcRegNum++);
EmitRegModRMByte(MI.getOperand(SrcRegNum),
GetX86RegNum(MI.getOperand(CurOp)), CurByte, OS);
-
- // 2 operands skipped with HasMemOp4, compensate accordingly
- CurOp = HasMemOp4 ? SrcRegNum : SrcRegNum + 1;
+ CurOp = SrcRegNum + 1;
if (HasVEX_4VOp3)
++CurOp;
+ if (!HasMemOp4 && HasVEX_I8IMM)
+ I8RegNum = getX86RegEncoding(MI, CurOp++);
// do not count the rounding control operand
if (HasEVEX_RC)
- NumOps--;
+ --NumOps;
break;
-
+ }
case X86II::MRMSrcMem: {
- int AddrOperands = X86::AddrNumOperands;
unsigned FirstMemOp = CurOp+1;
- if (HasEVEX_K) { // Skip writemask
- ++AddrOperands;
+ if (HasEVEX_K) // Skip writemask
++FirstMemOp;
- }
- if (HasVEX_4V) {
- ++AddrOperands;
+ if (HasVEX_4V)
++FirstMemOp; // Skip the register source (which is encoded in VEX_VVVV).
- }
- if (HasMemOp4) // Skip second register source (encoded in I8IMM)
- ++FirstMemOp;
+
+ if (HasMemOp4) // Capture second register source (encoded in I8IMM)
+ I8RegNum = getX86RegEncoding(MI, FirstMemOp++);
EmitByte(BaseOpcode, CurByte, OS);
- EmitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
- TSFlags, CurByte, OS, Fixups, STI);
- CurOp += AddrOperands + 1;
+ emitMemModRMByte(MI, FirstMemOp, GetX86RegNum(MI.getOperand(CurOp)),
+ TSFlags, Rex, CurByte, OS, Fixups, STI);
+ CurOp = FirstMemOp + X86::AddrNumOperands;
if (HasVEX_4VOp3)
++CurOp;
+ if (!HasMemOp4 && HasVEX_I8IMM)
+ I8RegNum = getX86RegEncoding(MI, CurOp++);
break;
}
@@ -1407,7 +1361,6 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
if (HasEVEX_K) // Skip writemask
++CurOp;
EmitByte(BaseOpcode, CurByte, OS);
- uint64_t Form = TSFlags & X86II::FormMask;
EmitRegModRMByte(MI.getOperand(CurOp++),
(Form == X86II::MRMXr) ? 0 : Form-X86II::MRM0r,
CurByte, OS);
@@ -1424,9 +1377,9 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
if (HasEVEX_K) // Skip writemask
++CurOp;
EmitByte(BaseOpcode, CurByte, OS);
- uint64_t Form = TSFlags & X86II::FormMask;
- EmitMemModRMByte(MI, CurOp, (Form == X86II::MRMXm) ? 0 : Form-X86II::MRM0m,
- TSFlags, CurByte, OS, Fixups, STI);
+ emitMemModRMByte(MI, CurOp,
+ (Form == X86II::MRMXm) ? 0 : Form - X86II::MRM0m, TSFlags,
+ Rex, CurByte, OS, Fixups, STI);
CurOp += X86::AddrNumOperands;
break;
}
@@ -1453,38 +1406,27 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS,
case X86II::MRM_FC: case X86II::MRM_FD: case X86II::MRM_FE:
case X86II::MRM_FF:
EmitByte(BaseOpcode, CurByte, OS);
-
- uint64_t Form = TSFlags & X86II::FormMask;
EmitByte(0xC0 + Form - X86II::MRM_C0, CurByte, OS);
break;
}
- // If there is a remaining operand, it must be a trailing immediate. Emit it
- // according to the right size for the instruction. Some instructions
- // (SSE4a extrq and insertq) have two trailing immediates.
- while (CurOp != NumOps && NumOps - CurOp <= 2) {
+ if (HasVEX_I8IMM) {
// The last source register of a 4 operand instruction in AVX is encoded
// in bits[7:4] of a immediate byte.
- if (TSFlags & X86II::VEX_I8IMM) {
- const MCOperand &MO = MI.getOperand(HasMemOp4 ? MemOp4_I8IMMOperand
- : CurOp);
- ++CurOp;
- unsigned RegNum = GetX86RegNum(MO) << 4;
- if (X86II::isX86_64ExtendedReg(MO.getReg()))
- RegNum |= 1 << 7;
- // If there is an additional 5th operand it must be an immediate, which
- // is encoded in bits[3:0]
- if (CurOp != NumOps) {
- const MCOperand &MIMM = MI.getOperand(CurOp++);
- if (MIMM.isImm()) {
- unsigned Val = MIMM.getImm();
- assert(Val < 16 && "Immediate operand value out of range");
- RegNum |= Val;
- }
- }
- EmitImmediate(MCOperand::createImm(RegNum), MI.getLoc(), 1, FK_Data_1,
- CurByte, OS, Fixups);
- } else {
+ assert(I8RegNum < 16 && "Register encoding out of range");
+ I8RegNum <<= 4;
+ if (CurOp != NumOps) {
+ unsigned Val = MI.getOperand(CurOp++).getImm();
+ assert(Val < 16 && "Immediate operand value out of range");
+ I8RegNum |= Val;
+ }
+ EmitImmediate(MCOperand::createImm(I8RegNum), MI.getLoc(), 1, FK_Data_1,
+ CurByte, OS, Fixups);
+ } else {
+ // If there is a remaining operand, it must be a trailing immediate. Emit it
+ // according to the right size for the instruction. Some instructions
+ // (SSE4a extrq and insertq) have two trailing immediates.
+ while (CurOp != NumOps && NumOps - CurOp <= 2) {
EmitImmediate(MI.getOperand(CurOp++), MI.getLoc(),
X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags),
CurByte, OS, Fixups);
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
index 53a6550acdd5..311a8d677eea 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -16,7 +16,6 @@
#include "InstPrinter/X86IntelInstPrinter.h"
#include "X86MCAsmInfo.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/MC/MCCodeGenInfo.h"
#include "llvm/MC/MCInstrAnalysis.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
@@ -66,12 +65,59 @@ unsigned X86_MC::getDwarfRegFlavour(const Triple &TT, bool isEH) {
return DWARFFlavour::X86_32_Generic;
}
-void X86_MC::InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI) {
+void X86_MC::initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI) {
// FIXME: TableGen these.
- for (unsigned Reg = X86::NoRegister+1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
+ for (unsigned Reg = X86::NoRegister + 1; Reg < X86::NUM_TARGET_REGS; ++Reg) {
unsigned SEH = MRI->getEncodingValue(Reg);
MRI->mapLLVMRegToSEHReg(Reg, SEH);
}
+
+ // These CodeView registers are numbered sequentially starting at value 1.
+ static const MCPhysReg LowCVRegs[] = {
+ X86::AL, X86::CL, X86::DL, X86::BL, X86::AH, X86::CH,
+ X86::DH, X86::BH, X86::AX, X86::CX, X86::DX, X86::BX,
+ X86::SP, X86::BP, X86::SI, X86::DI, X86::EAX, X86::ECX,
+ X86::EDX, X86::EBX, X86::ESP, X86::EBP, X86::ESI, X86::EDI,
+ };
+ unsigned CVLowRegStart = 1;
+ for (unsigned I = 0; I < array_lengthof(LowCVRegs); ++I)
+ MRI->mapLLVMRegToCVReg(LowCVRegs[I], I + CVLowRegStart);
+
+ MRI->mapLLVMRegToCVReg(X86::EFLAGS, 34);
+
+ // The x87 registers start at 128 and are numbered sequentially.
+ unsigned FP0Start = 128;
+ for (unsigned I = 0; I < 8; ++I)
+ MRI->mapLLVMRegToCVReg(X86::FP0 + I, FP0Start + I);
+
+ // The low 8 XMM registers start at 154 and are numbered sequentially.
+ unsigned CVXMM0Start = 154;
+ for (unsigned I = 0; I < 8; ++I)
+ MRI->mapLLVMRegToCVReg(X86::XMM0 + I, CVXMM0Start + I);
+
+ // The high 8 XMM registers start at 252 and are numbered sequentially.
+ unsigned CVXMM8Start = 252;
+ for (unsigned I = 0; I < 8; ++I)
+ MRI->mapLLVMRegToCVReg(X86::XMM8 + I, CVXMM8Start + I);
+
+ // FIXME: XMM16 and above from AVX512 not yet documented.
+
+ // AMD64 registers start at 324 and count up.
+ unsigned CVX64RegStart = 324;
+ static const MCPhysReg CVX64Regs[] = {
+ X86::SIL, X86::DIL, X86::BPL, X86::SPL, X86::RAX, X86::RBX,
+ X86::RCX, X86::RDX, X86::RSI, X86::RDI, X86::RBP, X86::RSP,
+ X86::R8, X86::R9, X86::R10, X86::R11, X86::R12, X86::R13,
+ X86::R14, X86::R15, X86::R8B, X86::R9B, X86::R10B, X86::R11B,
+ X86::R12B, X86::R13B, X86::R14B, X86::R15B, X86::R8W, X86::R9W,
+ X86::R10W, X86::R11W, X86::R12W, X86::R13W, X86::R14W, X86::R15W,
+ X86::R8D, X86::R9D, X86::R10D, X86::R11D, X86::R12D, X86::R13D,
+ X86::R14D, X86::R15D, X86::YMM0, X86::YMM1, X86::YMM2, X86::YMM3,
+ X86::YMM4, X86::YMM5, X86::YMM6, X86::YMM7, X86::YMM8, X86::YMM9,
+ X86::YMM10, X86::YMM11, X86::YMM12, X86::YMM13, X86::YMM14, X86::YMM15,
+ };
+ for (unsigned I = 0; I < array_lengthof(CVX64Regs); ++I)
+ MRI->mapLLVMRegToCVReg(CVX64Regs[I], CVX64RegStart + I);
}
MCSubtargetInfo *X86_MC::createX86MCSubtargetInfo(const Triple &TT,
@@ -105,7 +151,7 @@ static MCRegisterInfo *createX86MCRegisterInfo(const Triple &TT) {
MCRegisterInfo *X = new MCRegisterInfo();
InitX86MCRegisterInfo(X, RA, X86_MC::getDwarfRegFlavour(TT, false),
X86_MC::getDwarfRegFlavour(TT, true), RA);
- X86_MC::InitLLVM2SEHRegisterMapping(X);
+ X86_MC::initLLVMToSEHAndCVRegMapping(X);
return X;
}
@@ -152,53 +198,16 @@ static MCAsmInfo *createX86MCAsmInfo(const MCRegisterInfo &MRI,
return MAI;
}
-static MCCodeGenInfo *createX86MCCodeGenInfo(const Triple &TT, Reloc::Model RM,
- CodeModel::Model CM,
- CodeGenOpt::Level OL) {
- MCCodeGenInfo *X = new MCCodeGenInfo();
-
+static void adjustCodeGenOpts(const Triple &TT, Reloc::Model RM,
+ CodeModel::Model &CM) {
bool is64Bit = TT.getArch() == Triple::x86_64;
- if (RM == Reloc::Default) {
- // Darwin defaults to PIC in 64 bit mode and dynamic-no-pic in 32 bit mode.
- // Win64 requires rip-rel addressing, thus we force it to PIC. Otherwise we
- // use static relocation model by default.
- if (TT.isOSDarwin()) {
- if (is64Bit)
- RM = Reloc::PIC_;
- else
- RM = Reloc::DynamicNoPIC;
- } else if (TT.isOSWindows() && is64Bit)
- RM = Reloc::PIC_;
- else
- RM = Reloc::Static;
- }
-
- // ELF and X86-64 don't have a distinct DynamicNoPIC model. DynamicNoPIC
- // is defined as a model for code which may be used in static or dynamic
- // executables but not necessarily a shared library. On X86-32 we just
- // compile in -static mode, in x86-64 we use PIC.
- if (RM == Reloc::DynamicNoPIC) {
- if (is64Bit)
- RM = Reloc::PIC_;
- else if (!TT.isOSDarwin())
- RM = Reloc::Static;
- }
-
- // If we are on Darwin, disallow static relocation model in X86-64 mode, since
- // the Mach-O file format doesn't support it.
- if (RM == Reloc::Static && TT.isOSDarwin() && is64Bit)
- RM = Reloc::PIC_;
-
// For static codegen, if we're not already set, use Small codegen.
if (CM == CodeModel::Default)
CM = CodeModel::Small;
else if (CM == CodeModel::JITDefault)
// 64-bit JIT places everything in the same buffer except external funcs.
CM = is64Bit ? CodeModel::Large : CodeModel::Small;
-
- X->initMCCodeGenInfo(RM, CM, OL);
- return X;
}
static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
@@ -215,10 +224,6 @@ static MCInstPrinter *createX86MCInstPrinter(const Triple &T,
static MCRelocationInfo *createX86MCRelocationInfo(const Triple &TheTriple,
MCContext &Ctx) {
- if (TheTriple.isOSBinFormatMachO() && TheTriple.getArch() == Triple::x86_64)
- return createX86_64MachORelocationInfo(Ctx);
- else if (TheTriple.isOSBinFormatELF())
- return createX86_64ELFRelocationInfo(Ctx);
// Default to the stock relocation info.
return llvm::createMCRelocationInfo(TheTriple, Ctx);
}
@@ -234,7 +239,7 @@ extern "C" void LLVMInitializeX86TargetMC() {
RegisterMCAsmInfoFn X(*T, createX86MCAsmInfo);
// Register the MC codegen info.
- RegisterMCCodeGenInfoFn Y(*T, createX86MCCodeGenInfo);
+ RegisterMCAdjustCodeGenOptsFn Y(*T, adjustCodeGenOpts);
// Register the MC instruction info.
TargetRegistry::RegisterMCInstrInfo(*T, createX86MCInstrInfo);
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
index 2d2836ff07c5..ca4f0d3e17d5 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MCTargetDesc.h
@@ -14,6 +14,7 @@
#ifndef LLVM_LIB_TARGET_X86_MCTARGETDESC_X86MCTARGETDESC_H
#define LLVM_LIB_TARGET_X86_MCTARGETDESC_X86MCTARGETDESC_H
+#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/DataTypes.h"
#include <string>
@@ -26,7 +27,6 @@ class MCObjectWriter;
class MCRegisterInfo;
class MCSubtargetInfo;
class MCRelocationInfo;
-class MCStreamer;
class Target;
class Triple;
class StringRef;
@@ -56,7 +56,7 @@ std::string ParseX86Triple(const Triple &TT);
unsigned getDwarfRegFlavour(const Triple &TT, bool isEH);
-void InitLLVM2SEHRegisterMapping(MCRegisterInfo *MRI);
+void initLLVMToSEHAndCVRegMapping(MCRegisterInfo *MRI);
/// Create a X86 MCSubtargetInfo instance. This is exposed so Asm parser, etc.
/// do not need to go through TargetRegistry.
@@ -93,12 +93,6 @@ MCObjectWriter *createX86ELFObjectWriter(raw_pwrite_stream &OS, bool IsELF64,
MCObjectWriter *createX86WinCOFFObjectWriter(raw_pwrite_stream &OS,
bool Is64Bit);
-/// Construct X86-64 Mach-O relocation info.
-MCRelocationInfo *createX86_64MachORelocationInfo(MCContext &Ctx);
-
-/// Construct X86-64 ELF relocation info.
-MCRelocationInfo *createX86_64ELFRelocationInfo(MCContext &Ctx);
-
/// Returns the sub or super register of a specific X86 register.
/// e.g. getX86SubSuperRegister(X86::EAX, 16) returns X86::AX.
/// Aborts on error.
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp
deleted file mode 100644
index 9bfe999424fa..000000000000
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachORelocationInfo.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-//===-- X86MachORelocationInfo.cpp ----------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#include "MCTargetDesc/X86MCTargetDesc.h"
-#include "llvm/MC/MCContext.h"
-#include "llvm/MC/MCExpr.h"
-#include "llvm/MC/MCInst.h"
-#include "llvm/MC/MCRelocationInfo.h"
-#include "llvm/MC/MCSymbol.h"
-#include "llvm/Object/MachO.h"
-
-using namespace llvm;
-using namespace object;
-using namespace MachO;
-
-namespace {
-class X86_64MachORelocationInfo : public MCRelocationInfo {
-public:
- X86_64MachORelocationInfo(MCContext &Ctx) : MCRelocationInfo(Ctx) {}
-
- const MCExpr *createExprForRelocation(RelocationRef Rel) override {
- const MachOObjectFile *Obj = cast<MachOObjectFile>(Rel.getObject());
-
- uint64_t RelType = Rel.getType();
- symbol_iterator SymI = Rel.getSymbol();
-
- ErrorOr<StringRef> SymNameOrErr = SymI->getName();
- if (std::error_code EC = SymNameOrErr.getError())
- report_fatal_error(EC.message());
- StringRef SymName = *SymNameOrErr;
- uint64_t SymAddr = SymI->getValue();
-
- any_relocation_info RE = Obj->getRelocation(Rel.getRawDataRefImpl());
- bool isPCRel = Obj->getAnyRelocationPCRel(RE);
-
- MCSymbol *Sym = Ctx.getOrCreateSymbol(SymName);
- // FIXME: check that the value is actually the same.
- if (!Sym->isVariable())
- Sym->setVariableValue(MCConstantExpr::create(SymAddr, Ctx));
- const MCExpr *Expr = nullptr;
-
- switch(RelType) {
- case X86_64_RELOC_TLV:
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_TLVP, Ctx);
- break;
- case X86_64_RELOC_SIGNED_4:
- Expr = MCBinaryExpr::createAdd(MCSymbolRefExpr::create(Sym, Ctx),
- MCConstantExpr::create(4, Ctx),
- Ctx);
- break;
- case X86_64_RELOC_SIGNED_2:
- Expr = MCBinaryExpr::createAdd(MCSymbolRefExpr::create(Sym, Ctx),
- MCConstantExpr::create(2, Ctx),
- Ctx);
- break;
- case X86_64_RELOC_SIGNED_1:
- Expr = MCBinaryExpr::createAdd(MCSymbolRefExpr::create(Sym, Ctx),
- MCConstantExpr::create(1, Ctx),
- Ctx);
- break;
- case X86_64_RELOC_GOT_LOAD:
- Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_GOTPCREL, Ctx);
- break;
- case X86_64_RELOC_GOT:
- Expr = MCSymbolRefExpr::create(Sym, isPCRel ?
- MCSymbolRefExpr::VK_GOTPCREL :
- MCSymbolRefExpr::VK_GOT,
- Ctx);
- break;
- case X86_64_RELOC_SUBTRACTOR:
- {
- Rel.moveNext();
- any_relocation_info RENext =
- Obj->getRelocation(Rel.getRawDataRefImpl());
-
- // X86_64_SUBTRACTOR must be followed by a relocation of type
- // X86_64_RELOC_UNSIGNED.
- // NOTE: Scattered relocations don't exist on x86_64.
- unsigned RType = Obj->getAnyRelocationType(RENext);
- if (RType != X86_64_RELOC_UNSIGNED)
- report_fatal_error("Expected X86_64_RELOC_UNSIGNED after "
- "X86_64_RELOC_SUBTRACTOR.");
-
- const MCExpr *LHS = MCSymbolRefExpr::create(Sym, Ctx);
-
- symbol_iterator RSymI = Rel.getSymbol();
- uint64_t RSymAddr = RSymI->getValue();
- ErrorOr<StringRef> RSymName = RSymI->getName();
- if (std::error_code EC = RSymName.getError())
- report_fatal_error(EC.message());
-
- MCSymbol *RSym = Ctx.getOrCreateSymbol(*RSymName);
- if (!RSym->isVariable())
- RSym->setVariableValue(MCConstantExpr::create(RSymAddr, Ctx));
-
- const MCExpr *RHS = MCSymbolRefExpr::create(RSym, Ctx);
-
- Expr = MCBinaryExpr::createSub(LHS, RHS, Ctx);
- break;
- }
- default:
- Expr = MCSymbolRefExpr::create(Sym, Ctx);
- break;
- }
- return Expr;
- }
-};
-} // End unnamed namespace
-
-/// createX86_64MachORelocationInfo - Construct an X86-64 Mach-O RelocationInfo.
-MCRelocationInfo *llvm::createX86_64MachORelocationInfo(MCContext &Ctx) {
- return new X86_64MachORelocationInfo(Ctx);
-}
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
index 191ebeac7265..297926ddcfda 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86MachObjectWriter.cpp
@@ -73,7 +73,9 @@ public:
static bool isFixupKindRIPRel(unsigned Kind) {
return Kind == X86::reloc_riprel_4byte ||
- Kind == X86::reloc_riprel_4byte_movq_load;
+ Kind == X86::reloc_riprel_4byte_movq_load ||
+ Kind == X86::reloc_riprel_4byte_relax ||
+ Kind == X86::reloc_riprel_4byte_relax_rex;
}
static unsigned getFixupKindLog2Size(unsigned Kind) {
@@ -87,8 +89,11 @@ static unsigned getFixupKindLog2Size(unsigned Kind) {
case FK_PCRel_4:
// FIXME: Remove these!!!
case X86::reloc_riprel_4byte:
+ case X86::reloc_riprel_4byte_relax:
+ case X86::reloc_riprel_4byte_relax_rex:
case X86::reloc_riprel_4byte_movq_load:
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
case FK_Data_4: return 2;
case FK_Data_8: return 3;
}
diff --git a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
index bd1bc9943b6d..33376b6d1b90 100644
--- a/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
+++ b/contrib/llvm/lib/Target/X86/MCTargetDesc/X86WinCOFFObjectWriter.cpp
@@ -53,11 +53,16 @@ unsigned X86WinCOFFObjectWriter::getRelocType(const MCValue &Target,
case FK_PCRel_4:
case X86::reloc_riprel_4byte:
case X86::reloc_riprel_4byte_movq_load:
+ case X86::reloc_riprel_4byte_relax:
+ case X86::reloc_riprel_4byte_relax_rex:
return COFF::IMAGE_REL_AMD64_REL32;
case FK_Data_4:
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
if (Modifier == MCSymbolRefExpr::VK_COFF_IMGREL32)
return COFF::IMAGE_REL_AMD64_ADDR32NB;
+ if (Modifier == MCSymbolRefExpr::VK_SECREL)
+ return COFF::IMAGE_REL_AMD64_SECREL;
return COFF::IMAGE_REL_AMD64_ADDR32;
case FK_Data_8:
return COFF::IMAGE_REL_AMD64_ADDR64;
@@ -76,8 +81,11 @@ unsigned X86WinCOFFObjectWriter::getRelocType(const MCValue &Target,
return COFF::IMAGE_REL_I386_REL32;
case FK_Data_4:
case X86::reloc_signed_4byte:
+ case X86::reloc_signed_4byte_relax:
if (Modifier == MCSymbolRefExpr::VK_COFF_IMGREL32)
return COFF::IMAGE_REL_I386_DIR32NB;
+ if (Modifier == MCSymbolRefExpr::VK_SECREL)
+ return COFF::IMAGE_REL_I386_SECREL;
return COFF::IMAGE_REL_I386_DIR32;
case FK_SecRel_2:
return COFF::IMAGE_REL_I386_SECTION;