Diffstat (limited to 'contrib/llvm/lib/Target/X86/AsmParser')
-rw-r--r--  contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp | 274
-rw-r--r--  contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.h   |  10
-rw-r--r--  contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp          | 319
-rw-r--r--  contrib/llvm/lib/Target/X86/AsmParser/X86AsmParserCommon.h      |  19
4 files changed, 296 insertions, 326 deletions
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
index 9eee4a0f3d82..09cc53a8e6d3 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.cpp
@@ -10,10 +10,8 @@
#include "MCTargetDesc/X86BaseInfo.h"
#include "X86AsmInstrumentation.h"
#include "X86Operand.h"
-#include "X86RegisterInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInst.h"
@@ -118,11 +116,6 @@ bool IsStackReg(unsigned Reg) { return Reg == X86::RSP || Reg == X86::ESP; }
bool IsSmallMemAccess(unsigned AccessSize) { return AccessSize < 8; }
-std::string FuncName(unsigned AccessSize, bool IsWrite) {
- return std::string("__asan_report_") + (IsWrite ? "store" : "load") +
- utostr(AccessSize);
-}
-
class X86AddressSanitizer : public X86AsmInstrumentation {
public:
struct RegisterContext {
@@ -136,26 +129,26 @@ public:
public:
RegisterContext(unsigned AddressReg, unsigned ShadowReg,
unsigned ScratchReg) {
- BusyRegs.push_back(convReg(AddressReg, MVT::i64));
- BusyRegs.push_back(convReg(ShadowReg, MVT::i64));
- BusyRegs.push_back(convReg(ScratchReg, MVT::i64));
+ BusyRegs.push_back(convReg(AddressReg, 64));
+ BusyRegs.push_back(convReg(ShadowReg, 64));
+ BusyRegs.push_back(convReg(ScratchReg, 64));
}
- unsigned AddressReg(MVT::SimpleValueType VT) const {
- return convReg(BusyRegs[REG_OFFSET_ADDRESS], VT);
+ unsigned AddressReg(unsigned Size) const {
+ return convReg(BusyRegs[REG_OFFSET_ADDRESS], Size);
}
- unsigned ShadowReg(MVT::SimpleValueType VT) const {
- return convReg(BusyRegs[REG_OFFSET_SHADOW], VT);
+ unsigned ShadowReg(unsigned Size) const {
+ return convReg(BusyRegs[REG_OFFSET_SHADOW], Size);
}
- unsigned ScratchReg(MVT::SimpleValueType VT) const {
- return convReg(BusyRegs[REG_OFFSET_SCRATCH], VT);
+ unsigned ScratchReg(unsigned Size) const {
+ return convReg(BusyRegs[REG_OFFSET_SCRATCH], Size);
}
void AddBusyReg(unsigned Reg) {
if (Reg != X86::NoRegister)
- BusyRegs.push_back(convReg(Reg, MVT::i64));
+ BusyRegs.push_back(convReg(Reg, 64));
}
void AddBusyRegs(const X86Operand &Op) {
@@ -163,36 +156,36 @@ public:
AddBusyReg(Op.getMemIndexReg());
}
- unsigned ChooseFrameReg(MVT::SimpleValueType VT) const {
+ unsigned ChooseFrameReg(unsigned Size) const {
static const MCPhysReg Candidates[] = { X86::RBP, X86::RAX, X86::RBX,
X86::RCX, X86::RDX, X86::RDI,
X86::RSI };
for (unsigned Reg : Candidates) {
if (!std::count(BusyRegs.begin(), BusyRegs.end(), Reg))
- return convReg(Reg, VT);
+ return convReg(Reg, Size);
}
return X86::NoRegister;
}
private:
- unsigned convReg(unsigned Reg, MVT::SimpleValueType VT) const {
- return Reg == X86::NoRegister ? Reg : getX86SubSuperRegister(Reg, VT);
+ unsigned convReg(unsigned Reg, unsigned Size) const {
+ return Reg == X86::NoRegister ? Reg : getX86SubSuperRegister(Reg, Size);
}
std::vector<unsigned> BusyRegs;
};
- X86AddressSanitizer(const MCSubtargetInfo &STI)
+ X86AddressSanitizer(const MCSubtargetInfo *&STI)
: X86AsmInstrumentation(STI), RepPrefix(false), OrigSPOffset(0) {}
- virtual ~X86AddressSanitizer() {}
+ ~X86AddressSanitizer() override {}
// X86AsmInstrumentation implementation:
- virtual void InstrumentAndEmitInstruction(const MCInst &Inst,
- OperandVector &Operands,
- MCContext &Ctx,
- const MCInstrInfo &MII,
- MCStreamer &Out) override {
+ void InstrumentAndEmitInstruction(const MCInst &Inst,
+ OperandVector &Operands,
+ MCContext &Ctx,
+ const MCInstrInfo &MII,
+ MCStreamer &Out) override {
InstrumentMOVS(Inst, Operands, Ctx, MII, Out);
if (RepPrefix)
EmitInstruction(Out, MCInstBuilder(X86::REP_PREFIX));
@@ -240,17 +233,16 @@ public:
protected:
void EmitLabel(MCStreamer &Out, MCSymbol *Label) { Out.EmitLabel(Label); }
- void EmitLEA(X86Operand &Op, MVT::SimpleValueType VT, unsigned Reg,
- MCStreamer &Out) {
- assert(VT == MVT::i32 || VT == MVT::i64);
+ void EmitLEA(X86Operand &Op, unsigned Size, unsigned Reg, MCStreamer &Out) {
+ assert(Size == 32 || Size == 64);
MCInst Inst;
- Inst.setOpcode(VT == MVT::i32 ? X86::LEA32r : X86::LEA64r);
- Inst.addOperand(MCOperand::createReg(getX86SubSuperRegister(Reg, VT)));
+ Inst.setOpcode(Size == 32 ? X86::LEA32r : X86::LEA64r);
+ Inst.addOperand(MCOperand::createReg(getX86SubSuperRegister(Reg, Size)));
Op.addMemOperands(Inst, 5);
EmitInstruction(Out, Inst);
}
- void ComputeMemOperandAddress(X86Operand &Op, MVT::SimpleValueType VT,
+ void ComputeMemOperandAddress(X86Operand &Op, unsigned Size,
unsigned Reg, MCContext &Ctx, MCStreamer &Out);
// Creates new memory operand with Displacement added to an original
@@ -261,13 +253,13 @@ protected:
MCContext &Ctx, int64_t *Residue);
bool is64BitMode() const {
- return STI.getFeatureBits()[X86::Mode64Bit];
+ return STI->getFeatureBits()[X86::Mode64Bit];
}
bool is32BitMode() const {
- return STI.getFeatureBits()[X86::Mode32Bit];
+ return STI->getFeatureBits()[X86::Mode32Bit];
}
bool is16BitMode() const {
- return STI.getFeatureBits()[X86::Mode16Bit];
+ return STI->getFeatureBits()[X86::Mode16Bit];
}
unsigned getPointerWidth() {
@@ -437,7 +429,7 @@ void X86AddressSanitizer::InstrumentMOV(const MCInst &Inst,
}
void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op,
- MVT::SimpleValueType VT,
+ unsigned Size,
unsigned Reg, MCContext &Ctx,
MCStreamer &Out) {
int64_t Displacement = 0;
@@ -450,14 +442,14 @@ void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op,
// Emit Op as is.
if (Displacement == 0) {
- EmitLEA(Op, VT, Reg, Out);
+ EmitLEA(Op, Size, Reg, Out);
return;
}
int64_t Residue;
std::unique_ptr<X86Operand> NewOp =
AddDisplacement(Op, Displacement, Ctx, &Residue);
- EmitLEA(*NewOp, VT, Reg, Out);
+ EmitLEA(*NewOp, Size, Reg, Out);
while (Residue != 0) {
const MCConstantExpr *Disp =
@@ -465,7 +457,7 @@ void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op,
std::unique_ptr<X86Operand> DispOp =
X86Operand::CreateMem(getPointerWidth(), 0, Disp, Reg, 0, 1, SMLoc(),
SMLoc());
- EmitLEA(*DispOp, VT, Reg, Out);
+ EmitLEA(*DispOp, Size, Reg, Out);
Residue -= Disp->getValue();
}
}
@@ -503,16 +495,16 @@ class X86AddressSanitizer32 : public X86AddressSanitizer {
public:
static const long kShadowOffset = 0x20000000;
- X86AddressSanitizer32(const MCSubtargetInfo &STI)
+ X86AddressSanitizer32(const MCSubtargetInfo *&STI)
: X86AddressSanitizer(STI) {}
- virtual ~X86AddressSanitizer32() {}
+ ~X86AddressSanitizer32() override {}
unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
if (FrameReg == X86::NoRegister)
return FrameReg;
- return getX86SubSuperRegister(FrameReg, MVT::i32);
+ return getX86SubSuperRegister(FrameReg, 32);
}
void SpillReg(MCStreamer &Out, unsigned Reg) {
@@ -535,10 +527,10 @@ public:
OrigSPOffset += 4;
}
- virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
- MCContext &Ctx,
- MCStreamer &Out) override {
- unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i32);
+ void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
+ MCContext &Ctx,
+ MCStreamer &Out) override {
+ unsigned LocalFrameReg = RegCtx.ChooseFrameReg(32);
assert(LocalFrameReg != X86::NoRegister);
const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
@@ -558,24 +550,24 @@ public:
MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
}
- SpillReg(Out, RegCtx.AddressReg(MVT::i32));
- SpillReg(Out, RegCtx.ShadowReg(MVT::i32));
- if (RegCtx.ScratchReg(MVT::i32) != X86::NoRegister)
- SpillReg(Out, RegCtx.ScratchReg(MVT::i32));
+ SpillReg(Out, RegCtx.AddressReg(32));
+ SpillReg(Out, RegCtx.ShadowReg(32));
+ if (RegCtx.ScratchReg(32) != X86::NoRegister)
+ SpillReg(Out, RegCtx.ScratchReg(32));
StoreFlags(Out);
}
- virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
- MCContext &Ctx,
- MCStreamer &Out) override {
- unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i32);
+ void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
+ MCContext &Ctx,
+ MCStreamer &Out) override {
+ unsigned LocalFrameReg = RegCtx.ChooseFrameReg(32);
assert(LocalFrameReg != X86::NoRegister);
RestoreFlags(Out);
- if (RegCtx.ScratchReg(MVT::i32) != X86::NoRegister)
- RestoreReg(Out, RegCtx.ScratchReg(MVT::i32));
- RestoreReg(Out, RegCtx.ShadowReg(MVT::i32));
- RestoreReg(Out, RegCtx.AddressReg(MVT::i32));
+ if (RegCtx.ScratchReg(32) != X86::NoRegister)
+ RestoreReg(Out, RegCtx.ScratchReg(32));
+ RestoreReg(Out, RegCtx.ShadowReg(32));
+ RestoreReg(Out, RegCtx.AddressReg(32));
unsigned FrameReg = GetFrameReg(Ctx, Out);
if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
@@ -586,18 +578,18 @@ public:
}
}
- virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
- bool IsWrite,
- const RegisterContext &RegCtx,
- MCContext &Ctx,
- MCStreamer &Out) override;
- virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
- bool IsWrite,
- const RegisterContext &RegCtx,
- MCContext &Ctx,
- MCStreamer &Out) override;
- virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
- MCStreamer &Out) override;
+ void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
+ bool IsWrite,
+ const RegisterContext &RegCtx,
+ MCContext &Ctx,
+ MCStreamer &Out) override;
+ void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
+ bool IsWrite,
+ const RegisterContext &RegCtx,
+ MCContext &Ctx,
+ MCStreamer &Out) override;
+ void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
+ MCStreamer &Out) override;
private:
void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
@@ -610,10 +602,11 @@ private:
.addReg(X86::ESP)
.addImm(-16));
EmitInstruction(
- Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.AddressReg(MVT::i32)));
+ Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.AddressReg(32)));
- const std::string &Fn = FuncName(AccessSize, IsWrite);
- MCSymbol *FnSym = Ctx.getOrCreateSymbol(StringRef(Fn));
+ MCSymbol *FnSym = Ctx.getOrCreateSymbol(llvm::Twine("__asan_report_") +
+ (IsWrite ? "store" : "load") +
+ llvm::Twine(AccessSize));
const MCSymbolRefExpr *FnExpr =
MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
EmitInstruction(Out, MCInstBuilder(X86::CALLpcrel32).addExpr(FnExpr));
@@ -623,14 +616,14 @@ private:
void X86AddressSanitizer32::InstrumentMemOperandSmall(
X86Operand &Op, unsigned AccessSize, bool IsWrite,
const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
- unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
- unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);
- unsigned ShadowRegI8 = RegCtx.ShadowReg(MVT::i8);
+ unsigned AddressRegI32 = RegCtx.AddressReg(32);
+ unsigned ShadowRegI32 = RegCtx.ShadowReg(32);
+ unsigned ShadowRegI8 = RegCtx.ShadowReg(8);
- assert(RegCtx.ScratchReg(MVT::i32) != X86::NoRegister);
- unsigned ScratchRegI32 = RegCtx.ScratchReg(MVT::i32);
+ assert(RegCtx.ScratchReg(32) != X86::NoRegister);
+ unsigned ScratchRegI32 = RegCtx.ScratchReg(32);
- ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out);
+ ComputeMemOperandAddress(Op, 32, AddressRegI32, Ctx, Out);
EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
AddressRegI32));
@@ -673,7 +666,7 @@ void X86AddressSanitizer32::InstrumentMemOperandSmall(
std::unique_ptr<X86Operand> Op(
X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
SMLoc(), SMLoc()));
- EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
+ EmitLEA(*Op, 32, ScratchRegI32, Out);
break;
}
case 4:
@@ -698,10 +691,10 @@ void X86AddressSanitizer32::InstrumentMemOperandSmall(
void X86AddressSanitizer32::InstrumentMemOperandLarge(
X86Operand &Op, unsigned AccessSize, bool IsWrite,
const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
- unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
- unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);
+ unsigned AddressRegI32 = RegCtx.AddressReg(32);
+ unsigned ShadowRegI32 = RegCtx.ShadowReg(32);
- ComputeMemOperandAddress(Op, MVT::i32, AddressRegI32, Ctx, Out);
+ ComputeMemOperandAddress(Op, 32, AddressRegI32, Ctx, Out);
EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
AddressRegI32));
@@ -760,16 +753,16 @@ class X86AddressSanitizer64 : public X86AddressSanitizer {
public:
static const long kShadowOffset = 0x7fff8000;
- X86AddressSanitizer64(const MCSubtargetInfo &STI)
+ X86AddressSanitizer64(const MCSubtargetInfo *&STI)
: X86AddressSanitizer(STI) {}
- virtual ~X86AddressSanitizer64() {}
+ ~X86AddressSanitizer64() override {}
unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
if (FrameReg == X86::NoRegister)
return FrameReg;
- return getX86SubSuperRegister(FrameReg, MVT::i64);
+ return getX86SubSuperRegister(FrameReg, 64);
}
void SpillReg(MCStreamer &Out, unsigned Reg) {
@@ -792,10 +785,10 @@ public:
OrigSPOffset += 8;
}
- virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
- MCContext &Ctx,
- MCStreamer &Out) override {
- unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i64);
+ void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
+ MCContext &Ctx,
+ MCStreamer &Out) override {
+ unsigned LocalFrameReg = RegCtx.ChooseFrameReg(64);
assert(LocalFrameReg != X86::NoRegister);
const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
@@ -816,24 +809,24 @@ public:
}
EmitAdjustRSP(Ctx, Out, -128);
- SpillReg(Out, RegCtx.ShadowReg(MVT::i64));
- SpillReg(Out, RegCtx.AddressReg(MVT::i64));
- if (RegCtx.ScratchReg(MVT::i64) != X86::NoRegister)
- SpillReg(Out, RegCtx.ScratchReg(MVT::i64));
+ SpillReg(Out, RegCtx.ShadowReg(64));
+ SpillReg(Out, RegCtx.AddressReg(64));
+ if (RegCtx.ScratchReg(64) != X86::NoRegister)
+ SpillReg(Out, RegCtx.ScratchReg(64));
StoreFlags(Out);
}
- virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
- MCContext &Ctx,
- MCStreamer &Out) override {
- unsigned LocalFrameReg = RegCtx.ChooseFrameReg(MVT::i64);
+ void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
+ MCContext &Ctx,
+ MCStreamer &Out) override {
+ unsigned LocalFrameReg = RegCtx.ChooseFrameReg(64);
assert(LocalFrameReg != X86::NoRegister);
RestoreFlags(Out);
- if (RegCtx.ScratchReg(MVT::i64) != X86::NoRegister)
- RestoreReg(Out, RegCtx.ScratchReg(MVT::i64));
- RestoreReg(Out, RegCtx.AddressReg(MVT::i64));
- RestoreReg(Out, RegCtx.ShadowReg(MVT::i64));
+ if (RegCtx.ScratchReg(64) != X86::NoRegister)
+ RestoreReg(Out, RegCtx.ScratchReg(64));
+ RestoreReg(Out, RegCtx.AddressReg(64));
+ RestoreReg(Out, RegCtx.ShadowReg(64));
EmitAdjustRSP(Ctx, Out, 128);
unsigned FrameReg = GetFrameReg(Ctx, Out);
@@ -845,18 +838,18 @@ public:
}
}
- virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
- bool IsWrite,
- const RegisterContext &RegCtx,
- MCContext &Ctx,
- MCStreamer &Out) override;
- virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
- bool IsWrite,
- const RegisterContext &RegCtx,
- MCContext &Ctx,
- MCStreamer &Out) override;
- virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
- MCStreamer &Out) override;
+ void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
+ bool IsWrite,
+ const RegisterContext &RegCtx,
+ MCContext &Ctx,
+ MCStreamer &Out) override;
+ void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
+ bool IsWrite,
+ const RegisterContext &RegCtx,
+ MCContext &Ctx,
+ MCStreamer &Out) override;
+ void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
+ MCStreamer &Out) override;
private:
void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) {
@@ -864,7 +857,7 @@ private:
std::unique_ptr<X86Operand> Op(
X86Operand::CreateMem(getPointerWidth(), 0, Disp, X86::RSP, 0, 1,
SMLoc(), SMLoc()));
- EmitLEA(*Op, MVT::i64, X86::RSP, Out);
+ EmitLEA(*Op, 64, X86::RSP, Out);
OrigSPOffset += Offset;
}
@@ -878,12 +871,13 @@ private:
.addReg(X86::RSP)
.addImm(-16));
- if (RegCtx.AddressReg(MVT::i64) != X86::RDI) {
+ if (RegCtx.AddressReg(64) != X86::RDI) {
EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RDI).addReg(
- RegCtx.AddressReg(MVT::i64)));
+ RegCtx.AddressReg(64)));
}
- const std::string &Fn = FuncName(AccessSize, IsWrite);
- MCSymbol *FnSym = Ctx.getOrCreateSymbol(StringRef(Fn));
+ MCSymbol *FnSym = Ctx.getOrCreateSymbol(llvm::Twine("__asan_report_") +
+ (IsWrite ? "store" : "load") +
+ llvm::Twine(AccessSize));
const MCSymbolRefExpr *FnExpr =
MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
EmitInstruction(Out, MCInstBuilder(X86::CALL64pcrel32).addExpr(FnExpr));
@@ -893,16 +887,16 @@ private:
void X86AddressSanitizer64::InstrumentMemOperandSmall(
X86Operand &Op, unsigned AccessSize, bool IsWrite,
const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
- unsigned AddressRegI64 = RegCtx.AddressReg(MVT::i64);
- unsigned AddressRegI32 = RegCtx.AddressReg(MVT::i32);
- unsigned ShadowRegI64 = RegCtx.ShadowReg(MVT::i64);
- unsigned ShadowRegI32 = RegCtx.ShadowReg(MVT::i32);
- unsigned ShadowRegI8 = RegCtx.ShadowReg(MVT::i8);
+ unsigned AddressRegI64 = RegCtx.AddressReg(64);
+ unsigned AddressRegI32 = RegCtx.AddressReg(32);
+ unsigned ShadowRegI64 = RegCtx.ShadowReg(64);
+ unsigned ShadowRegI32 = RegCtx.ShadowReg(32);
+ unsigned ShadowRegI8 = RegCtx.ShadowReg(8);
- assert(RegCtx.ScratchReg(MVT::i32) != X86::NoRegister);
- unsigned ScratchRegI32 = RegCtx.ScratchReg(MVT::i32);
+ assert(RegCtx.ScratchReg(32) != X86::NoRegister);
+ unsigned ScratchRegI32 = RegCtx.ScratchReg(32);
- ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out);
+ ComputeMemOperandAddress(Op, 64, AddressRegI64, Ctx, Out);
EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
AddressRegI64));
@@ -944,7 +938,7 @@ void X86AddressSanitizer64::InstrumentMemOperandSmall(
std::unique_ptr<X86Operand> Op(
X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
SMLoc(), SMLoc()));
- EmitLEA(*Op, MVT::i32, ScratchRegI32, Out);
+ EmitLEA(*Op, 32, ScratchRegI32, Out);
break;
}
case 4:
@@ -969,10 +963,10 @@ void X86AddressSanitizer64::InstrumentMemOperandSmall(
void X86AddressSanitizer64::InstrumentMemOperandLarge(
X86Operand &Op, unsigned AccessSize, bool IsWrite,
const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
- unsigned AddressRegI64 = RegCtx.AddressReg(MVT::i64);
- unsigned ShadowRegI64 = RegCtx.ShadowReg(MVT::i64);
+ unsigned AddressRegI64 = RegCtx.AddressReg(64);
+ unsigned ShadowRegI64 = RegCtx.ShadowReg(64);
- ComputeMemOperandAddress(Op, MVT::i64, AddressRegI64, Ctx, Out);
+ ComputeMemOperandAddress(Op, 64, AddressRegI64, Ctx, Out);
EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
AddressRegI64));
@@ -1030,7 +1024,7 @@ void X86AddressSanitizer64::InstrumentMOVSImpl(unsigned AccessSize,
} // End anonymous namespace
-X86AsmInstrumentation::X86AsmInstrumentation(const MCSubtargetInfo &STI)
+X86AsmInstrumentation::X86AsmInstrumentation(const MCSubtargetInfo *&STI)
: STI(STI), InitialFrameReg(0) {}
X86AsmInstrumentation::~X86AsmInstrumentation() {}
@@ -1043,7 +1037,7 @@ void X86AsmInstrumentation::InstrumentAndEmitInstruction(
void X86AsmInstrumentation::EmitInstruction(MCStreamer &Out,
const MCInst &Inst) {
- Out.EmitInstruction(Inst, STI);
+ Out.EmitInstruction(Inst, *STI);
}
unsigned X86AsmInstrumentation::GetFrameRegGeneric(const MCContext &Ctx,
@@ -1067,17 +1061,17 @@ unsigned X86AsmInstrumentation::GetFrameRegGeneric(const MCContext &Ctx,
X86AsmInstrumentation *
CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
- const MCContext &Ctx, const MCSubtargetInfo &STI) {
- Triple T(STI.getTargetTriple());
+ const MCContext &Ctx, const MCSubtargetInfo *&STI) {
+ Triple T(STI->getTargetTriple());
const bool hasCompilerRTSupport = T.isOSLinux();
if (ClAsanInstrumentAssembly && hasCompilerRTSupport &&
MCOptions.SanitizeAddress) {
- if (STI.getFeatureBits()[X86::Mode32Bit] != 0)
+ if (STI->getFeatureBits()[X86::Mode32Bit] != 0)
return new X86AddressSanitizer32(STI);
- if (STI.getFeatureBits()[X86::Mode64Bit] != 0)
+ if (STI->getFeatureBits()[X86::Mode64Bit] != 0)
return new X86AddressSanitizer64(STI);
}
return new X86AsmInstrumentation(STI);
}
-} // End llvm namespace
+} // end llvm namespace
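
The instrumentation hunks above replace the MVT::SimpleValueType arguments (MVT::i8/i32/i64) with plain bit widths (8/32/64) when selecting the address, shadow and scratch registers, which is what allows the MachineValueType.h include to be dropped from this MC-layer file. The point is only that getX86SubSuperRegister is now keyed by size rather than by value type; below is a minimal stand-alone C++ sketch of such a size-keyed alias lookup. The names and the table are hypothetical illustrations, not LLVM's API.

    // Illustrative sketch only: maps a 64-bit GPR name to the alias of the
    // requested bit width, mirroring a size-keyed lookup replacing an
    // MVT-keyed one. The table is hypothetical, not LLVM's register tables.
    #include <cassert>
    #include <map>
    #include <string>

    static std::string subSuperReg(const std::string &Reg64, unsigned Size) {
      static const std::map<std::string, std::map<unsigned, std::string>>
          Aliases = {{"RAX", {{64, "RAX"}, {32, "EAX"}, {16, "AX"}, {8, "AL"}}},
                     {"RBP", {{64, "RBP"}, {32, "EBP"}, {16, "BP"}, {8, "BPL"}}}};
      assert(Size == 8 || Size == 16 || Size == 32 || Size == 64);
      return Aliases.at(Reg64).at(Size);
    }

    int main() {
      assert(subSuperReg("RAX", 32) == "EAX"); // same physical register, 32-bit view
      assert(subSuperReg("RBP", 8) == "BPL");
      return 0;
    }
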
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.h b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.h
index 19ebcc44f61e..470ceadb0aa6 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.h
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmInstrumentation.h
@@ -28,7 +28,8 @@ class X86AsmInstrumentation;
X86AsmInstrumentation *
CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
- const MCContext &Ctx, const MCSubtargetInfo &STI);
+ const MCContext &Ctx,
+ const MCSubtargetInfo *&STI);
class X86AsmInstrumentation {
public:
@@ -48,15 +49,16 @@ public:
protected:
friend X86AsmInstrumentation *
CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
- const MCContext &Ctx, const MCSubtargetInfo &STI);
+ const MCContext &Ctx,
+ const MCSubtargetInfo *&STI);
- X86AsmInstrumentation(const MCSubtargetInfo &STI);
+ X86AsmInstrumentation(const MCSubtargetInfo *&STI);
unsigned GetFrameRegGeneric(const MCContext &Ctx, MCStreamer &Out);
void EmitInstruction(MCStreamer &Out, const MCInst &Inst);
- const MCSubtargetInfo &STI;
+ const MCSubtargetInfo *&STI;
unsigned InitialFrameReg;
};
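
The header change stores the subtarget as const MCSubtargetInfo *&, a reference to the parser's own pointer, so that when the parser re-seats that pointer (the X86AsmParser.cpp diff below calls copySTI() in SwitchMode) the instrumentation automatically observes the new subtarget. A minimal sketch of that aliasing behavior, using hypothetical stand-in types rather than the real MC classes:

    // Sketch of why a pointer-reference member tracks the owner's re-seating.
    // SubtargetInfo and Instrumentation are stand-ins for the MC types.
    #include <cassert>

    struct SubtargetInfo { int Mode; };

    struct Instrumentation {
      const SubtargetInfo *&STI;                 // reference to the owner's pointer
      explicit Instrumentation(const SubtargetInfo *&S) : STI(S) {}
      int mode() const { return STI->Mode; }
    };

    int main() {
      SubtargetInfo M32{32}, M64{64};
      const SubtargetInfo *Current = &M32;
      Instrumentation Instr(Current);
      assert(Instr.mode() == 32);
      Current = &M64;                            // owner switches subtargets
      assert(Instr.mode() == 64);                // instrumentation sees the switch
      return 0;
    }
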
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
index bca059d8c383..4d8ffac1a82b 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp
@@ -11,7 +11,6 @@
#include "X86AsmInstrumentation.h"
#include "X86AsmParserCommon.h"
#include "X86Operand.h"
-#include "X86ISelLowering.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
@@ -26,6 +25,7 @@
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
@@ -57,10 +57,10 @@ static const char OpPrecedence[] = {
};
class X86AsmParser : public MCTargetAsmParser {
- MCSubtargetInfo &STI;
const MCInstrInfo &MII;
ParseInstructionInfo *InstInfo;
std::unique_ptr<X86AsmInstrumentation> Instrumentation;
+
private:
SMLoc consumeToken() {
MCAsmParser &Parser = getParser();
@@ -154,6 +154,7 @@ private:
// Push the new operator.
InfixOperatorStack.push_back(Op);
}
+
int64_t execute() {
// Push any remaining operators onto the postfix stack.
while (!InfixOperatorStack.empty()) {
@@ -268,6 +269,7 @@ private:
bool StopOnLBrac, AddImmPrefix;
InfixCalculator IC;
InlineAsmIdentifierInfo Info;
+
public:
IntelExprStateMachine(int64_t imm, bool stoponlbrac, bool addimmprefix) :
State(IES_PLUS), PrevState(IES_ERROR), BaseReg(0), IndexReg(0), TmpReg(0),
@@ -712,10 +714,10 @@ private:
SMLoc End, unsigned Size, StringRef Identifier,
InlineAsmIdentifierInfo &Info);
+ bool parseDirectiveEven(SMLoc L);
bool ParseDirectiveWord(unsigned Size, SMLoc L);
bool ParseDirectiveCode(StringRef IDVal, SMLoc L);
- bool validateInstruction(MCInst &Inst, const OperandVector &Ops);
bool processInstruction(MCInst &Inst, const OperandVector &Ops);
/// Wrapper around MCStreamer::EmitInstruction(). Possibly adds
@@ -758,23 +760,24 @@ private:
bool is64BitMode() const {
// FIXME: Can tablegen auto-generate this?
- return STI.getFeatureBits()[X86::Mode64Bit];
+ return getSTI().getFeatureBits()[X86::Mode64Bit];
}
bool is32BitMode() const {
// FIXME: Can tablegen auto-generate this?
- return STI.getFeatureBits()[X86::Mode32Bit];
+ return getSTI().getFeatureBits()[X86::Mode32Bit];
}
bool is16BitMode() const {
// FIXME: Can tablegen auto-generate this?
- return STI.getFeatureBits()[X86::Mode16Bit];
+ return getSTI().getFeatureBits()[X86::Mode16Bit];
}
void SwitchMode(unsigned mode) {
+ MCSubtargetInfo &STI = copySTI();
FeatureBitset AllModes({X86::Mode64Bit, X86::Mode32Bit, X86::Mode16Bit});
FeatureBitset OldMode = STI.getFeatureBits() & AllModes;
unsigned FB = ComputeAvailableFeatures(
STI.ToggleFeature(OldMode.flip(mode)));
setAvailableFeatures(FB);
-
+
assert(FeatureBitset({mode}) == (STI.getFeatureBits() & AllModes));
}
@@ -798,12 +801,12 @@ private:
/// }
public:
- X86AsmParser(MCSubtargetInfo &sti, MCAsmParser &Parser,
+ X86AsmParser(const MCSubtargetInfo &sti, MCAsmParser &Parser,
const MCInstrInfo &mii, const MCTargetOptions &Options)
- : MCTargetAsmParser(), STI(sti), MII(mii), InstInfo(nullptr) {
+ : MCTargetAsmParser(Options, sti), MII(mii), InstInfo(nullptr) {
// Initialize the set of available features.
- setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
+ setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
Instrumentation.reset(
CreateX86AsmInstrumentation(Options, Parser.getContext(), STI));
}
@@ -912,6 +915,11 @@ bool X86AsmParser::ParseRegister(unsigned &RegNo,
if (RegNo == 0)
RegNo = MatchRegisterName(Tok.getString().lower());
+ // The "flags" register cannot be referenced directly.
+ // Treat it as an identifier instead.
+ if (isParsingInlineAsm() && isParsingIntelSyntax() && RegNo == X86::EFLAGS)
+ RegNo = 0;
+
if (!is64BitMode()) {
// FIXME: This should be done using Requires<Not64BitMode> and
// Requires<In64BitMode> so "eiz" usage in 64-bit instructions can be also
@@ -1042,8 +1050,11 @@ static unsigned getIntelMemOperandSize(StringRef OpStr) {
.Cases("BYTE", "byte", 8)
.Cases("WORD", "word", 16)
.Cases("DWORD", "dword", 32)
+ .Cases("FWORD", "fword", 48)
.Cases("QWORD", "qword", 64)
+ .Cases("MMWORD","mmword", 64)
.Cases("XWORD", "xword", 80)
+ .Cases("TBYTE", "tbyte", 80)
.Cases("XMMWORD", "xmmword", 128)
.Cases("YMMWORD", "ymmword", 256)
.Cases("ZMMWORD", "zmmword", 512)
@@ -1062,8 +1073,8 @@ std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
// Insert an explicit size if the user didn't have one.
if (!Size) {
Size = getPointerWidth();
- InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_SizeDirective, Start,
- /*Len=*/0, Size));
+ InstInfo->AsmRewrites->emplace_back(AOK_SizeDirective, Start,
+ /*Len=*/0, Size);
}
// Create an absolute memory reference in order to match against
@@ -1082,8 +1093,8 @@ std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
if (!Size) {
Size = Info.Type * 8; // Size is in terms of bits in this context.
if (Size)
- InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_SizeDirective, Start,
- /*Len=*/0, Size));
+ InstInfo->AsmRewrites->emplace_back(AOK_SizeDirective, Start,
+ /*Len=*/0, Size);
}
}
@@ -1097,13 +1108,13 @@ std::unique_ptr<X86Operand> X86AsmParser::CreateMemForInlineAsm(
}
static void
-RewriteIntelBracExpression(SmallVectorImpl<AsmRewrite> *AsmRewrites,
+RewriteIntelBracExpression(SmallVectorImpl<AsmRewrite> &AsmRewrites,
StringRef SymName, int64_t ImmDisp,
int64_t FinalImmDisp, SMLoc &BracLoc,
SMLoc &StartInBrac, SMLoc &End) {
// Remove the '[' and ']' from the IR string.
- AsmRewrites->push_back(AsmRewrite(AOK_Skip, BracLoc, 1));
- AsmRewrites->push_back(AsmRewrite(AOK_Skip, End, 1));
+ AsmRewrites.emplace_back(AOK_Skip, BracLoc, 1);
+ AsmRewrites.emplace_back(AOK_Skip, End, 1);
// If ImmDisp is non-zero, then we parsed a displacement before the
// bracketed expression (i.e., ImmDisp [ BaseReg + Scale*IndexReg + Disp])
@@ -1114,15 +1125,14 @@ RewriteIntelBracExpression(SmallVectorImpl<AsmRewrite> *AsmRewrites,
// We have an immediate displacement before the bracketed expression.
// Adjust this to match the final immediate displacement.
bool Found = false;
- for (SmallVectorImpl<AsmRewrite>::iterator I = AsmRewrites->begin(),
- E = AsmRewrites->end(); I != E; ++I) {
- if ((*I).Loc.getPointer() > BracLoc.getPointer())
+ for (AsmRewrite &AR : AsmRewrites) {
+ if (AR.Loc.getPointer() > BracLoc.getPointer())
continue;
- if ((*I).Kind == AOK_ImmPrefix || (*I).Kind == AOK_Imm) {
+ if (AR.Kind == AOK_ImmPrefix || AR.Kind == AOK_Imm) {
assert (!Found && "ImmDisp already rewritten.");
- (*I).Kind = AOK_Imm;
- (*I).Len = BracLoc.getPointer() - (*I).Loc.getPointer();
- (*I).Val = FinalImmDisp;
+ AR.Kind = AOK_Imm;
+ AR.Len = BracLoc.getPointer() - AR.Loc.getPointer();
+ AR.Val = FinalImmDisp;
Found = true;
break;
}
@@ -1133,28 +1143,27 @@ RewriteIntelBracExpression(SmallVectorImpl<AsmRewrite> *AsmRewrites,
// We have a symbolic and an immediate displacement, but no displacement
// before the bracketed expression. Put the immediate displacement
// before the bracketed expression.
- AsmRewrites->push_back(AsmRewrite(AOK_Imm, BracLoc, 0, FinalImmDisp));
+ AsmRewrites.emplace_back(AOK_Imm, BracLoc, 0, FinalImmDisp);
}
}
// Remove all the ImmPrefix rewrites within the brackets.
- for (SmallVectorImpl<AsmRewrite>::iterator I = AsmRewrites->begin(),
- E = AsmRewrites->end(); I != E; ++I) {
- if ((*I).Loc.getPointer() < StartInBrac.getPointer())
+ for (AsmRewrite &AR : AsmRewrites) {
+ if (AR.Loc.getPointer() < StartInBrac.getPointer())
continue;
- if ((*I).Kind == AOK_ImmPrefix)
- (*I).Kind = AOK_Delete;
+ if (AR.Kind == AOK_ImmPrefix)
+ AR.Kind = AOK_Delete;
}
const char *SymLocPtr = SymName.data();
// Skip everything before the symbol.
if (unsigned Len = SymLocPtr - StartInBrac.getPointer()) {
assert(Len > 0 && "Expected a non-negative length.");
- AsmRewrites->push_back(AsmRewrite(AOK_Skip, StartInBrac, Len));
+ AsmRewrites.emplace_back(AOK_Skip, StartInBrac, Len);
}
// Skip everything after the symbol.
if (unsigned Len = End.getPointer() - (SymLocPtr + SymName.size())) {
SMLoc Loc = SMLoc::getFromPointer(SymLocPtr + SymName.size());
assert(Len > 0 && "Expected a non-negative length.");
- AsmRewrites->push_back(AsmRewrite(AOK_Skip, Loc, Len));
+ AsmRewrites.emplace_back(AOK_Skip, Loc, Len);
}
}
@@ -1162,6 +1171,7 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
MCAsmParser &Parser = getParser();
const AsmToken &Tok = Parser.getTok();
+ AsmToken::TokenKind PrevTK = AsmToken::Error;
bool Done = false;
while (!Done) {
bool UpdateLocLex = true;
@@ -1205,7 +1215,8 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
return Error(Tok.getLoc(), "Unexpected identifier!");
} else {
// This is a dot operator, not an adjacent identifier.
- if (Identifier.find('.') != StringRef::npos) {
+ if (Identifier.find('.') != StringRef::npos &&
+ PrevTK == AsmToken::RBrac) {
return false;
} else {
InlineAsmIdentifierInfo &Info = SM.getIdentifierInfo();
@@ -1223,8 +1234,7 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
case AsmToken::Integer: {
StringRef ErrMsg;
if (isParsingInlineAsm() && SM.getAddImmPrefix())
- InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_ImmPrefix,
- Tok.getLoc()));
+ InstInfo->AsmRewrites->emplace_back(AOK_ImmPrefix, Tok.getLoc());
// Look for 'b' or 'f' following an Integer as a directional label
SMLoc Loc = getTok().getLoc();
int64_t IntVal = getTok().getIntVal();
@@ -1237,7 +1247,7 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
getContext().getDirectionalLocalSymbol(IntVal, IDVal == "b");
MCSymbolRefExpr::VariantKind Variant = MCSymbolRefExpr::VK_None;
const MCExpr *Val =
- MCSymbolRefExpr::create(Sym, Variant, getContext());
+ MCSymbolRefExpr::create(Sym, Variant, getContext());
if (IDVal == "b" && Sym->isUndefined())
return Error(Loc, "invalid reference to undefined symbol");
StringRef Identifier = Sym->getName();
@@ -1275,6 +1285,8 @@ bool X86AsmParser::ParseIntelExpression(IntelExprStateMachine &SM, SMLoc &End) {
if (!Done && UpdateLocLex)
End = consumeToken();
+
+ PrevTK = TK;
}
return false;
}
@@ -1302,7 +1314,7 @@ X86AsmParser::ParseIntelBracExpression(unsigned SegReg, SMLoc Start,
// A symbolic displacement.
Disp = Sym;
if (isParsingInlineAsm())
- RewriteIntelBracExpression(InstInfo->AsmRewrites, SM.getSymName(),
+ RewriteIntelBracExpression(*InstInfo->AsmRewrites, SM.getSymName(),
ImmDisp, SM.getImm(), BracLoc, StartInBrac,
End);
}
@@ -1359,7 +1371,7 @@ bool X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val,
InlineAsmIdentifierInfo &Info,
bool IsUnevaluatedOperand, SMLoc &End) {
MCAsmParser &Parser = getParser();
- assert (isParsingInlineAsm() && "Expected to be parsing inline assembly.");
+ assert(isParsingInlineAsm() && "Expected to be parsing inline assembly.");
Val = nullptr;
StringRef LineBuf(Identifier.data());
@@ -1372,15 +1384,17 @@ bool X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val,
// Advance the token stream until the end of the current token is
// after the end of what the frontend claimed.
const char *EndPtr = Tok.getLoc().getPointer() + LineBuf.size();
- while (true) {
+ do {
End = Tok.getEndLoc();
getLexer().Lex();
-
- assert(End.getPointer() <= EndPtr && "frontend claimed part of a token?");
- if (End.getPointer() == EndPtr) break;
- }
+ } while (End.getPointer() < EndPtr);
Identifier = LineBuf;
+ // The frontend should end parsing on an assembler token boundary, unless it
+ // failed parsing.
+ assert((End.getPointer() == EndPtr || !Result) &&
+ "frontend claimed part of a token?");
+
// If the identifier lookup was unsuccessful, assume that we are dealing with
// a label.
if (!Result) {
@@ -1389,9 +1403,8 @@ bool X86AsmParser::ParseIntelIdentifier(const MCExpr *&Val,
Loc, false);
assert(InternalName.size() && "We should have an internal name here.");
// Push a rewrite for replacing the identifier name with the internal name.
- InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Label, Loc,
- Identifier.size(),
- InternalName));
+ InstInfo->AsmRewrites->emplace_back(AOK_Label, Loc, Identifier.size(),
+ InternalName);
}
// Create the symbol reference.
@@ -1418,8 +1431,7 @@ X86AsmParser::ParseIntelSegmentOverride(unsigned SegReg, SMLoc Start,
AsmToken ImmDispToken = Parser.Lex(); // Eat the integer.
if (isParsingInlineAsm())
- InstInfo->AsmRewrites->push_back(
- AsmRewrite(AOK_ImmPrefix, ImmDispToken.getLoc()));
+ InstInfo->AsmRewrites->emplace_back(AOK_ImmPrefix, ImmDispToken.getLoc());
if (getLexer().isNot(AsmToken::LBrac)) {
// An immediate following a 'segment register', 'colon' token sequence can
@@ -1588,8 +1600,7 @@ bool X86AsmParser::ParseIntelDotOperator(const MCExpr *Disp,
SMLoc Loc = SMLoc::getFromPointer(DotDispStr.data());
unsigned Len = DotDispStr.size();
unsigned Val = OrigDispVal + DotDispVal;
- InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_DotOperator, Loc, Len,
- Val));
+ InstInfo->AsmRewrites->emplace_back(AOK_DotOperator, Loc, Len, Val);
}
NewDisp = MCConstantExpr::create(OrigDispVal + DotDispVal, getContext());
@@ -1613,7 +1624,7 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOffsetOfOperator() {
return nullptr;
// Don't emit the offset operator.
- InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Skip, OffsetOfLoc, 7));
+ InstInfo->AsmRewrites->emplace_back(AOK_Skip, OffsetOfLoc, 7);
// The offset operator will have an 'r' constraint, thus we need to create
// register operand to ensure proper matching. Just pick a GPR based on
@@ -1664,7 +1675,7 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOperator(unsigned OpKind) {
// Rewrite the type operator and the C or C++ type or variable in terms of an
// immediate. E.g. TYPE foo -> $$4
unsigned Len = End.getPointer() - TypeLoc.getPointer();
- InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Imm, TypeLoc, Len, CVal));
+ InstInfo->AsmRewrites->emplace_back(AOK_Imm, TypeLoc, Len, CVal);
const MCExpr *Imm = MCConstantExpr::create(CVal, getContext());
return X86Operand::CreateImm(Imm, Start, End);
@@ -1688,12 +1699,14 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOperand() {
return ParseIntelOperator(IOK_TYPE);
}
+ bool PtrInOperand = false;
unsigned Size = getIntelMemOperandSize(Tok.getString());
if (Size) {
Parser.Lex(); // Eat operand size (e.g., byte, word).
if (Tok.getString() != "PTR" && Tok.getString() != "ptr")
return ErrorOperand(Tok.getLoc(), "Expected 'PTR' or 'ptr' token!");
Parser.Lex(); // Eat ptr.
+ PtrInOperand = true;
}
Start = Tok.getLoc();
@@ -1711,10 +1724,10 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOperand() {
unsigned Len = Tok.getLoc().getPointer() - Start.getPointer();
if (StartTok.getString().size() == Len)
// Just add a prefix if this wasn't a complex immediate expression.
- InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_ImmPrefix, Start));
+ InstInfo->AsmRewrites->emplace_back(AOK_ImmPrefix, Start);
else
// Otherwise, rewrite the complex expression as a single immediate.
- InstInfo->AsmRewrites->push_back(AsmRewrite(AOK_Imm, Start, Len, Imm));
+ InstInfo->AsmRewrites->emplace_back(AOK_Imm, Start, Len, Imm);
}
if (getLexer().isNot(AsmToken::LBrac)) {
@@ -1740,7 +1753,7 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOperand() {
}
// rounding mode token
- if (STI.getFeatureBits()[X86::FeatureAVX512] &&
+ if (getSTI().getFeatureBits()[X86::FeatureAVX512] &&
getLexer().is(AsmToken::LCurly))
return ParseRoundingModeOp(Start, End);
@@ -1749,9 +1762,16 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseIntelOperand() {
if (!ParseRegister(RegNo, Start, End)) {
// If this is a segment register followed by a ':', then this is the start
// of a segment override, otherwise this is a normal register reference.
- if (getLexer().isNot(AsmToken::Colon))
+ // In case it is a normal register and there is ptr in the operand this
+ // is an error
+ if (getLexer().isNot(AsmToken::Colon)){
+ if (PtrInOperand){
+ return ErrorOperand(Start, "expected memory operand after "
+ "'ptr', found register operand instead");
+ }
return X86Operand::CreateReg(RegNo, Start, End);
-
+ }
+
return ParseIntelSegmentOverride(/*SegReg=*/RegNo, Start, Size);
}
@@ -1798,7 +1818,7 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseATTOperand() {
}
case AsmToken::LCurly:{
SMLoc Start = Parser.getTok().getLoc(), End;
- if (STI.getFeatureBits()[X86::FeatureAVX512])
+ if (getSTI().getFeatureBits()[X86::FeatureAVX512])
return ParseRoundingModeOp(Start, End);
return ErrorOperand(Start, "unknown token in expression");
}
@@ -1808,7 +1828,7 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseATTOperand() {
bool X86AsmParser::HandleAVX512Operand(OperandVector &Operands,
const MCParsedAsmOperand &Op) {
MCAsmParser &Parser = getParser();
- if(STI.getFeatureBits()[X86::FeatureAVX512]) {
+ if(getSTI().getFeatureBits()[X86::FeatureAVX512]) {
if (getLexer().is(AsmToken::LCurly)) {
// Eat "{" and mark the current place.
const SMLoc consumedToken = consumeToken();
@@ -1983,12 +2003,13 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseMemOperand(unsigned SegReg,
}
// Validate the scale amount.
- if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
+ if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) &&
ScaleVal != 1) {
Error(Loc, "scale factor in 16-bit address must be 1");
return nullptr;
- }
- if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8){
+ }
+ if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 &&
+ ScaleVal != 8) {
Error(Loc, "scale factor in address must be 1, 2, 4 or 8");
return nullptr;
}
@@ -2175,7 +2196,6 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
Name == "repne" || Name == "repnz" ||
Name == "rex64" || Name == "data16";
-
// This does the actual operand parsing. Don't parse any more if we have a
// prefix juxtaposed with an operation like "lock incl 4(%rax)", because we
// just want to parse the "lock" as the first instruction and the "incl" as
@@ -2213,6 +2233,20 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
(isPrefix && getLexer().is(AsmToken::Slash)))
Parser.Lex();
+ // This is for gas compatibility and cannot be done in td.
+ // Adding "p" for some floating point with no argument.
+ // For example: fsub --> fsubp
+ bool IsFp =
+ Name == "fsub" || Name == "fdiv" || Name == "fsubr" || Name == "fdivr";
+ if (IsFp && Operands.size() == 1) {
+ const char *Repl = StringSwitch<const char *>(Name)
+ .Case("fsub", "fsubp")
+ .Case("fdiv", "fdivp")
+ .Case("fsubr", "fsubrp")
+ .Case("fdivr", "fdivrp");
+ static_cast<X86Operand &>(*Operands[0]).setTokenValue(Repl);
+ }
+
// This is a terrible hack to handle "out[bwl]? %al, (%dx)" ->
// "outb %al, %dx". Out doesn't take a memory form, but this is a widely
// documented form in various unofficial manuals, so a lot of code uses it.
@@ -2242,9 +2276,8 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// Append default arguments to "ins[bwld]"
if (Name.startswith("ins") && Operands.size() == 1 &&
- (Name == "insb" || Name == "insw" || Name == "insl" ||
- Name == "insd" )) {
- AddDefaultSrcDestOperands(Operands,
+ (Name == "insb" || Name == "insw" || Name == "insl" || Name == "insd")) {
+ AddDefaultSrcDestOperands(Operands,
X86Operand::CreateReg(X86::DX, NameLoc, NameLoc),
DefaultMemDIOperand(NameLoc));
}
@@ -2346,98 +2379,21 @@ bool X86AsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
// instalias with an immediate operand yet.
if (Name == "int" && Operands.size() == 2) {
X86Operand &Op1 = static_cast<X86Operand &>(*Operands[1]);
- if (Op1.isImm() && isa<MCConstantExpr>(Op1.getImm()) &&
- cast<MCConstantExpr>(Op1.getImm())->getValue() == 3) {
- Operands.erase(Operands.begin() + 1);
- static_cast<X86Operand &>(*Operands[0]).setTokenValue("int3");
- }
+ if (Op1.isImm())
+ if (auto *CE = dyn_cast<MCConstantExpr>(Op1.getImm()))
+ if (CE->getValue() == 3) {
+ Operands.erase(Operands.begin() + 1);
+ static_cast<X86Operand &>(*Operands[0]).setTokenValue("int3");
+ }
}
return false;
}
-static bool convertToSExti8(MCInst &Inst, unsigned Opcode, unsigned Reg,
- bool isCmp) {
- MCInst TmpInst;
- TmpInst.setOpcode(Opcode);
- if (!isCmp)
- TmpInst.addOperand(MCOperand::createReg(Reg));
- TmpInst.addOperand(MCOperand::createReg(Reg));
- TmpInst.addOperand(Inst.getOperand(0));
- Inst = TmpInst;
- return true;
-}
-
-static bool convert16i16to16ri8(MCInst &Inst, unsigned Opcode,
- bool isCmp = false) {
- if (!Inst.getOperand(0).isImm() ||
- !isImmSExti16i8Value(Inst.getOperand(0).getImm()))
- return false;
-
- return convertToSExti8(Inst, Opcode, X86::AX, isCmp);
-}
-
-static bool convert32i32to32ri8(MCInst &Inst, unsigned Opcode,
- bool isCmp = false) {
- if (!Inst.getOperand(0).isImm() ||
- !isImmSExti32i8Value(Inst.getOperand(0).getImm()))
- return false;
-
- return convertToSExti8(Inst, Opcode, X86::EAX, isCmp);
-}
-
-static bool convert64i32to64ri8(MCInst &Inst, unsigned Opcode,
- bool isCmp = false) {
- if (!Inst.getOperand(0).isImm() ||
- !isImmSExti64i8Value(Inst.getOperand(0).getImm()))
- return false;
-
- return convertToSExti8(Inst, Opcode, X86::RAX, isCmp);
-}
-
-bool X86AsmParser::validateInstruction(MCInst &Inst, const OperandVector &Ops) {
- switch (Inst.getOpcode()) {
- default: return true;
- case X86::INT:
- X86Operand &Op = static_cast<X86Operand &>(*Ops[1]);
- assert(Op.isImm() && "expected immediate");
- int64_t Res;
- if (!Op.getImm()->evaluateAsAbsolute(Res) || Res > 255) {
- Error(Op.getStartLoc(), "interrupt vector must be in range [0-255]");
- return false;
- }
- return true;
- }
- llvm_unreachable("handle the instruction appropriately");
-}
-
bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
switch (Inst.getOpcode()) {
default: return false;
- case X86::AND16i16: return convert16i16to16ri8(Inst, X86::AND16ri8);
- case X86::AND32i32: return convert32i32to32ri8(Inst, X86::AND32ri8);
- case X86::AND64i32: return convert64i32to64ri8(Inst, X86::AND64ri8);
- case X86::XOR16i16: return convert16i16to16ri8(Inst, X86::XOR16ri8);
- case X86::XOR32i32: return convert32i32to32ri8(Inst, X86::XOR32ri8);
- case X86::XOR64i32: return convert64i32to64ri8(Inst, X86::XOR64ri8);
- case X86::OR16i16: return convert16i16to16ri8(Inst, X86::OR16ri8);
- case X86::OR32i32: return convert32i32to32ri8(Inst, X86::OR32ri8);
- case X86::OR64i32: return convert64i32to64ri8(Inst, X86::OR64ri8);
- case X86::CMP16i16: return convert16i16to16ri8(Inst, X86::CMP16ri8, true);
- case X86::CMP32i32: return convert32i32to32ri8(Inst, X86::CMP32ri8, true);
- case X86::CMP64i32: return convert64i32to64ri8(Inst, X86::CMP64ri8, true);
- case X86::ADD16i16: return convert16i16to16ri8(Inst, X86::ADD16ri8);
- case X86::ADD32i32: return convert32i32to32ri8(Inst, X86::ADD32ri8);
- case X86::ADD64i32: return convert64i32to64ri8(Inst, X86::ADD64ri8);
- case X86::SUB16i16: return convert16i16to16ri8(Inst, X86::SUB16ri8);
- case X86::SUB32i32: return convert32i32to32ri8(Inst, X86::SUB32ri8);
- case X86::SUB64i32: return convert64i32to64ri8(Inst, X86::SUB64ri8);
- case X86::ADC16i16: return convert16i16to16ri8(Inst, X86::ADC16ri8);
- case X86::ADC32i32: return convert32i32to32ri8(Inst, X86::ADC32ri8);
- case X86::ADC64i32: return convert64i32to64ri8(Inst, X86::ADC64ri8);
- case X86::SBB16i16: return convert16i16to16ri8(Inst, X86::SBB16ri8);
- case X86::SBB32i32: return convert32i32to32ri8(Inst, X86::SBB32ri8);
- case X86::SBB64i32: return convert64i32to64ri8(Inst, X86::SBB64ri8);
+ case X86::VMOVZPQILo2PQIrr:
case X86::VMOVAPDrr:
case X86::VMOVAPDYrr:
case X86::VMOVAPSrr:
@@ -2457,18 +2413,19 @@ bool X86AsmParser::processInstruction(MCInst &Inst, const OperandVector &Ops) {
unsigned NewOpc;
switch (Inst.getOpcode()) {
default: llvm_unreachable("Invalid opcode");
- case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break;
- case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
- case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break;
- case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
- case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break;
- case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
- case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break;
- case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
- case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break;
- case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
- case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break;
- case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
+ case X86::VMOVZPQILo2PQIrr: NewOpc = X86::VMOVPQI2QIrr; break;
+ case X86::VMOVAPDrr: NewOpc = X86::VMOVAPDrr_REV; break;
+ case X86::VMOVAPDYrr: NewOpc = X86::VMOVAPDYrr_REV; break;
+ case X86::VMOVAPSrr: NewOpc = X86::VMOVAPSrr_REV; break;
+ case X86::VMOVAPSYrr: NewOpc = X86::VMOVAPSYrr_REV; break;
+ case X86::VMOVDQArr: NewOpc = X86::VMOVDQArr_REV; break;
+ case X86::VMOVDQAYrr: NewOpc = X86::VMOVDQAYrr_REV; break;
+ case X86::VMOVDQUrr: NewOpc = X86::VMOVDQUrr_REV; break;
+ case X86::VMOVDQUYrr: NewOpc = X86::VMOVDQUYrr_REV; break;
+ case X86::VMOVUPDrr: NewOpc = X86::VMOVUPDrr_REV; break;
+ case X86::VMOVUPDYrr: NewOpc = X86::VMOVUPDYrr_REV; break;
+ case X86::VMOVUPSrr: NewOpc = X86::VMOVUPSrr_REV; break;
+ case X86::VMOVUPSYrr: NewOpc = X86::VMOVUPSYrr_REV; break;
}
Inst.setOpcode(NewOpc);
return true;
@@ -2573,9 +2530,6 @@ bool X86AsmParser::MatchAndEmitATTInstruction(SMLoc IDLoc, unsigned &Opcode,
isParsingIntelSyntax())) {
default: llvm_unreachable("Unexpected match result!");
case Match_Success:
- if (!validateInstruction(Inst, Operands))
- return true;
-
// Some instructions need post-processing to, for example, tweak which
// encoding is selected. Loop on it while changes happen so the
// individual transformations can chain off each other.
@@ -2819,9 +2773,6 @@ bool X86AsmParser::MatchAndEmitIntelInstruction(SMLoc IDLoc, unsigned &Opcode,
unsigned NumSuccessfulMatches =
std::count(std::begin(Match), std::end(Match), Match_Success);
if (NumSuccessfulMatches == 1) {
- if (!validateInstruction(Inst, Operands))
- return true;
-
// Some instructions need post-processing to, for example, tweak which
// encoding is selected. Loop on it while changes happen so the individual
// transformations can chain off each other.
@@ -2898,10 +2849,29 @@ bool X86AsmParser::ParseDirective(AsmToken DirectiveID) {
"a '%' prefix in .intel_syntax");
}
return false;
- }
+ } else if (IDVal == ".even")
+ return parseDirectiveEven(DirectiveID.getLoc());
return true;
}
+/// parseDirectiveEven
+/// ::= .even
+bool X86AsmParser::parseDirectiveEven(SMLoc L) {
+ const MCSection *Section = getStreamer().getCurrentSection().first;
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ TokError("unexpected token in directive");
+ return false;
+ }
+ if (!Section) {
+ getStreamer().InitSections(false);
+ Section = getStreamer().getCurrentSection().first;
+ }
+ if (Section->UseCodeAlign())
+ getStreamer().EmitCodeAlignment(2, 0);
+ else
+ getStreamer().EmitValueToAlignment(2, 0, 1, 0);
+ return false;
+}
/// ParseDirectiveWord
/// ::= .word [ expression (, expression)* ]
bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
@@ -2909,10 +2879,19 @@ bool X86AsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) {
if (getLexer().isNot(AsmToken::EndOfStatement)) {
for (;;) {
const MCExpr *Value;
+ SMLoc ExprLoc = getLexer().getLoc();
if (getParser().parseExpression(Value))
return false;
- getParser().getStreamer().EmitValue(Value, Size);
+ if (const auto *MCE = dyn_cast<MCConstantExpr>(Value)) {
+ assert(Size <= 8 && "Invalid size");
+ uint64_t IntValue = MCE->getValue();
+ if (!isUIntN(8 * Size, IntValue) && !isIntN(8 * Size, IntValue))
+ return Error(ExprLoc, "literal value out of range for directive");
+ getStreamer().EmitIntValue(IntValue, Size);
+ } else {
+ getStreamer().EmitValue(Value, Size, ExprLoc);
+ }
if (getLexer().is(AsmToken::EndOfStatement))
break;
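
The rewritten ParseDirectiveWord above range-checks constant expressions before emitting them: a literal is accepted only if it fits the directive's width either as an unsigned or as a signed value (the isUIntN/isIntN calls in the diff). A stand-alone sketch of that acceptance test, with fitsDirective as a hypothetical helper name:

    // Sketch of the .word/.byte literal range check: a constant is accepted
    // if it fits either as an N-bit unsigned or an N-bit signed value.
    #include <cassert>
    #include <cstdint>

    static bool fitsDirective(int64_t Value, unsigned SizeInBytes) {
      const unsigned N = 8 * SizeInBytes;
      const bool FitsUnsigned = (N >= 64) || (uint64_t)Value < (1ULL << N);
      const bool FitsSigned =
          (N >= 64) || (Value >= -(int64_t)(1ULL << (N - 1)) &&
                        Value < (int64_t)(1ULL << (N - 1)));
      return FitsUnsigned || FitsSigned;
    }

    int main() {
      assert(fitsDirective(0xFFFF, 2));   // unsigned 16-bit literal: accepted
      assert(fitsDirective(-1, 2));       // signed 16-bit literal: accepted
      assert(!fitsDirective(0x10000, 2)); // out of range for .word: rejected
      return 0;
    }
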
diff --git a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParserCommon.h b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParserCommon.h
index 7610806c4578..54538c804a03 100644
--- a/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParserCommon.h
+++ b/contrib/llvm/lib/Target/X86/AsmParser/X86AsmParserCommon.h
@@ -13,30 +13,25 @@
namespace llvm {
inline bool isImmSExti16i8Value(uint64_t Value) {
- return (( Value <= 0x000000000000007FULL)||
- (0x000000000000FF80ULL <= Value && Value <= 0x000000000000FFFFULL)||
- (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+ return isInt<8>(Value) ||
+ (isUInt<16>(Value) && isInt<8>(static_cast<int16_t>(Value)));
}
inline bool isImmSExti32i8Value(uint64_t Value) {
- return (( Value <= 0x000000000000007FULL)||
- (0x00000000FFFFFF80ULL <= Value && Value <= 0x00000000FFFFFFFFULL)||
- (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+ return isInt<8>(Value) ||
+ (isUInt<32>(Value) && isInt<8>(static_cast<int32_t>(Value)));
}
inline bool isImmSExti64i8Value(uint64_t Value) {
- return (( Value <= 0x000000000000007FULL)||
- (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+ return isInt<8>(Value);
}
inline bool isImmSExti64i32Value(uint64_t Value) {
- return (( Value <= 0x000000007FFFFFFFULL)||
- (0xFFFFFFFF80000000ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+ return isInt<32>(Value);
}
inline bool isImmUnsignedi8Value(uint64_t Value) {
- return (( Value <= 0x00000000000000FFULL)||
- (0xFFFFFFFFFFFFFF80ULL <= Value && Value <= 0xFFFFFFFFFFFFFFFFULL));
+ return isUInt<8>(Value) || isInt<8>(Value);
}
} // End of namespace llvm
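
The X86AsmParserCommon.h rewrite replaces the explicit hex-range comparisons with isInt<N>/isUInt<N>-style checks; for the 16-bit case, a value can be encoded in a sign-extended imm8 field if it is already an 8-bit signed value, or a 16-bit value whose low byte sign-extends back to it. A small self-check, assuming the old and new predicates must agree on every input:

    // Sketch comparing the old explicit-range form of isImmSExti16i8Value with
    // the new isInt/isUInt-style form over the full 16-bit range plus a
    // sign-extended negative; both should classify every value identically.
    #include <cassert>
    #include <cstdint>

    static bool oldForm(uint64_t V) {
      return (V <= 0x7FULL) ||
             (0xFF80ULL <= V && V <= 0xFFFFULL) ||
             (0xFFFFFFFFFFFFFF80ULL <= V);
    }

    static bool newForm(uint64_t V) {
      const bool IsInt8 = (int64_t)V >= -128 && (int64_t)V <= 127;
      const bool IsUInt16 = V <= 0xFFFFULL;
      const bool Low16SExt8 = (int16_t)V >= -128 && (int16_t)V <= 127;
      return IsInt8 || (IsUInt16 && Low16SExt8);
    }

    int main() {
      for (uint64_t V = 0; V <= 0xFFFF; ++V)
        assert(oldForm(V) == newForm(V));
      assert(oldForm(0xFFFFFFFFFFFFFFFFULL) == newForm(0xFFFFFFFFFFFFFFFFULL));
      return 0;
    }
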