Diffstat (limited to 'lib/Target/X86')
47 files changed, 4608 insertions, 2011 deletions
diff --git a/lib/Target/X86/AsmParser/X86AsmParser.cpp b/lib/Target/X86/AsmParser/X86AsmParser.cpp index f1e66ab9d2c3..f8588d818b75 100644 --- a/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ b/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -9,6 +9,8 @@ #include "llvm/Target/TargetAsmParser.h" #include "X86.h" +#include "X86Subtarget.h" +#include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringSwitch.h" #include "llvm/ADT/Twine.h" @@ -19,6 +21,7 @@ #include "llvm/MC/MCParser/MCAsmParser.h" #include "llvm/MC/MCParser/MCParsedAsmOperand.h" #include "llvm/Support/SourceMgr.h" +#include "llvm/Support/raw_ostream.h" #include "llvm/Target/TargetRegistry.h" #include "llvm/Target/TargetAsmParser.h" using namespace llvm; @@ -28,6 +31,7 @@ struct X86Operand; class X86ATTAsmParser : public TargetAsmParser { MCAsmParser &Parser; + TargetMachine &TM; protected: unsigned Is64Bit : 1; @@ -37,8 +41,6 @@ private: MCAsmLexer &getLexer() const { return Parser.getLexer(); } - void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); } - bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); } bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); @@ -48,13 +50,14 @@ private: bool ParseDirectiveWord(unsigned Size, SMLoc L); - void InstructionCleanup(MCInst &Inst); + bool MatchInstruction(SMLoc IDLoc, + const SmallVectorImpl<MCParsedAsmOperand*> &Operands, + MCInst &Inst); - /// @name Auto-generated Match Functions + /// @name Auto-generated Matcher Functions /// { - bool MatchInstruction(const SmallVectorImpl<MCParsedAsmOperand*> &Operands, - MCInst &Inst); + unsigned ComputeAvailableFeatures(const X86Subtarget *Subtarget) const; bool MatchInstructionImpl( const SmallVectorImpl<MCParsedAsmOperand*> &Operands, MCInst &Inst); @@ -62,27 +65,32 @@ private: /// } public: - X86ATTAsmParser(const Target &T, MCAsmParser &_Parser) - : TargetAsmParser(T), Parser(_Parser) {} + X86ATTAsmParser(const Target &T, MCAsmParser &_Parser, TargetMachine &TM) + : TargetAsmParser(T), Parser(_Parser), TM(TM) { + + // Initialize the set of available features. + setAvailableFeatures(ComputeAvailableFeatures( + &TM.getSubtarget<X86Subtarget>())); + } virtual bool ParseInstruction(StringRef Name, SMLoc NameLoc, SmallVectorImpl<MCParsedAsmOperand*> &Operands); virtual bool ParseDirective(AsmToken DirectiveID); }; - + class X86_32ATTAsmParser : public X86ATTAsmParser { public: - X86_32ATTAsmParser(const Target &T, MCAsmParser &_Parser) - : X86ATTAsmParser(T, _Parser) { + X86_32ATTAsmParser(const Target &T, MCAsmParser &_Parser, TargetMachine &TM) + : X86ATTAsmParser(T, _Parser, TM) { Is64Bit = false; } }; class X86_64ATTAsmParser : public X86ATTAsmParser { public: - X86_64ATTAsmParser(const Target &T, MCAsmParser &_Parser) - : X86ATTAsmParser(T, _Parser) { + X86_64ATTAsmParser(const Target &T, MCAsmParser &_Parser, TargetMachine &TM) + : X86ATTAsmParser(T, _Parser, TM) { Is64Bit = true; } }; @@ -90,7 +98,7 @@ public: } // end anonymous namespace /// @name Auto-generated Match Functions -/// { +/// { static unsigned MatchRegisterName(StringRef Name); @@ -109,7 +117,7 @@ struct X86Operand : public MCParsedAsmOperand { } Kind; SMLoc StartLoc, EndLoc; - + union { struct { const char *Data; @@ -141,6 +149,8 @@ struct X86Operand : public MCParsedAsmOperand { /// getEndLoc - Get the location of the last token of this operand. 
SMLoc getEndLoc() const { return EndLoc; } + virtual void dump(raw_ostream &OS) const {} + StringRef getToken() const { assert(Kind == Token && "Invalid access!"); return StringRef(Tok.Data, Tok.Length); @@ -185,7 +195,7 @@ struct X86Operand : public MCParsedAsmOperand { bool isToken() const {return Kind == Token; } bool isImm() const { return Kind == Immediate; } - + bool isImmSExti16i8() const { if (!isImm()) return false; @@ -260,10 +270,6 @@ struct X86Operand : public MCParsedAsmOperand { !getMemIndexReg() && getMemScale() == 1; } - bool isNoSegMem() const { - return Kind == Memory && !getMemSegReg(); - } - bool isReg() const { return Kind == Register; } void addExpr(MCInst &Inst, const MCExpr *Expr) const { @@ -298,14 +304,6 @@ struct X86Operand : public MCParsedAsmOperand { Inst.addOperand(MCOperand::CreateExpr(getMemDisp())); } - void addNoSegMemOperands(MCInst &Inst, unsigned N) const { - assert((N == 4) && "Invalid number of operands!"); - Inst.addOperand(MCOperand::CreateReg(getMemBaseReg())); - Inst.addOperand(MCOperand::CreateImm(getMemScale())); - Inst.addOperand(MCOperand::CreateReg(getMemIndexReg())); - addExpr(Inst, getMemDisp()); - } - static X86Operand *CreateToken(StringRef Str, SMLoc Loc) { X86Operand *Res = new X86Operand(Token, Loc, Loc); Res->Tok.Data = Str.data(); @@ -376,13 +374,19 @@ bool X86ATTAsmParser::ParseRegister(unsigned &RegNo, // FIXME: Validate register for the current architecture; we have to do // validation later, so maybe there is no need for this here. RegNo = MatchRegisterName(Tok.getString()); - + + // FIXME: This should be done using Requires<In32BitMode> and + // Requires<In64BitMode> so "eiz" usage in 64-bit instructions + // can be also checked. + if (RegNo == X86::RIZ && !Is64Bit) + return Error(Tok.getLoc(), "riz register in 64-bit mode only"); + // Parse %st(1) and "%st" as "%st(0)" if (RegNo == 0 && Tok.getString() == "st") { RegNo = X86::ST0; EndLoc = Tok.getLoc(); Parser.Lex(); // Eat 'st' - + // Check to see if we have '(4)' after %st. if (getLexer().isNot(AsmToken::LParen)) return false; @@ -403,15 +407,15 @@ bool X86ATTAsmParser::ParseRegister(unsigned &RegNo, case 7: RegNo = X86::ST7; break; default: return Error(IntTok.getLoc(), "invalid stack index"); } - + if (getParser().Lex().isNot(AsmToken::RParen)) return Error(Parser.getTok().getLoc(), "expected ')'"); - + EndLoc = Tok.getLoc(); Parser.Lex(); // Eat ')' return false; } - + // If this is "db[0-7]", match it as an alias // for dr[0-7]. if (RegNo == 0 && Tok.getString().size() == 3 && @@ -426,14 +430,14 @@ bool X86ATTAsmParser::ParseRegister(unsigned &RegNo, case '6': RegNo = X86::DR6; break; case '7': RegNo = X86::DR7; break; } - + if (RegNo != 0) { EndLoc = Tok.getLoc(); Parser.Lex(); // Eat it. return false; } } - + if (RegNo == 0) return Error(Tok.getLoc(), "invalid register name"); @@ -452,13 +456,17 @@ X86Operand *X86ATTAsmParser::ParseOperand() { unsigned RegNo; SMLoc Start, End; if (ParseRegister(RegNo, Start, End)) return 0; - + if (RegNo == X86::EIZ || RegNo == X86::RIZ) { + Error(Start, "eiz and riz can only be used as index registers"); + return 0; + } + // If this is a segment register followed by a ':', then this is the start // of a memory reference, otherwise this is a normal register reference. if (getLexer().isNot(AsmToken::Colon)) return X86Operand::CreateReg(RegNo, Start, End); - - + + getParser().Lex(); // Eat the colon. 
return ParseMemOperand(RegNo, Start); } @@ -477,7 +485,7 @@ X86Operand *X86ATTAsmParser::ParseOperand() { /// ParseMemOperand: segment: disp(basereg, indexreg, scale). The '%ds:' prefix /// has already been parsed if present. X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { - + // We have to disambiguate a parenthesized expression "(4+5)" from the start // of a memory operand with a missing displacement "(%ebx)" or "(,%eax)". The // only way to do this without lookahead is to eat the '(' and see what is @@ -486,7 +494,7 @@ X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { if (getLexer().isNot(AsmToken::LParen)) { SMLoc ExprEnd; if (getParser().ParseExpression(Disp, ExprEnd)) return 0; - + // After parsing the base expression we could either have a parenthesized // memory address or not. If not, return now. If so, eat the (. if (getLexer().isNot(AsmToken::LParen)) { @@ -495,7 +503,7 @@ X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { return X86Operand::CreateMem(Disp, MemStart, ExprEnd); return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd); } - + // Eat the '('. Parser.Lex(); } else { @@ -503,17 +511,17 @@ X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { // so we have to eat the ( to see beyond it. SMLoc LParenLoc = Parser.getTok().getLoc(); Parser.Lex(); // Eat the '('. - + if (getLexer().is(AsmToken::Percent) || getLexer().is(AsmToken::Comma)) { // Nothing to do here, fall into the code below with the '(' part of the // memory operand consumed. } else { SMLoc ExprEnd; - + // It must be an parenthesized expression, parse it now. if (getParser().ParseParenExpression(Disp, ExprEnd)) return 0; - + // After parsing the base expression we could either have a parenthesized // memory address or not. If not, return now. If so, eat the (. if (getLexer().isNot(AsmToken::LParen)) { @@ -522,21 +530,25 @@ X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { return X86Operand::CreateMem(Disp, LParenLoc, ExprEnd); return X86Operand::CreateMem(SegReg, Disp, 0, 0, 1, MemStart, ExprEnd); } - + // Eat the '('. Parser.Lex(); } } - + // If we reached here, then we just ate the ( of the memory operand. Process // the rest of the memory operand. unsigned BaseReg = 0, IndexReg = 0, Scale = 1; - + if (getLexer().is(AsmToken::Percent)) { SMLoc L; if (ParseRegister(BaseReg, L, L)) return 0; + if (BaseReg == X86::EIZ || BaseReg == X86::RIZ) { + Error(L, "eiz and riz can only be used as index registers"); + return 0; + } } - + if (getLexer().is(AsmToken::Comma)) { Parser.Lex(); // Eat the comma. @@ -545,11 +557,11 @@ X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { // correctly. // // Not that even though it would be completely consistent to support syntax - // like "1(%eax,,1)", the assembler doesn't. + // like "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this. if (getLexer().is(AsmToken::Percent)) { SMLoc L; if (ParseRegister(IndexReg, L, L)) return 0; - + if (getLexer().isNot(AsmToken::RParen)) { // Parse the scale amount: // ::= ',' [scale-expression] @@ -566,7 +578,7 @@ X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { int64_t ScaleVal; if (getParser().ParseAbsoluteExpression(ScaleVal)) return 0; - + // Validate the scale amount. 
if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && ScaleVal != 8){ Error(Loc, "scale factor in address must be 1, 2, 4 or 8"); @@ -576,19 +588,20 @@ X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { } } } else if (getLexer().isNot(AsmToken::RParen)) { - // Otherwise we have the unsupported form of a scale amount without an + // A scale amount without an index is ignored. // index. SMLoc Loc = Parser.getTok().getLoc(); int64_t Value; if (getParser().ParseAbsoluteExpression(Value)) return 0; - - Error(Loc, "cannot have scale factor without index register"); - return 0; + + if (Value != 1) + Warning(Loc, "scale factor without index register is ignored"); + Scale = 1; } } - + // Ok, we've eaten the memory operand, verify we have a ')' and eat it too. if (getLexer().isNot(AsmToken::RParen)) { Error(Parser.getTok().getLoc(), "unexpected token in memory operand"); @@ -596,7 +609,7 @@ X86Operand *X86ATTAsmParser::ParseMemOperand(unsigned SegReg, SMLoc MemStart) { } SMLoc MemEnd = Parser.getTok().getLoc(); Parser.Lex(); // Eat the ')'. - + return X86Operand::CreateMem(SegReg, Disp, BaseReg, IndexReg, Scale, MemStart, MemEnd); } @@ -743,6 +756,23 @@ ParseInstruction(StringRef Name, SMLoc NameLoc, } } } + + // FIXME: Hack to recognize vpclmul<src1_quadword, src2_quadword>dq + if (PatchedName.startswith("vpclmul")) { + unsigned CLMULQuadWordSelect = StringSwitch<unsigned>( + PatchedName.slice(7, PatchedName.size() - 2)) + .Case("lqlq", 0x00) // src1[63:0], src2[63:0] + .Case("hqlq", 0x01) // src1[127:64], src2[63:0] + .Case("lqhq", 0x10) // src1[63:0], src2[127:64] + .Case("hqhq", 0x11) // src1[127:64], src2[127:64] + .Default(~0U); + if (CLMULQuadWordSelect != ~0U) { + ExtraImmOp = MCConstantExpr::Create(CLMULQuadWordSelect, + getParser().getContext()); + assert(PatchedName.endswith("dq") && "Unexpected mnemonic!"); + PatchedName = "vpclmulqdq"; + } + } Operands.push_back(X86Operand::CreateToken(PatchedName, NameLoc)); if (ExtraImmOp) @@ -785,6 +815,20 @@ ParseInstruction(StringRef Name, SMLoc NameLoc, Operands.erase(Operands.begin() + 1); } + // FIXME: Hack to handle "out[bwl]? %al, (%dx)" -> "outb %al, %dx". + if ((Name == "outb" || Name == "outw" || Name == "outl" || Name == "out") && + Operands.size() == 3) { + X86Operand &Op = *(X86Operand*)Operands.back(); + if (Op.isMem() && Op.Mem.SegReg == 0 && + isa<MCConstantExpr>(Op.Mem.Disp) && + cast<MCConstantExpr>(Op.Mem.Disp)->getValue() == 0 && + Op.Mem.BaseReg == MatchRegisterName("dx") && Op.Mem.IndexReg == 0) { + SMLoc Loc = Op.getEndLoc(); + Operands.back() = X86Operand::CreateReg(Op.Mem.BaseReg, Loc, Loc); + delete &Op; + } + } + // FIXME: Hack to handle "f{mul*,add*,sub*,div*} $op, st(0)" the same as // "f{mul*,add*,sub*,div*} $op" if ((Name.startswith("fmul") || Name.startswith("fadd") || @@ -796,6 +840,16 @@ ParseInstruction(StringRef Name, SMLoc NameLoc, Operands.erase(Operands.begin() + 2); } + // FIXME: Hack to handle "imul <imm>, B" which is an alias for "imul <imm>, B, + // B". + if (Name.startswith("imul") && Operands.size() == 3 && + static_cast<X86Operand*>(Operands[1])->isImm() && + static_cast<X86Operand*>(Operands.back())->isReg()) { + X86Operand *Op = static_cast<X86Operand*>(Operands.back()); + Operands.push_back(X86Operand::CreateReg(Op->getReg(), Op->getStartLoc(), + Op->getEndLoc())); + } + return false; } @@ -819,7 +873,7 @@ bool X86ATTAsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) { if (getLexer().is(AsmToken::EndOfStatement)) break; - + // FIXME: Improve diagnostic. 
if (getLexer().isNot(AsmToken::Comma)) return Error(L, "unexpected token in directive"); @@ -831,82 +885,32 @@ bool X86ATTAsmParser::ParseDirectiveWord(unsigned Size, SMLoc L) { return false; } -/// LowerMOffset - Lower an 'moffset' form of an instruction, which just has a -/// imm operand, to having "rm" or "mr" operands with the offset in the disp -/// field. -static void LowerMOffset(MCInst &Inst, unsigned Opc, unsigned RegNo, - bool isMR) { - MCOperand Disp = Inst.getOperand(0); - - // Start over with an empty instruction. - Inst = MCInst(); - Inst.setOpcode(Opc); - - if (!isMR) - Inst.addOperand(MCOperand::CreateReg(RegNo)); - - // Add the mem operand. - Inst.addOperand(MCOperand::CreateReg(0)); // Segment - Inst.addOperand(MCOperand::CreateImm(1)); // Scale - Inst.addOperand(MCOperand::CreateReg(0)); // IndexReg - Inst.addOperand(Disp); // Displacement - Inst.addOperand(MCOperand::CreateReg(0)); // BaseReg - - if (isMR) - Inst.addOperand(MCOperand::CreateReg(RegNo)); -} - -// FIXME: Custom X86 cleanup function to implement a temporary hack to handle -// matching INCL/DECL correctly for x86_64. This needs to be replaced by a -// proper mechanism for supporting (ambiguous) feature dependent instructions. -void X86ATTAsmParser::InstructionCleanup(MCInst &Inst) { - if (!Is64Bit) return; - - switch (Inst.getOpcode()) { - case X86::DEC16r: Inst.setOpcode(X86::DEC64_16r); break; - case X86::DEC16m: Inst.setOpcode(X86::DEC64_16m); break; - case X86::DEC32r: Inst.setOpcode(X86::DEC64_32r); break; - case X86::DEC32m: Inst.setOpcode(X86::DEC64_32m); break; - case X86::INC16r: Inst.setOpcode(X86::INC64_16r); break; - case X86::INC16m: Inst.setOpcode(X86::INC64_16m); break; - case X86::INC32r: Inst.setOpcode(X86::INC64_32r); break; - case X86::INC32m: Inst.setOpcode(X86::INC64_32m); break; - - // moffset instructions are x86-32 only. - case X86::MOV8o8a: LowerMOffset(Inst, X86::MOV8rm , X86::AL , false); break; - case X86::MOV16o16a: LowerMOffset(Inst, X86::MOV16rm, X86::AX , false); break; - case X86::MOV32o32a: LowerMOffset(Inst, X86::MOV32rm, X86::EAX, false); break; - case X86::MOV8ao8: LowerMOffset(Inst, X86::MOV8mr , X86::AL , true); break; - case X86::MOV16ao16: LowerMOffset(Inst, X86::MOV16mr, X86::AX , true); break; - case X86::MOV32ao32: LowerMOffset(Inst, X86::MOV32mr, X86::EAX, true); break; - } -} bool -X86ATTAsmParser::MatchInstruction(const SmallVectorImpl<MCParsedAsmOperand*> +X86ATTAsmParser::MatchInstruction(SMLoc IDLoc, + const SmallVectorImpl<MCParsedAsmOperand*> &Operands, MCInst &Inst) { + assert(!Operands.empty() && "Unexpect empty operand list!"); + + X86Operand *Op = static_cast<X86Operand*>(Operands[0]); + assert(Op->isToken() && "Leading operand should always be a mnemonic!"); + // First, try a direct match. if (!MatchInstructionImpl(Operands, Inst)) return false; - // Ignore anything which is obviously not a suffix match. - if (Operands.size() == 0) - return true; - X86Operand *Op = static_cast<X86Operand*>(Operands[0]); - if (!Op->isToken() || Op->getToken().size() > 15) - return true; - // FIXME: Ideally, we would only attempt suffix matches for things which are // valid prefixes, and we could just infer the right unambiguous // type. However, that requires substantially more matcher support than the // following hack. // Change the operand to point to a temporary token. 
- char Tmp[16]; StringRef Base = Op->getToken(); - memcpy(Tmp, Base.data(), Base.size()); - Op->setTokenValue(StringRef(Tmp, Base.size() + 1)); + SmallString<16> Tmp; + Tmp += Base; + Tmp += ' '; + Op->setTokenValue(Tmp.str()); // Check for the various suffix matches. Tmp[Base.size()] = 'b'; @@ -928,6 +932,38 @@ X86ATTAsmParser::MatchInstruction(const SmallVectorImpl<MCParsedAsmOperand*> return false; // Otherwise, the match failed. + + // If we had multiple suffix matches, then identify this as an ambiguous + // match. + if (MatchB + MatchW + MatchL + MatchQ != 4) { + char MatchChars[4]; + unsigned NumMatches = 0; + if (!MatchB) + MatchChars[NumMatches++] = 'b'; + if (!MatchW) + MatchChars[NumMatches++] = 'w'; + if (!MatchL) + MatchChars[NumMatches++] = 'l'; + if (!MatchQ) + MatchChars[NumMatches++] = 'q'; + + SmallString<126> Msg; + raw_svector_ostream OS(Msg); + OS << "ambiguous instructions require an explicit suffix (could be "; + for (unsigned i = 0; i != NumMatches; ++i) { + if (i != 0) + OS << ", "; + if (i + 1 == NumMatches) + OS << "or "; + OS << "'" << Base << MatchChars[i] << "'"; + } + OS << ")"; + Error(IDLoc, OS.str()); + } else { + // FIXME: We should give nicer diagnostics about the exact failure. + Error(IDLoc, "unrecognized instruction"); + } + return true; } diff --git a/lib/Target/X86/AsmPrinter/CMakeLists.txt b/lib/Target/X86/AsmPrinter/CMakeLists.txt index b70a587ec4e2..033973eeeff9 100644 --- a/lib/Target/X86/AsmPrinter/CMakeLists.txt +++ b/lib/Target/X86/AsmPrinter/CMakeLists.txt @@ -2,8 +2,7 @@ include_directories( ${CMAKE_CURRENT_BINARY_DIR}/.. ${CMAKE_CURRENT_SOURCE_DIR}/ add_llvm_library(LLVMX86AsmPrinter X86ATTInstPrinter.cpp - X86AsmPrinter.cpp X86IntelInstPrinter.cpp - X86MCInstLower.cpp + X86InstComments.cpp ) add_dependencies(LLVMX86AsmPrinter X86CodeGenTable_gen) diff --git a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp index f2cdb5ba55eb..554b96c96e0e 100644 --- a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp +++ b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.cpp @@ -14,6 +14,7 @@ #define DEBUG_TYPE "asm-printer" #include "X86ATTInstPrinter.h" +#include "X86InstComments.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCExpr.h" @@ -31,6 +32,10 @@ using namespace llvm; void X86ATTInstPrinter::printInst(const MCInst *MI, raw_ostream &OS) { printInstruction(MI, OS); + + // If verbose assembly is enabled, we can print some informative comments. 
+ if (CommentStream) + EmitAnyX86InstComments(MI, *CommentStream, getRegisterName); } StringRef X86ATTInstPrinter::getOpcodeName(unsigned Opcode) const { return getInstructionName(Opcode); diff --git a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h index 3be4bae5bec2..eb986643014c 100644 --- a/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h +++ b/lib/Target/X86/AsmPrinter/X86ATTInstPrinter.h @@ -56,6 +56,9 @@ public: void printi128mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { printMemReference(MI, OpNo, O); } + void printi256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { + printMemReference(MI, OpNo, O); + } void printf32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { printMemReference(MI, OpNo, O); } diff --git a/lib/Target/X86/AsmPrinter/X86InstComments.cpp b/lib/Target/X86/AsmPrinter/X86InstComments.cpp new file mode 100644 index 000000000000..da9d5a3579e5 --- /dev/null +++ b/lib/Target/X86/AsmPrinter/X86InstComments.cpp @@ -0,0 +1,232 @@ +//===-- X86InstComments.cpp - Generate verbose-asm comments for instrs ----===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This defines functionality used to emit comments about X86 instructions to +// an output stream for -fverbose-asm. +// +//===----------------------------------------------------------------------===// + +#include "X86InstComments.h" +#include "X86GenInstrNames.inc" +#include "llvm/MC/MCInst.h" +#include "llvm/Support/raw_ostream.h" +#include "../X86ShuffleDecode.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +// Top Level Entrypoint +//===----------------------------------------------------------------------===// + +/// EmitAnyX86InstComments - This function decodes x86 instructions and prints +/// newline terminated strings to the specified string if desired. This +/// information is shown in disassembly dumps when verbose assembly is enabled. +void llvm::EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS, + const char *(*getRegName)(unsigned)) { + // If this is a shuffle operation, the switch should fill in this state. + SmallVector<unsigned, 8> ShuffleMask; + const char *DestName = 0, *Src1Name = 0, *Src2Name = 0; + + switch (MI->getOpcode()) { + case X86::INSERTPSrr: + Src1Name = getRegName(MI->getOperand(1).getReg()); + Src2Name = getRegName(MI->getOperand(2).getReg()); + DecodeINSERTPSMask(MI->getOperand(3).getImm(), ShuffleMask); + break; + + case X86::MOVLHPSrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + Src1Name = getRegName(MI->getOperand(0).getReg()); + DecodeMOVLHPSMask(2, ShuffleMask); + break; + + case X86::MOVHLPSrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + Src1Name = getRegName(MI->getOperand(0).getReg()); + DecodeMOVHLPSMask(2, ShuffleMask); + break; + + case X86::PSHUFDri: + Src1Name = getRegName(MI->getOperand(1).getReg()); + // FALL THROUGH. + case X86::PSHUFDmi: + DestName = getRegName(MI->getOperand(0).getReg()); + DecodePSHUFMask(4, MI->getOperand(MI->getNumOperands()-1).getImm(), + ShuffleMask); + break; + + case X86::PSHUFHWri: + Src1Name = getRegName(MI->getOperand(1).getReg()); + // FALL THROUGH. 
+ case X86::PSHUFHWmi: + DestName = getRegName(MI->getOperand(0).getReg()); + DecodePSHUFHWMask(MI->getOperand(MI->getNumOperands()-1).getImm(), + ShuffleMask); + break; + case X86::PSHUFLWri: + Src1Name = getRegName(MI->getOperand(1).getReg()); + // FALL THROUGH. + case X86::PSHUFLWmi: + DestName = getRegName(MI->getOperand(0).getReg()); + DecodePSHUFLWMask(MI->getOperand(MI->getNumOperands()-1).getImm(), + ShuffleMask); + break; + + case X86::PUNPCKHBWrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + // FALL THROUGH. + case X86::PUNPCKHBWrm: + Src1Name = getRegName(MI->getOperand(0).getReg()); + DecodePUNPCKHMask(16, ShuffleMask); + break; + case X86::PUNPCKHWDrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + // FALL THROUGH. + case X86::PUNPCKHWDrm: + Src1Name = getRegName(MI->getOperand(0).getReg()); + DecodePUNPCKHMask(8, ShuffleMask); + break; + case X86::PUNPCKHDQrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + // FALL THROUGH. + case X86::PUNPCKHDQrm: + Src1Name = getRegName(MI->getOperand(0).getReg()); + DecodePUNPCKHMask(4, ShuffleMask); + break; + case X86::PUNPCKHQDQrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + // FALL THROUGH. + case X86::PUNPCKHQDQrm: + Src1Name = getRegName(MI->getOperand(0).getReg()); + DecodePUNPCKHMask(2, ShuffleMask); + break; + + case X86::PUNPCKLBWrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + // FALL THROUGH. + case X86::PUNPCKLBWrm: + Src1Name = getRegName(MI->getOperand(0).getReg()); + DecodePUNPCKLMask(16, ShuffleMask); + break; + case X86::PUNPCKLWDrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + // FALL THROUGH. + case X86::PUNPCKLWDrm: + Src1Name = getRegName(MI->getOperand(0).getReg()); + DecodePUNPCKLMask(8, ShuffleMask); + break; + case X86::PUNPCKLDQrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + // FALL THROUGH. + case X86::PUNPCKLDQrm: + Src1Name = getRegName(MI->getOperand(0).getReg()); + DecodePUNPCKLMask(4, ShuffleMask); + break; + case X86::PUNPCKLQDQrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + // FALL THROUGH. + case X86::PUNPCKLQDQrm: + Src1Name = getRegName(MI->getOperand(0).getReg()); + DecodePUNPCKLMask(2, ShuffleMask); + break; + + case X86::SHUFPDrri: + DecodeSHUFPSMask(2, MI->getOperand(3).getImm(), ShuffleMask); + Src1Name = getRegName(MI->getOperand(0).getReg()); + Src2Name = getRegName(MI->getOperand(2).getReg()); + break; + + case X86::SHUFPSrri: + Src2Name = getRegName(MI->getOperand(2).getReg()); + // FALL THROUGH. + case X86::SHUFPSrmi: + DecodeSHUFPSMask(4, MI->getOperand(3).getImm(), ShuffleMask); + Src1Name = getRegName(MI->getOperand(0).getReg()); + break; + + case X86::UNPCKLPDrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + // FALL THROUGH. + case X86::UNPCKLPDrm: + DecodeUNPCKLPMask(2, ShuffleMask); + Src1Name = getRegName(MI->getOperand(0).getReg()); + break; + case X86::UNPCKLPSrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + // FALL THROUGH. + case X86::UNPCKLPSrm: + DecodeUNPCKLPMask(4, ShuffleMask); + Src1Name = getRegName(MI->getOperand(0).getReg()); + break; + case X86::UNPCKHPDrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + // FALL THROUGH. + case X86::UNPCKHPDrm: + DecodeUNPCKHPMask(2, ShuffleMask); + Src1Name = getRegName(MI->getOperand(0).getReg()); + break; + case X86::UNPCKHPSrr: + Src2Name = getRegName(MI->getOperand(2).getReg()); + // FALL THROUGH. 
+ case X86::UNPCKHPSrm: + DecodeUNPCKHPMask(4, ShuffleMask); + Src1Name = getRegName(MI->getOperand(0).getReg()); + break; + } + + + // If this was a shuffle operation, print the shuffle mask. + if (!ShuffleMask.empty()) { + if (DestName == 0) DestName = Src1Name; + OS << (DestName ? DestName : "mem") << " = "; + + // If the two sources are the same, canonicalize the input elements to be + // from the first src so that we get larger element spans. + if (Src1Name == Src2Name) { + for (unsigned i = 0, e = ShuffleMask.size(); i != e; ++i) { + if ((int)ShuffleMask[i] >= 0 && // Not sentinel. + ShuffleMask[i] >= e) // From second mask. + ShuffleMask[i] -= e; + } + } + + // The shuffle mask specifies which elements of the src1/src2 fill in the + // destination, with a few sentinel values. Loop through and print them + // out. + for (unsigned i = 0, e = ShuffleMask.size(); i != e; ++i) { + if (i != 0) + OS << ','; + if (ShuffleMask[i] == SM_SentinelZero) { + OS << "zero"; + continue; + } + + // Otherwise, it must come from src1 or src2. Print the span of elements + // that comes from this src. + bool isSrc1 = ShuffleMask[i] < ShuffleMask.size(); + const char *SrcName = isSrc1 ? Src1Name : Src2Name; + OS << (SrcName ? SrcName : "mem") << '['; + bool IsFirst = true; + while (i != e && + (int)ShuffleMask[i] >= 0 && + (ShuffleMask[i] < ShuffleMask.size()) == isSrc1) { + if (!IsFirst) + OS << ','; + else + IsFirst = false; + OS << ShuffleMask[i] % ShuffleMask.size(); + ++i; + } + OS << ']'; + --i; // For loop increments element #. + } + //MI->print(OS, 0); + OS << "\n"; + } + +} diff --git a/lib/Target/X86/AsmPrinter/X86InstComments.h b/lib/Target/X86/AsmPrinter/X86InstComments.h new file mode 100644 index 000000000000..6b86db4f9e5c --- /dev/null +++ b/lib/Target/X86/AsmPrinter/X86InstComments.h @@ -0,0 +1,25 @@ +//===-- X86InstComments.h - Generate verbose-asm comments for instrs ------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This defines functionality used to emit comments about X86 instructions to +// an output stream for -fverbose-asm. +// +//===----------------------------------------------------------------------===// + +#ifndef X86_INST_COMMENTS_H +#define X86_INST_COMMENTS_H + +namespace llvm { + class MCInst; + class raw_ostream; + void EmitAnyX86InstComments(const MCInst *MI, raw_ostream &OS, + const char *(*getRegName)(unsigned)); +} + +#endif diff --git a/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp b/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp index a632047f6592..5625b0ea618f 100644 --- a/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp +++ b/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.cpp @@ -14,6 +14,7 @@ #define DEBUG_TYPE "asm-printer" #include "X86IntelInstPrinter.h" +#include "X86InstComments.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCExpr.h" @@ -30,6 +31,10 @@ using namespace llvm; void X86IntelInstPrinter::printInst(const MCInst *MI, raw_ostream &OS) { printInstruction(MI, OS); + + // If verbose assembly is enabled, we can print some informative comments. 
+ if (CommentStream) + EmitAnyX86InstComments(MI, *CommentStream, getRegisterName); } StringRef X86IntelInstPrinter::getOpcodeName(unsigned Opcode) const { return getInstructionName(Opcode); diff --git a/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h b/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h index 4d680744dd60..6f120322742b 100644 --- a/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h +++ b/lib/Target/X86/AsmPrinter/X86IntelInstPrinter.h @@ -64,6 +64,10 @@ public: O << "XMMWORD PTR "; printMemReference(MI, OpNo, O); } + void printi256mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { + O << "YMMWORD PTR "; + printMemReference(MI, OpNo, O); + } void printf32mem(const MCInst *MI, unsigned OpNo, raw_ostream &O) { O << "DWORD PTR "; printMemReference(MI, OpNo, O); diff --git a/lib/Target/X86/CMakeLists.txt b/lib/Target/X86/CMakeLists.txt index 133482036ce1..e9399f5c8322 100644 --- a/lib/Target/X86/CMakeLists.txt +++ b/lib/Target/X86/CMakeLists.txt @@ -18,23 +18,24 @@ tablegen(X86GenEDInfo.inc -gen-enhanced-disassembly-info) set(sources SSEDomainFix.cpp X86AsmBackend.cpp - X86CodeEmitter.cpp + X86AsmPrinter.cpp X86COFFMachineModuleInfo.cpp + X86CodeEmitter.cpp X86ELFWriterInfo.cpp + X86FastISel.cpp X86FloatingPoint.cpp - X86FloatingPointRegKill.cpp X86ISelDAGToDAG.cpp X86ISelLowering.cpp X86InstrInfo.cpp X86JITInfo.cpp X86MCAsmInfo.cpp X86MCCodeEmitter.cpp + X86MCInstLower.cpp X86RegisterInfo.cpp + X86SelectionDAGInfo.cpp X86Subtarget.cpp X86TargetMachine.cpp X86TargetObjectFile.cpp - X86FastISel.cpp - X86SelectionDAGInfo.cpp ) if( CMAKE_CL_64 ) @@ -49,4 +50,3 @@ endif() add_llvm_target(X86CodeGen ${sources}) -target_link_libraries (LLVMX86CodeGen LLVMSelectionDAG) diff --git a/lib/Target/X86/README-FPStack.txt b/lib/Target/X86/README-FPStack.txt index be28e8b394a4..39efd2dbcf1a 100644 --- a/lib/Target/X86/README-FPStack.txt +++ b/lib/Target/X86/README-FPStack.txt @@ -27,8 +27,8 @@ def FpIADD32m : FpI<(ops RFP:$dst, RFP:$src1, i32mem:$src2), OneArgFPRW, //===---------------------------------------------------------------------===// -The FP stackifier needs to be global. Also, it should handle simple permutates -to reduce number of shuffle instructions, e.g. turning: +The FP stackifier should handle simple permutates to reduce number of shuffle +instructions, e.g. turning: fld P -> fld Q fld Q fld P diff --git a/lib/Target/X86/README-SSE.txt b/lib/Target/X86/README-SSE.txt index b6aba93f3738..f96b22f1e204 100644 --- a/lib/Target/X86/README-SSE.txt +++ b/lib/Target/X86/README-SSE.txt @@ -2,8 +2,46 @@ // Random ideas for the X86 backend: SSE-specific stuff. //===---------------------------------------------------------------------===// -- Consider eliminating the unaligned SSE load intrinsics, replacing them with - unaligned LLVM load instructions. +//===---------------------------------------------------------------------===// + +SSE Variable shift can be custom lowered to something like this, which uses a +small table + unaligned load + shuffle instead of going through memory. + +__m128i_shift_right: + .byte 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 + .byte -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1 + +... +__m128i shift_right(__m128i value, unsigned long offset) { + return _mm_shuffle_epi8(value, + _mm_loadu_si128((__m128 *) (___m128i_shift_right + offset))); +} + +//===---------------------------------------------------------------------===// + +SSE has instructions for doing operations on complex numbers, we should pattern +match them. 
Compiling this: + +_Complex float f32(_Complex float A, _Complex float B) { + return A+B; +} + +into: + +_f32: + movdqa %xmm0, %xmm2 + addss %xmm1, %xmm2 + pshufd $16, %xmm2, %xmm2 + pshufd $1, %xmm1, %xmm1 + pshufd $1, %xmm0, %xmm0 + addss %xmm1, %xmm0 + pshufd $16, %xmm0, %xmm1 + movdqa %xmm2, %xmm0 + unpcklps %xmm1, %xmm0 + ret + +seems silly. + //===---------------------------------------------------------------------===// diff --git a/lib/Target/X86/README.txt b/lib/Target/X86/README.txt index efc0cd82f23e..a305ae6ec550 100644 --- a/lib/Target/X86/README.txt +++ b/lib/Target/X86/README.txt @@ -1135,13 +1135,6 @@ void test(double *P) { //===---------------------------------------------------------------------===// -handling llvm.memory.barrier on pre SSE2 cpus - -should generate: -lock ; mov %esp, %esp - -//===---------------------------------------------------------------------===// - The generated code on x86 for checking for signed overflow on a multiply the obvious way is much longer than it needs to be. @@ -1870,3 +1863,100 @@ The code produced by gcc is 3 bytes shorter. This sort of construct often shows up with bitfields. //===---------------------------------------------------------------------===// + +Take the following C code: +int f(int a, int b) { return (unsigned char)a == (unsigned char)b; } + +We generate the following IR with clang: +define i32 @f(i32 %a, i32 %b) nounwind readnone { +entry: + %tmp = xor i32 %b, %a ; <i32> [#uses=1] + %tmp6 = and i32 %tmp, 255 ; <i32> [#uses=1] + %cmp = icmp eq i32 %tmp6, 0 ; <i1> [#uses=1] + %conv5 = zext i1 %cmp to i32 ; <i32> [#uses=1] + ret i32 %conv5 +} + +And the following x86 code: + xorl %esi, %edi + testb $-1, %dil + sete %al + movzbl %al, %eax + ret + +A cmpb instead of the xorl+testb would be one instruction shorter. + +//===---------------------------------------------------------------------===// + +Given the following C code: +int f(int a, int b) { return (signed char)a == (signed char)b; } + +We generate the following IR with clang: +define i32 @f(i32 %a, i32 %b) nounwind readnone { +entry: + %sext = shl i32 %a, 24 ; <i32> [#uses=1] + %conv1 = ashr i32 %sext, 24 ; <i32> [#uses=1] + %sext6 = shl i32 %b, 24 ; <i32> [#uses=1] + %conv4 = ashr i32 %sext6, 24 ; <i32> [#uses=1] + %cmp = icmp eq i32 %conv1, %conv4 ; <i1> [#uses=1] + %conv5 = zext i1 %cmp to i32 ; <i32> [#uses=1] + ret i32 %conv5 +} + +And the following x86 code: + movsbl %sil, %eax + movsbl %dil, %ecx + cmpl %eax, %ecx + sete %al + movzbl %al, %eax + ret + + +It should be possible to eliminate the sign extensions. 
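As in the unsigned-char case above, a single cmpb of the low argument bytes followed by the same sete/movzbl tail should suffice here too, since an equality comparison is unaffected by the sign extensions.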
+ +//===---------------------------------------------------------------------===// + +LLVM misses a load+store narrowing opportunity in this code: + +%struct.bf = type { i64, i16, i16, i32 } + +@bfi = external global %struct.bf* ; <%struct.bf**> [#uses=2] + +define void @t1() nounwind ssp { +entry: + %0 = load %struct.bf** @bfi, align 8 ; <%struct.bf*> [#uses=1] + %1 = getelementptr %struct.bf* %0, i64 0, i32 1 ; <i16*> [#uses=1] + %2 = bitcast i16* %1 to i32* ; <i32*> [#uses=2] + %3 = load i32* %2, align 1 ; <i32> [#uses=1] + %4 = and i32 %3, -65537 ; <i32> [#uses=1] + store i32 %4, i32* %2, align 1 + %5 = load %struct.bf** @bfi, align 8 ; <%struct.bf*> [#uses=1] + %6 = getelementptr %struct.bf* %5, i64 0, i32 1 ; <i16*> [#uses=1] + %7 = bitcast i16* %6 to i32* ; <i32*> [#uses=2] + %8 = load i32* %7, align 1 ; <i32> [#uses=1] + %9 = and i32 %8, -131073 ; <i32> [#uses=1] + store i32 %9, i32* %7, align 1 + ret void +} + +LLVM currently emits this: + + movq bfi(%rip), %rax + andl $-65537, 8(%rax) + movq bfi(%rip), %rax + andl $-131073, 8(%rax) + ret + +It could narrow the loads and stores to emit this: + + movq bfi(%rip), %rax + andb $-2, 10(%rax) + movq bfi(%rip), %rax + andb $-3, 10(%rax) + ret + +The trouble is that there is a TokenFactor between the store and the +load, making it non-trivial to determine if there's anything between +the load and the store which would prohibit narrowing. + +//===---------------------------------------------------------------------===// diff --git a/lib/Target/X86/SSEDomainFix.cpp b/lib/Target/X86/SSEDomainFix.cpp index dab070e1febd..13680c592e01 100644 --- a/lib/Target/X86/SSEDomainFix.cpp +++ b/lib/Target/X86/SSEDomainFix.cpp @@ -115,7 +115,7 @@ class SSEDomainFixPass : public MachineFunctionPass { unsigned Distance; public: - SSEDomainFixPass() : MachineFunctionPass(&ID) {} + SSEDomainFixPass() : MachineFunctionPass(ID) {} virtual void getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesAll(); diff --git a/lib/Target/X86/X86.h b/lib/Target/X86/X86.h index 677781d3730e..27e88505150b 100644 --- a/lib/Target/X86/X86.h +++ b/lib/Target/X86/X86.h @@ -49,11 +49,6 @@ FunctionPass *createX86FloatingPointStackifierPass(); /// crossings. FunctionPass *createSSEDomainFixPass(); -/// createX87FPRegKillInserterPass - This function returns a pass which -/// inserts FP_REG_KILL instructions where needed. -/// -FunctionPass *createX87FPRegKillInserterPass(); - /// createX86CodeEmitterPass - Return a pass that emits the collected X86 code /// to the specified MCE object. FunctionPass *createX86JITCodeEmitterPass(X86TargetMachine &TM, diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td index a53f973c1c43..a19f1acffaca 100644 --- a/lib/Target/X86/X86.td +++ b/lib/Target/X86/X86.td @@ -67,6 +67,8 @@ def FeatureSSE4A : SubtargetFeature<"sse4a", "HasSSE4A", "true", def FeatureAVX : SubtargetFeature<"avx", "HasAVX", "true", "Enable AVX instructions">; +def FeatureCLMUL : SubtargetFeature<"clmul", "HasCLMUL", "true", + "Enable carry-less multiplication instructions">; def FeatureFMA3 : SubtargetFeature<"fma3", "HasFMA3", "true", "Enable three-operand fused multiple-add">; def FeatureFMA4 : SubtargetFeature<"fma4", "HasFMA4", "true", @@ -180,8 +182,6 @@ include "X86CallingConv.td" // Currently the X86 assembly parser only supports ATT syntax. 
def ATTAsmParser : AsmParser { string AsmParserClassName = "ATTAsmParser"; - string AsmParserInstCleanup = "InstructionCleanup"; - string MatchInstructionName = "MatchInstructionImpl"; int Variant = 0; // Discard comments in assembly strings. diff --git a/lib/Target/X86/X86AsmBackend.cpp b/lib/Target/X86/X86AsmBackend.cpp index 2cf65c11f94a..69dc967f9d88 100644 --- a/lib/Target/X86/X86AsmBackend.cpp +++ b/lib/Target/X86/X86AsmBackend.cpp @@ -11,9 +11,11 @@ #include "X86.h" #include "X86FixupKinds.h" #include "llvm/ADT/Twine.h" +#include "llvm/MC/ELFObjectWriter.h" #include "llvm/MC/MCAssembler.h" #include "llvm/MC/MCExpr.h" #include "llvm/MC/MCObjectWriter.h" +#include "llvm/MC/MCSectionCOFF.h" #include "llvm/MC/MCSectionELF.h" #include "llvm/MC/MCSectionMachO.h" #include "llvm/MC/MachObjectWriter.h" @@ -190,10 +192,6 @@ public: HasScatteredSymbols = true; } - MCObjectWriter *createObjectWriter(raw_ostream &OS) const { - return 0; - } - bool isVirtualSection(const MCSection &Section) const { const MCSectionELF &SE = static_cast<const MCSectionELF&>(Section); return SE.getType() == MCSectionELF::SHT_NOBITS;; @@ -204,12 +202,43 @@ class ELFX86_32AsmBackend : public ELFX86AsmBackend { public: ELFX86_32AsmBackend(const Target &T) : ELFX86AsmBackend(T) {} + + MCObjectWriter *createObjectWriter(raw_ostream &OS) const { + return new ELFObjectWriter(OS, /*Is64Bit=*/false, + /*IsLittleEndian=*/true, + /*HasRelocationAddend=*/false); + } }; class ELFX86_64AsmBackend : public ELFX86AsmBackend { public: ELFX86_64AsmBackend(const Target &T) : ELFX86AsmBackend(T) {} + + MCObjectWriter *createObjectWriter(raw_ostream &OS) const { + return new ELFObjectWriter(OS, /*Is64Bit=*/true, + /*IsLittleEndian=*/true, + /*HasRelocationAddend=*/true); + } +}; + +class WindowsX86AsmBackend : public X86AsmBackend { + bool Is64Bit; +public: + WindowsX86AsmBackend(const Target &T, bool is64Bit) + : X86AsmBackend(T) + , Is64Bit(is64Bit) { + HasScatteredSymbols = true; + } + + MCObjectWriter *createObjectWriter(raw_ostream &OS) const { + return createWinCOFFObjectWriter(OS, Is64Bit); + } + + bool isVirtualSection(const MCSection &Section) const { + const MCSectionCOFF &SE = static_cast<const MCSectionCOFF&>(Section); + return SE.getCharacteristics() & COFF::IMAGE_SCN_CNT_UNINITIALIZED_DATA; + } }; class DarwinX86AsmBackend : public X86AsmBackend { @@ -290,6 +319,10 @@ TargetAsmBackend *llvm::createX86_32AsmBackend(const Target &T, switch (Triple(TT).getOS()) { case Triple::Darwin: return new DarwinX86_32AsmBackend(T); + case Triple::MinGW32: + case Triple::Cygwin: + case Triple::Win32: + return new WindowsX86AsmBackend(T, false); default: return new ELFX86_32AsmBackend(T); } @@ -300,6 +333,10 @@ TargetAsmBackend *llvm::createX86_64AsmBackend(const Target &T, switch (Triple(TT).getOS()) { case Triple::Darwin: return new DarwinX86_64AsmBackend(T); + case Triple::MinGW64: + case Triple::Cygwin: + case Triple::Win32: + return new WindowsX86AsmBackend(T, true); default: return new ELFX86_64AsmBackend(T); } diff --git a/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp b/lib/Target/X86/X86AsmPrinter.cpp index 08e6486d5b7a..20110ad788cd 100644 --- a/lib/Target/X86/AsmPrinter/X86AsmPrinter.cpp +++ b/lib/Target/X86/X86AsmPrinter.cpp @@ -13,8 +13,8 @@ //===----------------------------------------------------------------------===// #include "X86AsmPrinter.h" -#include "X86ATTInstPrinter.h" -#include "X86IntelInstPrinter.h" +#include "AsmPrinter/X86ATTInstPrinter.h" +#include "AsmPrinter/X86IntelInstPrinter.h" #include "X86MCInstLower.h" 
#include "X86.h" #include "X86COFFMachineModuleInfo.h" @@ -24,6 +24,7 @@ #include "llvm/DerivedTypes.h" #include "llvm/Module.h" #include "llvm/Type.h" +#include "llvm/Analysis/DebugInfo.h" #include "llvm/Assembly/Writer.h" #include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCContext.h" @@ -35,6 +36,7 @@ #include "llvm/CodeGen/MachineModuleInfoImpls.h" #include "llvm/CodeGen/TargetLoweringObjectFileImpl.h" #include "llvm/Support/COFF.h" +#include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Target/Mangler.h" #include "llvm/Target/TargetOptions.h" @@ -218,6 +220,10 @@ void X86AsmPrinter::print_pcrel_imm(const MachineInstr *MI, unsigned OpNo, const MachineOperand &MO = MI->getOperand(OpNo); switch (MO.getType()) { default: llvm_unreachable("Unknown pcrel immediate operand"); + case MachineOperand::MO_Register: + // pc-relativeness was handled when computing the value in the reg. + printOperand(MI, OpNo, O); + return; case MachineOperand::MO_Immediate: O << MO.getImm(); return; @@ -655,6 +661,47 @@ void X86AsmPrinter::EmitEndOfAsmFile(Module &M) { } } +MachineLocation +X86AsmPrinter::getDebugValueLocation(const MachineInstr *MI) const { + MachineLocation Location; + assert (MI->getNumOperands() == 7 && "Invalid no. of machine operands!"); + // Frame address. Currently handles register +- offset only. + + if (MI->getOperand(0).isReg() && MI->getOperand(3).isImm()) + Location.set(MI->getOperand(0).getReg(), MI->getOperand(3).getImm()); + else { + DEBUG(dbgs() << "DBG_VALUE instruction ignored! " << *MI << "\n"); + } + return Location; +} + +void X86AsmPrinter::PrintDebugValueComment(const MachineInstr *MI, + raw_ostream &O) { + // Only the target-dependent form of DBG_VALUE should get here. + // Referencing the offset and metadata as NOps-2 and NOps-1 is + // probably portable to other targets; frame pointer location is not. + unsigned NOps = MI->getNumOperands(); + assert(NOps==7); + O << '\t' << MAI->getCommentString() << "DEBUG_VALUE: "; + // cast away const; DIetc do not take const operands for some reason. + DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata())); + if (V.getContext().isSubprogram()) + O << DISubprogram(V.getContext()).getDisplayName() << ":"; + O << V.getName(); + O << " <- "; + // Frame address. Currently handles register +- offset only. 
+ O << '['; + if (MI->getOperand(0).isReg() && MI->getOperand(0).getReg()) + printOperand(MI, 0, O); + else + O << "undef"; + O << '+'; printOperand(MI, 3, O); + O << ']'; + O << "+"; + printOperand(MI, NOps-2, O); +} + + //===----------------------------------------------------------------------===// // Target Registry Stuff diff --git a/lib/Target/X86/AsmPrinter/X86AsmPrinter.h b/lib/Target/X86/X86AsmPrinter.h index b5a7f8dc321a..e61be66c75a2 100644 --- a/lib/Target/X86/AsmPrinter/X86AsmPrinter.h +++ b/lib/Target/X86/X86AsmPrinter.h @@ -14,9 +14,9 @@ #ifndef X86ASMPRINTER_H #define X86ASMPRINTER_H -#include "../X86.h" -#include "../X86MachineFunctionInfo.h" -#include "../X86TargetMachine.h" +#include "X86.h" +#include "X86MachineFunctionInfo.h" +#include "X86TargetMachine.h" #include "llvm/ADT/StringSet.h" #include "llvm/CodeGen/AsmPrinter.h" #include "llvm/CodeGen/MachineModuleInfo.h" diff --git a/lib/Target/X86/X86CallingConv.td b/lib/Target/X86/X86CallingConv.td index a6a1e4e573cf..e3409effc318 100644 --- a/lib/Target/X86/X86CallingConv.td +++ b/lib/Target/X86/X86CallingConv.td @@ -33,13 +33,19 @@ def RetCC_X86Common : CallingConv<[ CCIfType<[i16], CCAssignToReg<[AX, DX]>>, CCIfType<[i32], CCAssignToReg<[EAX, EDX]>>, CCIfType<[i64], CCAssignToReg<[RAX, RDX]>>, - - // Vector types are returned in XMM0 and XMM1, when they fit. XMMM2 and XMM3 + + // Vector types are returned in XMM0 and XMM1, when they fit. XMM2 and XMM3 // can only be used by ABI non-compliant code. If the target doesn't have XMM // registers, it won't have vector types. CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>, + // 256-bit vectors are returned in YMM0 and XMM1, when they fit. YMM2 and YMM3 + // can only be used by ABI non-compliant code. This vector type is only + // supported while using the AVX target feature. + CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], + CCIfSubtarget<"hasAVX()", CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>>, + // MMX vector types are always returned in MM0. If the target doesn't have // MM0, it doesn't support these vector types. CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToReg<[MM0]>>, @@ -164,11 +170,16 @@ def CC_X86_64_C : CallingConv<[ CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCIfSubtarget<"hasSSE1()", CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>, - + + // The first 8 256-bit vector arguments are passed in YMM registers. + CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], + CCIfSubtarget<"hasAVX()", + CCAssignToReg<[YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7]>>>, + // Integer/FP values get stored in stack slots that are 8 bytes in size and // 8-byte aligned if there are no more registers to hold them. CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>, - + // Long doubles get stack slots whose size and alignment depends on the // subtarget. CCIfType<[f80], CCAssignToStack<0, 0>>, @@ -176,6 +187,10 @@ def CC_X86_64_C : CallingConv<[ // Vectors get 16-byte stack slots that are 16-byte aligned. CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>, + // 256-bit vectors get 32-byte stack slots that are 32-byte aligned. + CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], + CCAssignToStack<32, 32>>, + // __m64 vectors get 8-byte stack slots that are 8-byte aligned. 
CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 8>> ]>; @@ -271,9 +286,18 @@ def CC_X86_32_Common : CallingConv<[ CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>, + // The first 4 AVX 256-bit vector arguments are passed in YMM registers. + CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], + CCIfSubtarget<"hasAVX()", + CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>, + // Other SSE vectors get 16-byte stack slots that are 16-byte aligned. CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>, + // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned. + CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], + CCAssignToStack<32, 32>>, + // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are // passed in the parameter area. CCIfType<[v8i8, v4i16, v2i32, v1i64], CCAssignToStack<8, 4>>]>; diff --git a/lib/Target/X86/X86CodeEmitter.cpp b/lib/Target/X86/X86CodeEmitter.cpp index f13669bd741d..824021c0c882 100644 --- a/lib/Target/X86/X86CodeEmitter.cpp +++ b/lib/Target/X86/X86CodeEmitter.cpp @@ -53,12 +53,12 @@ namespace { public: static char ID; explicit Emitter(X86TargetMachine &tm, CodeEmitter &mce) - : MachineFunctionPass(&ID), II(0), TD(0), TM(tm), + : MachineFunctionPass(ID), II(0), TD(0), TM(tm), MCE(mce), PICBaseOffset(0), Is64BitMode(false), IsPIC(TM.getRelocationModel() == Reloc::PIC_) {} Emitter(X86TargetMachine &tm, CodeEmitter &mce, const X86InstrInfo &ii, const TargetData &td, bool is64) - : MachineFunctionPass(&ID), II(&ii), TD(&td), TM(tm), + : MachineFunctionPass(ID), II(&ii), TD(&td), TM(tm), MCE(mce), PICBaseOffset(0), Is64BitMode(is64), IsPIC(TM.getRelocationModel() == Reloc::PIC_) {} @@ -146,6 +146,103 @@ bool Emitter<CodeEmitter>::runOnMachineFunction(MachineFunction &MF) { return false; } +/// determineREX - Determine if the MachineInstr has to be encoded with a X86-64 +/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand +/// size, and 3) use of X86-64 extended registers. +static unsigned determineREX(const MachineInstr &MI) { + unsigned REX = 0; + const TargetInstrDesc &Desc = MI.getDesc(); + + // Pseudo instructions do not need REX prefix byte. + if ((Desc.TSFlags & X86II::FormMask) == X86II::Pseudo) + return 0; + if (Desc.TSFlags & X86II::REX_W) + REX |= 1 << 3; + + unsigned NumOps = Desc.getNumOperands(); + if (NumOps) { + bool isTwoAddr = NumOps > 1 && + Desc.getOperandConstraint(1, TOI::TIED_TO) != -1; + + // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix. + unsigned i = isTwoAddr ? 1 : 0; + for (unsigned e = NumOps; i != e; ++i) { + const MachineOperand& MO = MI.getOperand(i); + if (MO.isReg()) { + unsigned Reg = MO.getReg(); + if (X86InstrInfo::isX86_64NonExtLowByteReg(Reg)) + REX |= 0x40; + } + } + + switch (Desc.TSFlags & X86II::FormMask) { + case X86II::MRMInitReg: + if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0))) + REX |= (1 << 0) | (1 << 2); + break; + case X86II::MRMSrcReg: { + if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0))) + REX |= 1 << 2; + i = isTwoAddr ? 2 : 1; + for (unsigned e = NumOps; i != e; ++i) { + const MachineOperand& MO = MI.getOperand(i); + if (X86InstrInfo::isX86_64ExtendedReg(MO)) + REX |= 1 << 0; + } + break; + } + case X86II::MRMSrcMem: { + if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0))) + REX |= 1 << 2; + unsigned Bit = 0; + i = isTwoAddr ? 
2 : 1; + for (; i != NumOps; ++i) { + const MachineOperand& MO = MI.getOperand(i); + if (MO.isReg()) { + if (X86InstrInfo::isX86_64ExtendedReg(MO)) + REX |= 1 << Bit; + Bit++; + } + } + break; + } + case X86II::MRM0m: case X86II::MRM1m: + case X86II::MRM2m: case X86II::MRM3m: + case X86II::MRM4m: case X86II::MRM5m: + case X86II::MRM6m: case X86II::MRM7m: + case X86II::MRMDestMem: { + unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands); + i = isTwoAddr ? 1 : 0; + if (NumOps > e && X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(e))) + REX |= 1 << 2; + unsigned Bit = 0; + for (; i != e; ++i) { + const MachineOperand& MO = MI.getOperand(i); + if (MO.isReg()) { + if (X86InstrInfo::isX86_64ExtendedReg(MO)) + REX |= 1 << Bit; + Bit++; + } + } + break; + } + default: { + if (X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(0))) + REX |= 1 << 0; + i = isTwoAddr ? 2 : 1; + for (unsigned e = NumOps; i != e; ++i) { + const MachineOperand& MO = MI.getOperand(i); + if (X86InstrInfo::isX86_64ExtendedReg(MO)) + REX |= 1 << 2; + } + break; + } + } + } + return REX; +} + + /// emitPCRelativeBlockAddress - This method keeps track of the information /// necessary to resolve the address of this block later and emits a dummy /// value. @@ -569,7 +666,7 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI, // Handle REX prefix. if (Is64BitMode) { - if (unsigned REX = X86InstrInfo::determineREX(MI)) + if (unsigned REX = determineREX(MI)) MCE.emitByte(0x40 | REX); } @@ -605,24 +702,29 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI, // base address. switch (Opcode) { default: - llvm_unreachable("psuedo instructions should be removed before code" + llvm_unreachable("pseudo instructions should be removed before code" " emission"); break; + // Do nothing for Int_MemBarrier - it's just a comment. Add a debug + // to make it slightly easier to see. + case X86::Int_MemBarrier: + DEBUG(dbgs() << "#MEMBARRIER\n"); + break; + case TargetOpcode::INLINEASM: // We allow inline assembler nodes with empty bodies - they can // implicitly define registers, which is ok for JIT. if (MI.getOperand(0).getSymbolName()[0]) report_fatal_error("JIT does not support inline asm!"); break; - case TargetOpcode::DBG_LABEL: + case TargetOpcode::PROLOG_LABEL: case TargetOpcode::GC_LABEL: case TargetOpcode::EH_LABEL: MCE.emitLabel(MI.getOperand(0).getMCSymbol()); break; - + case TargetOpcode::IMPLICIT_DEF: case TargetOpcode::KILL: - case X86::FP_REG_KILL: break; case X86::MOVPC32r: { // This emits the "call" portion of this pseudo instruction. @@ -674,7 +776,8 @@ void Emitter<CodeEmitter>::emitInstruction(const MachineInstr &MI, } assert(MO.isImm() && "Unknown RawFrm operand!"); - if (Opcode == X86::CALLpcrel32 || Opcode == X86::CALL64pcrel32) { + if (Opcode == X86::CALLpcrel32 || Opcode == X86::CALL64pcrel32 || + Opcode == X86::WINCALL64pcrel32) { // Fix up immediate operand for pc relative calls. intptr_t Imm = (intptr_t)MO.getImm(); Imm = Imm - MCE.getCurrentPCValue() - 4; diff --git a/lib/Target/X86/X86FastISel.cpp b/lib/Target/X86/X86FastISel.cpp index ce1370763b77..0c70eec4827f 100644 --- a/lib/Target/X86/X86FastISel.cpp +++ b/lib/Target/X86/X86FastISel.cpp @@ -960,9 +960,11 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { MachineBasicBlock *TrueMBB = FuncInfo.MBBMap[BI->getSuccessor(0)]; MachineBasicBlock *FalseMBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; - // Fold the common case of a conditional branch with a comparison. 
+ // Fold the common case of a conditional branch with a comparison + // in the same block (values defined on other blocks may not have + // initialized registers). if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) { - if (CI->hasOneUse()) { + if (CI->hasOneUse() && CI->getParent() == I->getParent()) { EVT VT = TLI.getValueType(CI->getOperand(0)->getType()); // Try to take advantage of fallthrough opportunities. @@ -1058,10 +1060,8 @@ bool X86FastISel::X86SelectBranch(const Instruction *I) { const MachineInstr &MI = *RI; if (MI.definesRegister(Reg)) { - unsigned Src, Dst, SrcSR, DstSR; - - if (getInstrInfo()->isMoveInstr(MI, Src, Dst, SrcSR, DstSR)) { - Reg = Src; + if (MI.isCopy()) { + Reg = MI.getOperand(1).getReg(); continue; } @@ -1648,15 +1648,26 @@ bool X86FastISel::X86SelectCall(const Instruction *I) { MachineInstrBuilder MIB; if (CalleeOp) { // Register-indirect call. - unsigned CallOpc = Subtarget->is64Bit() ? X86::CALL64r : X86::CALL32r; + unsigned CallOpc; + if (Subtarget->isTargetWin64()) + CallOpc = X86::WINCALL64r; + else if (Subtarget->is64Bit()) + CallOpc = X86::CALL64r; + else + CallOpc = X86::CALL32r; MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DL, TII.get(CallOpc)) .addReg(CalleeOp); } else { // Direct call. assert(GV && "Not a direct call"); - unsigned CallOpc = - Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32; + unsigned CallOpc; + if (Subtarget->isTargetWin64()) + CallOpc = X86::WINCALL64pcrel32; + else if (Subtarget->is64Bit()) + CallOpc = X86::CALL64pcrel32; + else + CallOpc = X86::CALLpcrel32; // See if we need any target-specific flags on the GV operand. unsigned char OpFlags = 0; diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp index cee4ad70201a..e6ebf669587d 100644 --- a/lib/Target/X86/X86FloatingPoint.cpp +++ b/lib/Target/X86/X86FloatingPoint.cpp @@ -8,23 +8,18 @@ //===----------------------------------------------------------------------===// // // This file defines the pass which converts floating point instructions from -// virtual registers into register stack instructions. This pass uses live +// pseudo registers into register stack instructions. This pass uses live // variable information to indicate where the FPn registers are used and their // lifetimes. // -// This pass is hampered by the lack of decent CFG manipulation routines for -// machine code. In particular, this wants to be able to split critical edges -// as necessary, traverse the machine basic block CFG in depth-first order, and -// allow there to be multiple machine basic blocks for each LLVM basicblock -// (needed for critical edge splitting). +// The x87 hardware tracks liveness of the stack registers, so it is necessary +// to implement exact liveness tracking between basic blocks. The CFG edges are +// partitioned into bundles where the same FP registers must be live in +// identical stack positions. Instructions are inserted at the end of each basic +// block to rearrange the live registers to match the outgoing bundle. // -// In particular, this pass currently barfs on critical edges. Because of this, -// it requires the instruction selector to insert FP_REG_KILL instructions on -// the exits of any basic block that has critical edges going from it, or which -// branch to a critical basic block. -// -// FIXME: this is not implemented yet. The stackifier pass only works on local -// basic blocks. 
+// This approach avoids splitting critical edges at the potential cost of more +// live register shuffling instructions when critical edges are present. // //===----------------------------------------------------------------------===// @@ -32,6 +27,7 @@ #include "X86.h" #include "X86InstrInfo.h" #include "llvm/ADT/DepthFirstIterator.h" +#include "llvm/ADT/DenseMap.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/Statistic.h" @@ -54,7 +50,12 @@ STATISTIC(NumFP , "Number of floating point instructions"); namespace { struct FPS : public MachineFunctionPass { static char ID; - FPS() : MachineFunctionPass(&ID) {} + FPS() : MachineFunctionPass(ID) { + // This is really only to keep valgrind quiet. + // The logic in isLive() is too much for it. + memset(Stack, 0, sizeof(Stack)); + memset(RegMap, 0, sizeof(RegMap)); + } virtual void getAnalysisUsage(AnalysisUsage &AU) const { AU.setPreservesCFG(); @@ -69,11 +70,71 @@ namespace { private: const TargetInstrInfo *TII; // Machine instruction info. + + // Two CFG edges are related if they leave the same block, or enter the same + // block. The transitive closure of an edge under this relation is a + // LiveBundle. It represents a set of CFG edges where the live FP stack + // registers must be allocated identically in the x87 stack. + // + // A LiveBundle is usually all the edges leaving a block, or all the edges + // entering a block, but it can contain more edges if critical edges are + // present. + // + // The set of live FP registers in a LiveBundle is calculated by bundleCFG, + // but the exact mapping of FP registers to stack slots is fixed later. + struct LiveBundle { + // Bit mask of live FP registers. Bit 0 = FP0, bit 1 = FP1, &c. + unsigned Mask; + + // Number of pre-assigned live registers in FixStack. This is 0 when the + // stack order has not yet been fixed. + unsigned FixCount; + + // Assigned stack order for live-in registers. + // FixStack[i] == getStackEntry(i) for all i < FixCount. + unsigned char FixStack[8]; + + LiveBundle(unsigned m = 0) : Mask(m), FixCount(0) {} + + // Have the live registers been assigned a stack order yet? + bool isFixed() const { return !Mask || FixCount; } + }; + + // Numbered LiveBundle structs. LiveBundles[0] is used for all CFG edges + // with no live FP registers. + SmallVector<LiveBundle, 8> LiveBundles; + + // Map each MBB in the current function to an (ingoing, outgoing) index into + // LiveBundles. Blocks with no FP registers live in or out map to (0, 0) + // and are not actually stored in the map. + DenseMap<MachineBasicBlock*, std::pair<unsigned, unsigned> > BlockBundle; + + // Return a bitmask of FP registers in block's live-in list. + unsigned calcLiveInMask(MachineBasicBlock *MBB) { + unsigned Mask = 0; + for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(), + E = MBB->livein_end(); I != E; ++I) { + unsigned Reg = *I - X86::FP0; + if (Reg < 8) + Mask |= 1 << Reg; + } + return Mask; + } + + // Partition all the CFG edges into LiveBundles. + void bundleCFG(MachineFunction &MF); + MachineBasicBlock *MBB; // Current basic block unsigned Stack[8]; // FP<n> Registers in each stack slot... unsigned RegMap[8]; // Track which stack slot contains each register unsigned StackTop; // The current top of the FP stack. + // Set up our stack model to match the incoming registers to MBB. + void setupBlockStack(); + + // Shuffle live registers to match the expectations of successor blocks. 
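To make the Mask convention above concrete: a set of live x87 registers packs into a single unsigned, one bit per FP0..FP7. A minimal standalone sketch of that computation follows; the FP0 constant and the plain std::vector stand in for the pass's real register numbering and live-in list, and none of this is code from the patch.

#include <vector>

static const unsigned FP0 = 32;   // hypothetical base number for the FP0..FP7 pseudo registers

// Bit i of the result is set when FPi appears in the live-in list, mirroring
// the Mask field of LiveBundle (bit 0 = FP0, bit 1 = FP1, ...).
unsigned liveMask(const std::vector<unsigned> &LiveIns) {
  unsigned Mask = 0;
  for (unsigned i = 0, e = LiveIns.size(); i != e; ++i) {
    unsigned Reg = LiveIns[i] - FP0;
    if (Reg < 8)                  // only the eight stack registers participate
      Mask |= 1u << Reg;
  }
  return Mask;
}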
+ void finishBlockStack(); + void dumpStack() const { dbgs() << "Stack contents:"; for (unsigned i = 0; i != StackTop; ++i) { @@ -82,27 +143,36 @@ namespace { } dbgs() << "\n"; } - private: - /// isStackEmpty - Return true if the FP stack is empty. - bool isStackEmpty() const { - return StackTop == 0; - } - - // getSlot - Return the stack slot number a particular register number is - // in. + + /// getSlot - Return the stack slot number a particular register number is + /// in. unsigned getSlot(unsigned RegNo) const { assert(RegNo < 8 && "Regno out of range!"); return RegMap[RegNo]; } - // getStackEntry - Return the X86::FP<n> register in register ST(i). + /// isLive - Is RegNo currently live in the stack? + bool isLive(unsigned RegNo) const { + unsigned Slot = getSlot(RegNo); + return Slot < StackTop && Stack[Slot] == RegNo; + } + + /// getScratchReg - Return an FP register that is not currently in use. + unsigned getScratchReg() { + for (int i = 7; i >= 0; --i) + if (!isLive(i)) + return i; + llvm_unreachable("Ran out of scratch FP registers"); + } + + /// getStackEntry - Return the X86::FP<n> register in register ST(i). unsigned getStackEntry(unsigned STi) const { assert(STi < StackTop && "Access past stack top!"); return Stack[StackTop-1-STi]; } - // getSTReg - Return the X86::ST(i) register which contains the specified - // FP<RegNo> register. + /// getSTReg - Return the X86::ST(i) register which contains the specified + /// FP<RegNo> register. unsigned getSTReg(unsigned RegNo) const { return StackTop - 1 - getSlot(RegNo) + llvm::X86::ST0; } @@ -117,10 +187,9 @@ namespace { bool isAtTop(unsigned RegNo) const { return getSlot(RegNo) == StackTop-1; } void moveToTop(unsigned RegNo, MachineBasicBlock::iterator I) { - MachineInstr *MI = I; - DebugLoc dl = MI->getDebugLoc(); + DebugLoc dl = I == MBB->end() ? DebugLoc() : I->getDebugLoc(); if (isAtTop(RegNo)) return; - + unsigned STReg = getSTReg(RegNo); unsigned RegOnTop = getStackEntry(0); @@ -137,24 +206,37 @@ namespace { } void duplicateToTop(unsigned RegNo, unsigned AsReg, MachineInstr *I) { - DebugLoc dl = I->getDebugLoc(); + DebugLoc dl = I == MBB->end() ? DebugLoc() : I->getDebugLoc(); unsigned STReg = getSTReg(RegNo); pushReg(AsReg); // New register on top of stack BuildMI(*MBB, I, dl, TII->get(X86::LD_Frr)).addReg(STReg); } - // popStackAfter - Pop the current value off of the top of the FP stack - // after the specified instruction. + /// popStackAfter - Pop the current value off of the top of the FP stack + /// after the specified instruction. void popStackAfter(MachineBasicBlock::iterator &I); - // freeStackSlotAfter - Free the specified register from the register stack, - // so that it is no longer in a register. If the register is currently at - // the top of the stack, we just pop the current instruction, otherwise we - // store the current top-of-stack into the specified slot, then pop the top - // of stack. + /// freeStackSlotAfter - Free the specified register from the register + /// stack, so that it is no longer in a register. If the register is + /// currently at the top of the stack, we just pop the current instruction, + /// otherwise we store the current top-of-stack into the specified slot, + /// then pop the top of stack. void freeStackSlotAfter(MachineBasicBlock::iterator &I, unsigned Reg); + /// freeStackSlotBefore - Just the pop, no folding. Return the inserted + /// instruction. 
+ MachineBasicBlock::iterator + freeStackSlotBefore(MachineBasicBlock::iterator I, unsigned FPRegNo); + + /// Adjust the live registers to be the set in Mask. + void adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I); + + /// Shuffle the top FixCount stack entries such that FP reg FixStack[0] is + /// st(0), FP reg FixStack[1] is st(1) etc. + void shuffleStackTop(const unsigned char *FixStack, unsigned FixCount, + MachineBasicBlock::iterator I); + bool processBasicBlock(MachineFunction &MF, MachineBasicBlock &MBB); void handleZeroArgFP(MachineBasicBlock::iterator &I); @@ -181,7 +263,6 @@ static unsigned getFPReg(const MachineOperand &MO) { return Reg - X86::FP0; } - /// runOnMachineFunction - Loop over all of the basic blocks, transforming FP /// register references into FP stack references. /// @@ -201,6 +282,10 @@ bool FPS::runOnMachineFunction(MachineFunction &MF) { if (!FPIsUsed) return false; TII = MF.getTarget().getInstrInfo(); + + // Prepare cross-MBB liveness. + bundleCFG(MF); + StackTop = 0; // Process the function in depth first order so that we process at least one @@ -215,16 +300,111 @@ bool FPS::runOnMachineFunction(MachineFunction &MF) { Changed |= processBasicBlock(MF, **I); // Process any unreachable blocks in arbitrary order now. - if (MF.size() == Processed.size()) - return Changed; + if (MF.size() != Processed.size()) + for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB) + if (Processed.insert(BB)) + Changed |= processBasicBlock(MF, *BB); + + BlockBundle.clear(); + LiveBundles.clear(); - for (MachineFunction::iterator BB = MF.begin(), E = MF.end(); BB != E; ++BB) - if (Processed.insert(BB)) - Changed |= processBasicBlock(MF, *BB); - return Changed; } +/// bundleCFG - Scan all the basic blocks to determine consistent live-in and +/// live-out sets for the FP registers. Consistent means that the set of +/// registers live-out from a block is identical to the live-in set of all +/// successors. This is not enforced by the normal live-in lists since +/// registers may be implicitly defined, or not used by all successors. +void FPS::bundleCFG(MachineFunction &MF) { + assert(LiveBundles.empty() && "Stale data in LiveBundles"); + assert(BlockBundle.empty() && "Stale data in BlockBundle"); + SmallPtrSet<MachineBasicBlock*, 8> PropDown, PropUp; + + // LiveBundle[0] is the empty live-in set. + LiveBundles.resize(1); + + // First gather the actual live-in masks for all MBBs. + for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; ++I) { + MachineBasicBlock *MBB = I; + const unsigned Mask = calcLiveInMask(MBB); + if (!Mask) + continue; + // Ingoing bundle index. + unsigned &Idx = BlockBundle[MBB].first; + // Already assigned an ingoing bundle? + if (Idx) + continue; + // Allocate a new LiveBundle struct for this block's live-ins. + const unsigned BundleIdx = Idx = LiveBundles.size(); + DEBUG(dbgs() << "Creating LB#" << BundleIdx << ": in:BB#" + << MBB->getNumber()); + LiveBundles.push_back(Mask); + LiveBundle &Bundle = LiveBundles.back(); + + // Make sure all predecessors have the same live-out set. + PropUp.insert(MBB); + + // Keep pushing liveness up and down the CFG until convergence. + // Only critical edges cause iteration here, but when they do, multiple + // blocks can be assigned to the same LiveBundle index. + do { + // Assign BundleIdx as liveout from predecessors in PropUp.
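The propagation loop below works with two worklists, but the relation it computes can also be pictured as a union-find over edge endpoints: every CFG edge merges its predecessor's "out" side with its successor's "in" side, so edges that share either end, and through critical edges whole chains of blocks, collapse into one bundle. A self-contained sketch under that view, with my own names, not the patch's code:

#include <vector>

struct EdgeBundleSketch {
  // One entry per endpoint: 2*BB is the "in" side of block BB, 2*BB+1 its "out" side.
  std::vector<unsigned> Parent;

  explicit EdgeBundleSketch(unsigned NumBlocks) : Parent(2 * NumBlocks) {
    for (unsigned i = 0, e = Parent.size(); i != e; ++i)
      Parent[i] = i;                       // each endpoint starts in its own class
  }
  unsigned find(unsigned X) {
    while (Parent[X] != X)
      X = Parent[X] = Parent[Parent[X]];   // path halving
    return X;
  }
  // For each CFG edge Pred -> Succ, the two endpoints must share a bundle.
  void addEdge(unsigned Pred, unsigned Succ) {
    Parent[find(2 * Pred + 1)] = find(2 * Succ);
  }
};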
+ for (SmallPtrSet<MachineBasicBlock*, 16>::iterator I = PropUp.begin(), + E = PropUp.end(); I != E; ++I) { + MachineBasicBlock *MBB = *I; + for (MachineBasicBlock::const_pred_iterator LinkI = MBB->pred_begin(), + LinkE = MBB->pred_end(); LinkI != LinkE; ++LinkI) { + MachineBasicBlock *PredMBB = *LinkI; + // PredMBB's liveout bundle should be set to LIIdx. + unsigned &Idx = BlockBundle[PredMBB].second; + if (Idx) { + assert(Idx == BundleIdx && "Inconsistent CFG"); + continue; + } + Idx = BundleIdx; + DEBUG(dbgs() << " out:BB#" << PredMBB->getNumber()); + // Propagate to siblings. + if (PredMBB->succ_size() > 1) + PropDown.insert(PredMBB); + } + } + PropUp.clear(); + + // Assign BundleIdx as livein to successors in PropDown. + for (SmallPtrSet<MachineBasicBlock*, 16>::iterator I = PropDown.begin(), + E = PropDown.end(); I != E; ++I) { + MachineBasicBlock *MBB = *I; + for (MachineBasicBlock::const_succ_iterator LinkI = MBB->succ_begin(), + LinkE = MBB->succ_end(); LinkI != LinkE; ++LinkI) { + MachineBasicBlock *SuccMBB = *LinkI; + // LinkMBB's livein bundle should be set to BundleIdx. + unsigned &Idx = BlockBundle[SuccMBB].first; + if (Idx) { + assert(Idx == BundleIdx && "Inconsistent CFG"); + continue; + } + Idx = BundleIdx; + DEBUG(dbgs() << " in:BB#" << SuccMBB->getNumber()); + // Propagate to siblings. + if (SuccMBB->pred_size() > 1) + PropUp.insert(SuccMBB); + // Also accumulate the bundle liveness mask from the liveins here. + Bundle.Mask |= calcLiveInMask(SuccMBB); + } + } + PropDown.clear(); + } while (!PropUp.empty()); + DEBUG({ + dbgs() << " live:"; + for (unsigned i = 0; i < 8; ++i) + if (Bundle.Mask & (1<<i)) + dbgs() << " %FP" << i; + dbgs() << '\n'; + }); + } +} + /// processBasicBlock - Loop over all of the instructions in the basic block, /// transforming FP instructions into their stack form. /// @@ -232,10 +412,12 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) { bool Changed = false; MBB = &BB; + setupBlockStack(); + for (MachineBasicBlock::iterator I = BB.begin(); I != BB.end(); ++I) { MachineInstr *MI = I; uint64_t Flags = MI->getDesc().TSFlags; - + unsigned FPInstClass = Flags & X86II::FPTypeMask; if (MI->isInlineAsm()) FPInstClass = X86II::SpecialFP; @@ -302,10 +484,82 @@ bool FPS::processBasicBlock(MachineFunction &MF, MachineBasicBlock &BB) { Changed = true; } - assert(isStackEmpty() && "Stack not empty at end of basic block?"); + finishBlockStack(); + return Changed; } +/// setupBlockStack - Use the BlockBundle map to set up our model of the stack +/// to match predecessors' live out stack. +void FPS::setupBlockStack() { + DEBUG(dbgs() << "\nSetting up live-ins for BB#" << MBB->getNumber() + << " derived from " << MBB->getName() << ".\n"); + StackTop = 0; + const LiveBundle &Bundle = LiveBundles[BlockBundle.lookup(MBB).first]; + + if (!Bundle.Mask) { + DEBUG(dbgs() << "Block has no FP live-ins.\n"); + return; + } + + // Depth-first iteration should ensure that we always have an assigned stack. + assert(Bundle.isFixed() && "Reached block before any predecessors"); + + // Push the fixed live-in registers. + for (unsigned i = Bundle.FixCount; i > 0; --i) { + MBB->addLiveIn(X86::ST0+i-1); + DEBUG(dbgs() << "Live-in st(" << (i-1) << "): %FP" + << unsigned(Bundle.FixStack[i-1]) << '\n'); + pushReg(Bundle.FixStack[i-1]); + } + + // Kill off unwanted live-ins. This can happen with a critical edge. + // FIXME: We could keep these live registers around as zombies. They may need + // to be revived at the end of a short block. 
It might save a few instrs. + adjustLiveRegs(calcLiveInMask(MBB), MBB->begin()); + DEBUG(MBB->dump()); +} + +/// finishBlockStack - Revive live-outs that are implicitly defined out of +/// MBB. Shuffle live registers to match the expected fixed stack of any +/// predecessors, and ensure that all predecessors are expecting the same +/// stack. +void FPS::finishBlockStack() { + // The RET handling below takes care of return blocks for us. + if (MBB->succ_empty()) + return; + + DEBUG(dbgs() << "Setting up live-outs for BB#" << MBB->getNumber() + << " derived from " << MBB->getName() << ".\n"); + + unsigned BundleIdx = BlockBundle.lookup(MBB).second; + LiveBundle &Bundle = LiveBundles[BundleIdx]; + + // We may need to kill and define some registers to match successors. + // FIXME: This can probably be combined with the shuffle below. + MachineBasicBlock::iterator Term = MBB->getFirstTerminator(); + adjustLiveRegs(Bundle.Mask, Term); + + if (!Bundle.Mask) { + DEBUG(dbgs() << "No live-outs.\n"); + return; + } + + // Has the stack order been fixed yet? + DEBUG(dbgs() << "LB#" << BundleIdx << ": "); + if (Bundle.isFixed()) { + DEBUG(dbgs() << "Shuffling stack to match.\n"); + shuffleStackTop(Bundle.FixStack, Bundle.FixCount, Term); + } else { + // Not fixed yet, we get to choose. + DEBUG(dbgs() << "Fixing stack order now.\n"); + Bundle.FixCount = StackTop; + for (unsigned i = 0; i < StackTop; ++i) + Bundle.FixStack[i] = getStackEntry(i); + } +} + + //===----------------------------------------------------------------------===// // Efficient Lookup Table Support //===----------------------------------------------------------------------===// @@ -318,7 +572,7 @@ namespace { friend bool operator<(const TableEntry &TE, unsigned V) { return TE.from < V; } - friend bool operator<(unsigned V, const TableEntry &TE) { + friend bool ATTRIBUTE_USED operator<(unsigned V, const TableEntry &TE) { return V < TE.from; } }; @@ -597,6 +851,13 @@ void FPS::freeStackSlotAfter(MachineBasicBlock::iterator &I, unsigned FPRegNo) { // Otherwise, store the top of stack into the dead slot, killing the operand // without having to add in an explicit xchg then pop. // + I = freeStackSlotBefore(++I, FPRegNo); +} + +/// freeStackSlotBefore - Free the specified register without trying any +/// folding. +MachineBasicBlock::iterator +FPS::freeStackSlotBefore(MachineBasicBlock::iterator I, unsigned FPRegNo) { unsigned STReg = getSTReg(FPRegNo); unsigned OldSlot = getSlot(FPRegNo); unsigned TopReg = Stack[StackTop-1]; @@ -604,9 +865,90 @@ void FPS::freeStackSlotAfter(MachineBasicBlock::iterator &I, unsigned FPRegNo) { RegMap[TopReg] = OldSlot; RegMap[FPRegNo] = ~0; Stack[--StackTop] = ~0; - MachineInstr *MI = I; - DebugLoc dl = MI->getDebugLoc(); - I = BuildMI(*MBB, ++I, dl, TII->get(X86::ST_FPrr)).addReg(STReg); + return BuildMI(*MBB, I, DebugLoc(), TII->get(X86::ST_FPrr)).addReg(STReg); +} + +/// adjustLiveRegs - Kill and revive registers such that exactly the FP +/// registers with a bit in Mask are live. +void FPS::adjustLiveRegs(unsigned Mask, MachineBasicBlock::iterator I) { + unsigned Defs = Mask; + unsigned Kills = 0; + for (unsigned i = 0; i < StackTop; ++i) { + unsigned RegNo = Stack[i]; + if (!(Defs & (1 << RegNo))) + // This register is live, but we don't want it. + Kills |= (1 << RegNo); + else + // We don't need to imp-def this live register. + Defs &= ~(1 << RegNo); + } + assert((Kills & Defs) == 0 && "Register needs killing and def'ing?"); + + // Produce implicit-defs for free by using killed registers. 
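The loop that follows pairs the two bit masks lowest-bit-first; a rough standalone restatement of that idea, using the compiler builtin __builtin_ctz in place of CountTrailingZeros_32 and a printf where the pass swaps its Stack/RegMap entries (not the patch's code):

#include <cstdio>

// Each kill/def pair is satisfied by renaming the killed register as the newly
// defined one, which costs no instructions. Whatever is left over must be
// popped or stored (Kills) or loaded as 0.0 (Defs), as the code below does.
void pairKillsAndDefs(unsigned Kills, unsigned Defs) {
  while (Kills && Defs) {
    unsigned K = __builtin_ctz(Kills);    // lowest register still to kill
    unsigned D = __builtin_ctz(Defs);     // lowest register still to define
    std::printf("rename FP%u as imp-def FP%u\n", K, D);
    Kills &= Kills - 1;                   // clear the lowest set bit of each mask
    Defs &= Defs - 1;
  }
}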
+ while (Kills && Defs) { + unsigned KReg = CountTrailingZeros_32(Kills); + unsigned DReg = CountTrailingZeros_32(Defs); + DEBUG(dbgs() << "Renaming %FP" << KReg << " as imp %FP" << DReg << "\n"); + std::swap(Stack[getSlot(KReg)], Stack[getSlot(DReg)]); + std::swap(RegMap[KReg], RegMap[DReg]); + Kills &= ~(1 << KReg); + Defs &= ~(1 << DReg); + } + + // Kill registers by popping. + if (Kills && I != MBB->begin()) { + MachineBasicBlock::iterator I2 = llvm::prior(I); + for (;;) { + unsigned KReg = getStackEntry(0); + if (!(Kills & (1 << KReg))) + break; + DEBUG(dbgs() << "Popping %FP" << KReg << "\n"); + popStackAfter(I2); + Kills &= ~(1 << KReg); + } + } + + // Manually kill the rest. + while (Kills) { + unsigned KReg = CountTrailingZeros_32(Kills); + DEBUG(dbgs() << "Killing %FP" << KReg << "\n"); + freeStackSlotBefore(I, KReg); + Kills &= ~(1 << KReg); + } + + // Load zeros for all the imp-defs. + while(Defs) { + unsigned DReg = CountTrailingZeros_32(Defs); + DEBUG(dbgs() << "Defining %FP" << DReg << " as 0\n"); + BuildMI(*MBB, I, DebugLoc(), TII->get(X86::LD_F0)); + pushReg(DReg); + Defs &= ~(1 << DReg); + } + + // Now we should have the correct registers live. + DEBUG(dumpStack()); + assert(StackTop == CountPopulation_32(Mask) && "Live count mismatch"); +} + +/// shuffleStackTop - emit fxch instructions before I to shuffle the top +/// FixCount entries into the order given by FixStack. +/// FIXME: Is there a better algorithm than insertion sort? +void FPS::shuffleStackTop(const unsigned char *FixStack, + unsigned FixCount, + MachineBasicBlock::iterator I) { + // Move items into place, starting from the desired stack bottom. + while (FixCount--) { + // Old register at position FixCount. + unsigned OldReg = getStackEntry(FixCount); + // Desired register at position FixCount. + unsigned Reg = FixStack[FixCount]; + if (Reg == OldReg) + continue; + // (Reg st0) (OldReg st0) = (Reg OldReg st0) + moveToTop(Reg, I); + moveToTop(OldReg, I); + } + DEBUG(dumpStack()); } @@ -660,7 +1002,7 @@ void FPS::handleOneArgFP(MachineBasicBlock::iterator &I) { MI->getOpcode() == X86::ISTT_Fp32m80 || MI->getOpcode() == X86::ISTT_Fp64m80 || MI->getOpcode() == X86::ST_FpP80m)) { - duplicateToTop(Reg, 7 /*temp register*/, I); + duplicateToTop(Reg, getScratchReg(), I); } else { moveToTop(Reg, I); // Move to the top of the stack... } @@ -1013,8 +1355,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) { if (!MI->killsRegister(X86::FP0 + Op0)) { // Duplicate Op0 into a temporary on the stack top. - // This actually assumes that FP7 is dead. - duplicateToTop(Op0, 7, I); + duplicateToTop(Op0, getScratchReg(), I); } else { // Op0 is killed, so just swap it into position. moveToTop(Op0, I); @@ -1034,8 +1375,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) { ++StackTop; unsigned RegOnTop = getStackEntry(0); // This reg must remain in st(0). if (!MI->killsRegister(X86::FP0 + Op0)) { - // Assume FP6 is not live, use it as a scratch register. - duplicateToTop(Op0, 6, I); + duplicateToTop(Op0, getScratchReg(), I); moveToTop(RegOnTop, I); } else if (getSTReg(Op0) != X86::ST1) { // We have the wrong value at st(1). Shuffle! Untested! @@ -1119,11 +1459,11 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) { case X86::RETI: // If RET has an FP register use operand, pass the first one in ST(0) and // the second one in ST(1). - if (isStackEmpty()) return; // Quick check to see if any are possible. - + // Find the register operands. 
unsigned FirstFPRegOp = ~0U, SecondFPRegOp = ~0U; - + unsigned LiveMask = 0; + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { MachineOperand &Op = MI->getOperand(i); if (!Op.isReg() || Op.getReg() < X86::FP0 || Op.getReg() > X86::FP6) @@ -1142,12 +1482,18 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) { assert(SecondFPRegOp == ~0U && "More than two fp operands!"); SecondFPRegOp = getFPReg(Op); } + LiveMask |= (1 << getFPReg(Op)); // Remove the operand so that later passes don't see it. MI->RemoveOperand(i); --i, --e; } - + + // We may have been carrying spurious live-ins, so make sure only the returned + // registers are left live. + adjustLiveRegs(LiveMask, MI); + if (!LiveMask) return; // Quick check to see if any are possible. + // There are only four possibilities here: // 1) we are returning a single FP value. In this case, it has to be in // ST(0) already, so just declare success by removing the value from the @@ -1173,7 +1519,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) { // Duplicate the TOS so that we return it twice. Just pick some other FPx // register to hold it. - unsigned NewReg = (FirstFPRegOp+1)%7; + unsigned NewReg = getScratchReg(); duplicateToTop(FirstFPRegOp, NewReg, MI); FirstFPRegOp = NewReg; } @@ -1197,7 +1543,14 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) { } I = MBB->erase(I); // Remove the pseudo instruction - --I; + + // We want to leave I pointing to the previous instruction, but what if we + // just erased the first instruction? + if (I == MBB->begin()) { + DEBUG(dbgs() << "Inserting dummy KILL\n"); + I = BuildMI(*MBB, I, DebugLoc(), TII->get(TargetOpcode::KILL)); + } else + --I; } // Translate a COPY instruction to a pseudo-op that handleSpecialFP understands. diff --git a/lib/Target/X86/X86FloatingPointRegKill.cpp b/lib/Target/X86/X86FloatingPointRegKill.cpp deleted file mode 100644 index 2c98b96c510b..000000000000 --- a/lib/Target/X86/X86FloatingPointRegKill.cpp +++ /dev/null @@ -1,153 +0,0 @@ -//===-- X86FloatingPoint.cpp - FP_REG_KILL inserter -----------------------===// -// -// The LLVM Compiler Infrastructure -// -// This file is distributed under the University of Illinois Open Source -// License. See LICENSE.TXT for details. -// -//===----------------------------------------------------------------------===// -// -// This file defines the pass which inserts FP_REG_KILL instructions. 
-// -//===----------------------------------------------------------------------===// - -#define DEBUG_TYPE "x86-codegen" -#include "X86.h" -#include "X86InstrInfo.h" -#include "llvm/Instructions.h" -#include "llvm/CodeGen/MachineFunctionPass.h" -#include "llvm/CodeGen/MachineInstrBuilder.h" -#include "llvm/CodeGen/MachineRegisterInfo.h" -#include "llvm/CodeGen/Passes.h" -#include "llvm/Target/TargetMachine.h" -#include "llvm/Support/Debug.h" -#include "llvm/Support/CFG.h" -#include "llvm/ADT/Statistic.h" -using namespace llvm; - -STATISTIC(NumFPKill, "Number of FP_REG_KILL instructions added"); - -namespace { - struct FPRegKiller : public MachineFunctionPass { - static char ID; - FPRegKiller() : MachineFunctionPass(&ID) {} - - virtual void getAnalysisUsage(AnalysisUsage &AU) const { - AU.setPreservesCFG(); - AU.addPreservedID(MachineLoopInfoID); - AU.addPreservedID(MachineDominatorsID); - MachineFunctionPass::getAnalysisUsage(AU); - } - - virtual bool runOnMachineFunction(MachineFunction &MF); - - virtual const char *getPassName() const { - return "X86 FP_REG_KILL inserter"; - } - }; - char FPRegKiller::ID = 0; -} - -FunctionPass *llvm::createX87FPRegKillInserterPass() { - return new FPRegKiller(); -} - -/// isFPStackVReg - Return true if the specified vreg is from a fp stack -/// register class. -static bool isFPStackVReg(unsigned RegNo, const MachineRegisterInfo &MRI) { - if (!TargetRegisterInfo::isVirtualRegister(RegNo)) - return false; - - switch (MRI.getRegClass(RegNo)->getID()) { - default: return false; - case X86::RFP32RegClassID: - case X86::RFP64RegClassID: - case X86::RFP80RegClassID: - return true; - } -} - - -/// ContainsFPStackCode - Return true if the specific MBB has floating point -/// stack code, and thus needs an FP_REG_KILL. -static bool ContainsFPStackCode(MachineBasicBlock *MBB, - const MachineRegisterInfo &MRI) { - // Scan the block, looking for instructions that define or use fp stack vregs. - for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); - I != E; ++I) { - for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) { - if (!I->getOperand(op).isReg()) - continue; - if (unsigned Reg = I->getOperand(op).getReg()) - if (isFPStackVReg(Reg, MRI)) - return true; - } - } - - // Check PHI nodes in successor blocks. These PHI's will be lowered to have - // a copy of the input value in this block, which is a definition of the - // value. - for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(), - E = MBB->succ_end(); SI != E; ++ SI) { - MachineBasicBlock *SuccBB = *SI; - for (MachineBasicBlock::iterator I = SuccBB->begin(), E = SuccBB->end(); - I != E; ++I) { - // All PHI nodes are at the top of the block. - if (!I->isPHI()) break; - - if (isFPStackVReg(I->getOperand(0).getReg(), MRI)) - return true; - } - } - - return false; -} - -bool FPRegKiller::runOnMachineFunction(MachineFunction &MF) { - // If we are emitting FP stack code, scan the basic block to determine if this - // block defines or uses any FP values. If so, put an FP_REG_KILL instruction - // before the terminator of the block. - - // Note that FP stack instructions are used in all modes for long double, - // so we always need to do this check. - // Also note that it's possible for an FP stack register to be live across - // an instruction that produces multiple basic blocks (SSE CMOV) so we - // must check all the generated basic blocks. - - // Scan all of the machine instructions in these MBBs, checking for FP - // stores. 
(RFP32 and RFP64 will not exist in SSE mode, but RFP80 might.) - - // Fast-path: If nothing is using the x87 registers, we don't need to do - // any scanning. - const MachineRegisterInfo &MRI = MF.getRegInfo(); - if (MRI.getRegClassVirtRegs(X86::RFP80RegisterClass).empty() && - MRI.getRegClassVirtRegs(X86::RFP64RegisterClass).empty() && - MRI.getRegClassVirtRegs(X86::RFP32RegisterClass).empty()) - return false; - - bool Changed = false; - MachineFunction::iterator MBBI = MF.begin(); - MachineFunction::iterator EndMBB = MF.end(); - for (; MBBI != EndMBB; ++MBBI) { - MachineBasicBlock *MBB = MBBI; - - // If this block returns, ignore it. We don't want to insert an FP_REG_KILL - // before the return. - if (!MBB->empty()) { - MachineBasicBlock::iterator EndI = MBB->end(); - --EndI; - if (EndI->getDesc().isReturn()) - continue; - } - - // If we find any FP stack code, emit the FP_REG_KILL instruction. - if (ContainsFPStackCode(MBB, MRI)) { - BuildMI(*MBB, MBBI->getFirstTerminator(), DebugLoc(), - MF.getTarget().getInstrInfo()->get(X86::FP_REG_KILL)); - ++NumFPKill; - Changed = true; - } - } - - return Changed; -} diff --git a/lib/Target/X86/X86ISelDAGToDAG.cpp b/lib/Target/X86/X86ISelDAGToDAG.cpp index 72f2bc11d7cc..c5234413aba6 100644 --- a/lib/Target/X86/X86ISelDAGToDAG.cpp +++ b/lib/Target/X86/X86ISelDAGToDAG.cpp @@ -171,6 +171,17 @@ namespace { virtual void PreprocessISelDAG(); + inline bool immSext8(SDNode *N) const { + return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue()); + } + + // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit + // sign extended field. + inline bool i64immSExt32(SDNode *N) const { + uint64_t v = cast<ConstantSDNode>(N)->getZExtValue(); + return (int64_t)v == (int32_t)v; + } + // Include the pieces autogenerated from the target description. 
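The two predicates above reduce to plain range checks on the immediate; restated over ordinary integers (an illustrative sketch, not the patch's code), they are:

#include <cstdint>

// True when the value survives truncation to 8 bits and sign extension back,
// i.e. it fits the imm8 field of the *mi8 instruction forms used below.
bool fitsSExt8(int64_t V) { return V >= -128 && V <= 127; }

// True when sign-extending the low 32 bits reproduces the full 64-bit value,
// i.e. it fits the imm32 field of the *mi32 forms.
bool fitsSExt32(uint64_t V) { return (int64_t)V == (int64_t)(int32_t)V; }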
#include "X86GenDAGISel.inc" @@ -1312,13 +1323,6 @@ SDNode *X86DAGToDAGISel::getGlobalBaseReg() { return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode(); } -static SDNode *FindCallStartFromCall(SDNode *Node) { - if (Node->getOpcode() == ISD::CALLSEQ_START) return Node; - assert(Node->getOperand(0).getValueType() == MVT::Other && - "Node doesn't have a token chain argument!"); - return FindCallStartFromCall(Node->getOperand(0).getNode()); -} - SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) { SDValue Chain = Node->getOperand(0); SDValue In1 = Node->getOperand(1); @@ -1403,7 +1407,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) { Opc = X86::LOCK_DEC16m; else if (isSub) { if (isCN) { - if (Predicate_immSext8(Val.getNode())) + if (immSext8(Val.getNode())) Opc = X86::LOCK_SUB16mi8; else Opc = X86::LOCK_SUB16mi; @@ -1411,7 +1415,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) { Opc = X86::LOCK_SUB16mr; } else { if (isCN) { - if (Predicate_immSext8(Val.getNode())) + if (immSext8(Val.getNode())) Opc = X86::LOCK_ADD16mi8; else Opc = X86::LOCK_ADD16mi; @@ -1426,7 +1430,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) { Opc = X86::LOCK_DEC32m; else if (isSub) { if (isCN) { - if (Predicate_immSext8(Val.getNode())) + if (immSext8(Val.getNode())) Opc = X86::LOCK_SUB32mi8; else Opc = X86::LOCK_SUB32mi; @@ -1434,7 +1438,7 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) { Opc = X86::LOCK_SUB32mr; } else { if (isCN) { - if (Predicate_immSext8(Val.getNode())) + if (immSext8(Val.getNode())) Opc = X86::LOCK_ADD32mi8; else Opc = X86::LOCK_ADD32mi; @@ -1450,17 +1454,17 @@ SDNode *X86DAGToDAGISel::SelectAtomicLoadAdd(SDNode *Node, EVT NVT) { else if (isSub) { Opc = X86::LOCK_SUB64mr; if (isCN) { - if (Predicate_immSext8(Val.getNode())) + if (immSext8(Val.getNode())) Opc = X86::LOCK_SUB64mi8; - else if (Predicate_i64immSExt32(Val.getNode())) + else if (i64immSExt32(Val.getNode())) Opc = X86::LOCK_SUB64mi32; } } else { Opc = X86::LOCK_ADD64mr; if (isCN) { - if (Predicate_immSext8(Val.getNode())) + if (immSext8(Val.getNode())) Opc = X86::LOCK_ADD64mi8; - else if (Predicate_i64immSExt32(Val.getNode())) + else if (i64immSExt32(Val.getNode())) Opc = X86::LOCK_ADD64mi32; } } @@ -1841,7 +1845,8 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) { // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to // use a smaller encoding. - if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse()) + if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() && + HasNoSignedComparisonUses(Node)) // Look past the truncate if CMP is the only use of it. N0 = N0.getOperand(0); if (N0.getNode()->getOpcode() == ISD::AND && N0.getNode()->hasOneUse() && diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index b3c48862898f..95dbb6176687 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -16,6 +16,7 @@ #include "X86.h" #include "X86InstrBuilder.h" #include "X86ISelLowering.h" +#include "X86ShuffleDecode.h" #include "X86TargetMachine.h" #include "X86TargetObjectFile.h" #include "llvm/CallingConv.h" @@ -343,8 +344,9 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) if (Subtarget->hasSSE1()) setOperationAction(ISD::PREFETCH , MVT::Other, Legal); - if (!Subtarget->hasSSE2()) - setOperationAction(ISD::MEMBARRIER , MVT::Other, Expand); + // We may not have a libcall for MEMBARRIER so we should lower this. 
+ setOperationAction(ISD::MEMBARRIER , MVT::Other, Custom); + // On X86 and X86-64, atomic operations are lowered to locked instructions. // Locked instructions, in turn, have implicit fence semantics (all memory // operations are flushed before issuing the locked instruction, and they @@ -837,6 +839,10 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) // FIXME: Do we need to handle scalar-to-vector here? setOperationAction(ISD::MUL, MVT::v4i32, Legal); + // Can turn SHL into an integer multiply. + setOperationAction(ISD::SHL, MVT::v4i32, Custom); + setOperationAction(ISD::SHL, MVT::v16i8, Custom); + // i8 and i16 vectors are custom , because the source register and source // source memory operand types are not the same width. f32 vectors are // custom since the immediate controlling the insert encodes additional @@ -866,6 +872,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) addRegisterClass(MVT::v4f64, X86::VR256RegisterClass); addRegisterClass(MVT::v8i32, X86::VR256RegisterClass); addRegisterClass(MVT::v4i64, X86::VR256RegisterClass); + addRegisterClass(MVT::v32i8, X86::VR256RegisterClass); setOperationAction(ISD::LOAD, MVT::v8f32, Legal); setOperationAction(ISD::LOAD, MVT::v8i32, Legal); @@ -877,7 +884,7 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM) setOperationAction(ISD::FDIV, MVT::v8f32, Legal); setOperationAction(ISD::FSQRT, MVT::v8f32, Legal); setOperationAction(ISD::FNEG, MVT::v8f32, Custom); - //setOperationAction(ISD::BUILD_VECTOR, MVT::v8f32, Custom); + setOperationAction(ISD::BUILD_VECTOR, MVT::v8f32, Custom); //setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Custom); //setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8f32, Custom); //setOperationAction(ISD::SELECT, MVT::v8f32, Custom); @@ -1189,6 +1196,50 @@ unsigned X86TargetLowering::getFunctionAlignment(const Function *F) const { return F->hasFnAttr(Attribute::OptimizeForSize) ? 0 : 4; } +std::pair<const TargetRegisterClass*, uint8_t> +X86TargetLowering::findRepresentativeClass(EVT VT) const{ + const TargetRegisterClass *RRC = 0; + uint8_t Cost = 1; + switch (VT.getSimpleVT().SimpleTy) { + default: + return TargetLowering::findRepresentativeClass(VT); + case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64: + RRC = (Subtarget->is64Bit() + ? X86::GR64RegisterClass : X86::GR32RegisterClass); + break; + case MVT::v8i8: case MVT::v4i16: + case MVT::v2i32: case MVT::v1i64: + RRC = X86::VR64RegisterClass; + break; + case MVT::f32: case MVT::f64: + case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64: + case MVT::v4f32: case MVT::v2f64: + case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32: + case MVT::v4f64: + RRC = X86::VR128RegisterClass; + break; + } + return std::make_pair(RRC, Cost); +} + +unsigned +X86TargetLowering::getRegPressureLimit(const TargetRegisterClass *RC, + MachineFunction &MF) const { + unsigned FPDiff = RegInfo->hasFP(MF) ? 1 : 0; + switch (RC->getID()) { + default: + return 0; + case X86::GR32RegClassID: + return 4 - FPDiff; + case X86::GR64RegClassID: + return 8 - FPDiff; + case X86::VR128RegClassID: + return Subtarget->is64Bit() ? 
10 : 4; + case X86::VR64RegClassID: + return 4; + } +} + bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const { if (!Subtarget->isTargetLinux()) @@ -1259,6 +1310,19 @@ X86TargetLowering::LowerReturn(SDValue Chain, CCValAssign &VA = RVLocs[i]; assert(VA.isRegLoc() && "Can only return in registers!"); SDValue ValToCopy = OutVals[i]; + EVT ValVT = ValToCopy.getValueType(); + + // If this is x86-64, and we disabled SSE, we can't return FP values + if ((ValVT == MVT::f32 || ValVT == MVT::f64) && + (Subtarget->is64Bit() && !Subtarget->hasSSE1())) { + report_fatal_error("SSE register return with SSE disabled"); + } + // Likewise we can't return F64 values with SSE1 only. gcc does so, but + // llvm-gcc has never done it right and no one has noticed, so this + // should be OK for now. + if (ValVT == MVT::f64 && + (Subtarget->is64Bit() && !Subtarget->hasSSE2())) + report_fatal_error("SSE2 register return with SSE2 disabled"); // Returns in ST0/ST1 are handled specially: these are pushed as operands to // the RET instruction and handled by the FP Stackifier. @@ -1276,14 +1340,20 @@ X86TargetLowering::LowerReturn(SDValue Chain, // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64 // which is returned in RAX / RDX. if (Subtarget->is64Bit()) { - EVT ValVT = ValToCopy.getValueType(); if (ValVT.isVector() && ValVT.getSizeInBits() == 64) { ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::i64, ValToCopy); - if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) - ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, ValToCopy); + if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) { + ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, + ValToCopy); + + // If we don't have SSE2 available, convert to v4f32 so the generated + // register is legal. + if (!Subtarget->hasSSE2()) + ValToCopy = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32,ValToCopy); + } } } - + Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag); Flag = Chain.getValue(1); } @@ -1570,6 +1640,8 @@ X86TargetLowering::LowerFormalArguments(SDValue Chain, RC = X86::FR32RegisterClass; else if (RegVT == MVT::f64) RC = X86::FR64RegisterClass; + else if (RegVT.isVector() && RegVT.getSizeInBits() == 256) + RC = X86::VR256RegisterClass; else if (RegVT.isVector() && RegVT.getSizeInBits() == 128) RC = X86::VR128RegisterClass; else if (RegVT.isVector() && RegVT.getSizeInBits() == 64) @@ -1937,6 +2009,19 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, if (VA.isRegLoc()) { RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg)); + if (isVarArg && Subtarget->isTargetWin64()) { + // Win64 ABI requires argument XMM reg to be copied to the corresponding + // shadow reg if callee is a varargs function. 
+ unsigned ShadowReg = 0; + switch (VA.getLocReg()) { + case X86::XMM0: ShadowReg = X86::RCX; break; + case X86::XMM1: ShadowReg = X86::RDX; break; + case X86::XMM2: ShadowReg = X86::R8; break; + case X86::XMM3: ShadowReg = X86::R9; break; + } + if (ShadowReg) + RegsToPass.push_back(std::make_pair(ShadowReg, Arg)); + } } else if (!IsSibcall && (!isTailCall || isByVal)) { assert(VA.isMemLoc()); if (StackPtr.getNode() == 0) @@ -1990,7 +2075,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, } } - if (Is64Bit && isVarArg) { + if (Is64Bit && isVarArg && !Subtarget->isTargetWin64()) { // From AMD64 ABI document: // For calls that may call functions that use varargs or stdargs // (prototype-less calls or calls to functions containing ellipsis (...) in @@ -1999,7 +2084,6 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, // the number of registers, but must be an ubound on the number of SSE // registers used and is in the range 0 - 8 inclusive. - // FIXME: Verify this on Win64 // Count the number of XMM registers allocated. static const unsigned XMMArgRegs[] = { X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3, @@ -2165,8 +2249,8 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, if (!isTailCall && Subtarget->isPICStyleGOT()) Ops.push_back(DAG.getRegister(X86::EBX, getPointerTy())); - // Add an implicit use of AL for x86 vararg functions. - if (Is64Bit && isVarArg) + // Add an implicit use of AL for non-Windows x86 64-bit vararg functions. + if (Is64Bit && isVarArg && !Subtarget->isTargetWin64()) Ops.push_back(DAG.getRegister(X86::AL, MVT::i8)); if (InFlag.getNode()) @@ -2356,8 +2440,8 @@ X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee, if (RegInfo->needsStackRealignment(MF)) return false; - // Do not sibcall optimize vararg calls unless the call site is not passing any - // arguments. + // Do not sibcall optimize vararg calls unless the call site is not passing + // any arguments. 
if (isVarArg && !Outs.empty()) return false; @@ -2493,6 +2577,112 @@ X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo) const { // Other Lowering Hooks //===----------------------------------------------------------------------===// +static bool MayFoldLoad(SDValue Op) { + return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); +} + +static bool MayFoldIntoStore(SDValue Op) { + return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); +} + +static bool isTargetShuffle(unsigned Opcode) { + switch(Opcode) { + default: return false; + case X86ISD::PSHUFD: + case X86ISD::PSHUFHW: + case X86ISD::PSHUFLW: + case X86ISD::SHUFPD: + case X86ISD::SHUFPS: + case X86ISD::MOVLHPS: + case X86ISD::MOVLHPD: + case X86ISD::MOVHLPS: + case X86ISD::MOVLPS: + case X86ISD::MOVLPD: + case X86ISD::MOVSHDUP: + case X86ISD::MOVSLDUP: + case X86ISD::MOVSS: + case X86ISD::MOVSD: + case X86ISD::UNPCKLPS: + case X86ISD::UNPCKLPD: + case X86ISD::PUNPCKLWD: + case X86ISD::PUNPCKLBW: + case X86ISD::PUNPCKLDQ: + case X86ISD::PUNPCKLQDQ: + case X86ISD::UNPCKHPS: + case X86ISD::UNPCKHPD: + case X86ISD::PUNPCKHWD: + case X86ISD::PUNPCKHBW: + case X86ISD::PUNPCKHDQ: + case X86ISD::PUNPCKHQDQ: + return true; + } + return false; +} + +static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, + SDValue V1, SelectionDAG &DAG) { + switch(Opc) { + default: llvm_unreachable("Unknown x86 shuffle node"); + case X86ISD::MOVSHDUP: + case X86ISD::MOVSLDUP: + return DAG.getNode(Opc, dl, VT, V1); + } + + return SDValue(); +} + +static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, + SDValue V1, unsigned TargetMask, SelectionDAG &DAG) { + switch(Opc) { + default: llvm_unreachable("Unknown x86 shuffle node"); + case X86ISD::PSHUFD: + case X86ISD::PSHUFHW: + case X86ISD::PSHUFLW: + return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8)); + } + + return SDValue(); +} + +static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, + SDValue V1, SDValue V2, unsigned TargetMask, SelectionDAG &DAG) { + switch(Opc) { + default: llvm_unreachable("Unknown x86 shuffle node"); + case X86ISD::SHUFPD: + case X86ISD::SHUFPS: + return DAG.getNode(Opc, dl, VT, V1, V2, + DAG.getConstant(TargetMask, MVT::i8)); + } + return SDValue(); +} + +static SDValue getTargetShuffleNode(unsigned Opc, DebugLoc dl, EVT VT, + SDValue V1, SDValue V2, SelectionDAG &DAG) { + switch(Opc) { + default: llvm_unreachable("Unknown x86 shuffle node"); + case X86ISD::MOVLHPS: + case X86ISD::MOVLHPD: + case X86ISD::MOVHLPS: + case X86ISD::MOVLPS: + case X86ISD::MOVLPD: + case X86ISD::MOVSS: + case X86ISD::MOVSD: + case X86ISD::UNPCKLPS: + case X86ISD::UNPCKLPD: + case X86ISD::PUNPCKLWD: + case X86ISD::PUNPCKLBW: + case X86ISD::PUNPCKLDQ: + case X86ISD::PUNPCKLQDQ: + case X86ISD::UNPCKHPS: + case X86ISD::UNPCKHPD: + case X86ISD::PUNPCKHWD: + case X86ISD::PUNPCKHBW: + case X86ISD::PUNPCKHDQ: + case X86ISD::PUNPCKHQDQ: + return DAG.getNode(Opc, dl, VT, V1, V2); + } + return SDValue(); +} SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const { MachineFunction &MF = DAG.getMachineFunction(); @@ -3347,18 +3537,27 @@ static SDValue getZeroVector(EVT VT, bool HasSSE2, SelectionDAG &DAG, DebugLoc dl) { assert(VT.isVector() && "Expected a vector type"); - // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest - // type. This ensures they get CSE'd. + // Always build zero vectors as <4 x i32> or <2 x i32> bitcasted + // to their dest type. 
This ensures they get CSE'd. SDValue Vec; if (VT.getSizeInBits() == 64) { // MMX SDValue Cst = DAG.getTargetConstant(0, MVT::i32); Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst); - } else if (HasSSE2) { // SSE2 - SDValue Cst = DAG.getTargetConstant(0, MVT::i32); - Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); - } else { // SSE1 + } else if (VT.getSizeInBits() == 128) { + if (HasSSE2) { // SSE2 + SDValue Cst = DAG.getTargetConstant(0, MVT::i32); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); + } else { // SSE1 + SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); + } + } else if (VT.getSizeInBits() == 256) { // AVX + // 256-bit logic and arithmetic instructions in AVX are + // all floating-point, no support for integer ops. Default + // to emitting fp zeroed vectors then. SDValue Cst = DAG.getTargetConstantFP(+0.0, MVT::f32); - Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst); + SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst }; + Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops, 8); } return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec); } @@ -3372,9 +3571,9 @@ static SDValue getOnesVector(EVT VT, SelectionDAG &DAG, DebugLoc dl) { // type. This ensures they get CSE'd. SDValue Cst = DAG.getTargetConstant(~0U, MVT::i32); SDValue Vec; - if (VT.getSizeInBits() == 64) // MMX + if (VT.getSizeInBits() == 64) // MMX Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2i32, Cst, Cst); - else // SSE + else // SSE Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst); return DAG.getNode(ISD::BIT_CONVERT, dl, VT, Vec); } @@ -3439,9 +3638,8 @@ static SDValue getUnpackh(SelectionDAG &DAG, DebugLoc dl, EVT VT, SDValue V1, return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]); } -/// PromoteSplat - Promote a splat of v4f32, v8i16 or v16i8 to v4i32. -static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG, - bool HasSSE2) { +/// PromoteSplat - Promote a splat of v4i32, v8i16 or v16i8 to v4f32. +static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) { if (SV->getValueType(0).getVectorNumElements() <= 4) return SDValue(SV, 0); @@ -3488,68 +3686,253 @@ static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx, return DAG.getVectorShuffle(VT, V2.getDebugLoc(), V1, V2, &MaskVec[0]); } -/// getNumOfConsecutiveZeros - Return the number of elements in a result of -/// a shuffle that is zero. -static -unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp, int NumElems, - bool Low, SelectionDAG &DAG) { - unsigned NumZeros = 0; - for (int i = 0; i < NumElems; ++i) { - unsigned Index = Low ? i : NumElems-i-1; - int Idx = SVOp->getMaskElt(Index); - if (Idx < 0) { - ++NumZeros; - continue; - } - SDValue Elt = DAG.getShuffleScalarElt(SVOp, Index); - if (Elt.getNode() && X86::isZeroNode(Elt)) - ++NumZeros; - else +/// getShuffleScalarElt - Returns the scalar element that will make up the ith +/// element of the result of the vector shuffle. +SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG, + unsigned Depth) { + if (Depth == 6) + return SDValue(); // Limit search depth. + + SDValue V = SDValue(N, 0); + EVT VT = V.getValueType(); + unsigned Opcode = V.getOpcode(); + + // Recurse into ISD::VECTOR_SHUFFLE node to find scalars. 
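The core index arithmetic in the recursion that follows, turning one shuffle mask element into an (operand, lane) pair, can be restated in isolation (an illustrative helper, not from the patch):

// A shuffle of two N-element vectors uses mask values 0..N-1 for lanes of the
// first operand, N..2N-1 for lanes of the second, and -1 for an undef lane.
struct MaskSource { int Operand; int Lane; };   // Operand == -1 means undef

MaskSource decodeMaskElt(int MaskElt, int NumElems) {
  MaskSource S;
  if (MaskElt < 0) { S.Operand = -1; S.Lane = -1; return S; }
  S.Operand = MaskElt < NumElems ? 0 : 1;
  S.Lane = MaskElt % NumElems;
  return S;
}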
+ if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) { + Index = SV->getMaskElt(Index); + + if (Index < 0) + return DAG.getUNDEF(VT.getVectorElementType()); + + int NumElems = VT.getVectorNumElements(); + SDValue NewV = (Index < NumElems) ? SV->getOperand(0) : SV->getOperand(1); + return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, Depth+1); + } + + // Recurse into target specific vector shuffles to find scalars. + if (isTargetShuffle(Opcode)) { + int NumElems = VT.getVectorNumElements(); + SmallVector<unsigned, 16> ShuffleMask; + SDValue ImmN; + + switch(Opcode) { + case X86ISD::SHUFPS: + case X86ISD::SHUFPD: + ImmN = N->getOperand(N->getNumOperands()-1); + DecodeSHUFPSMask(NumElems, + cast<ConstantSDNode>(ImmN)->getZExtValue(), + ShuffleMask); + break; + case X86ISD::PUNPCKHBW: + case X86ISD::PUNPCKHWD: + case X86ISD::PUNPCKHDQ: + case X86ISD::PUNPCKHQDQ: + DecodePUNPCKHMask(NumElems, ShuffleMask); break; + case X86ISD::UNPCKHPS: + case X86ISD::UNPCKHPD: + DecodeUNPCKHPMask(NumElems, ShuffleMask); + break; + case X86ISD::PUNPCKLBW: + case X86ISD::PUNPCKLWD: + case X86ISD::PUNPCKLDQ: + case X86ISD::PUNPCKLQDQ: + DecodePUNPCKLMask(NumElems, ShuffleMask); + break; + case X86ISD::UNPCKLPS: + case X86ISD::UNPCKLPD: + DecodeUNPCKLPMask(NumElems, ShuffleMask); + break; + case X86ISD::MOVHLPS: + DecodeMOVHLPSMask(NumElems, ShuffleMask); + break; + case X86ISD::MOVLHPS: + DecodeMOVLHPSMask(NumElems, ShuffleMask); + break; + case X86ISD::PSHUFD: + ImmN = N->getOperand(N->getNumOperands()-1); + DecodePSHUFMask(NumElems, + cast<ConstantSDNode>(ImmN)->getZExtValue(), + ShuffleMask); + break; + case X86ISD::PSHUFHW: + ImmN = N->getOperand(N->getNumOperands()-1); + DecodePSHUFHWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), + ShuffleMask); + break; + case X86ISD::PSHUFLW: + ImmN = N->getOperand(N->getNumOperands()-1); + DecodePSHUFLWMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), + ShuffleMask); + break; + case X86ISD::MOVSS: + case X86ISD::MOVSD: { + // The index 0 always comes from the first element of the second source, + // this is why MOVSS and MOVSD are used in the first place. The other + // elements come from the other positions of the first source vector. + unsigned OpNum = (Index == 0) ? 1 : 0; + return getShuffleScalarElt(V.getOperand(OpNum).getNode(), Index, DAG, + Depth+1); + } + default: + assert("not implemented for target shuffle node"); + return SDValue(); + } + + Index = ShuffleMask[Index]; + if (Index < 0) + return DAG.getUNDEF(VT.getVectorElementType()); + + SDValue NewV = (Index < NumElems) ? N->getOperand(0) : N->getOperand(1); + return getShuffleScalarElt(NewV.getNode(), Index % NumElems, DAG, + Depth+1); } - return NumZeros; -} -/// isVectorShift - Returns true if the shuffle can be implemented as a -/// logical left or right shift of a vector. -/// FIXME: split into pslldqi, psrldqi, palignr variants. 
-static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, - bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { - unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); + // Actual nodes that may contain scalar elements + if (Opcode == ISD::BIT_CONVERT) { + V = V.getOperand(0); + EVT SrcVT = V.getValueType(); + unsigned NumElems = VT.getVectorNumElements(); - isLeft = true; - unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, true, DAG); - if (!NumZeros) { - isLeft = false; - NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, false, DAG); - if (!NumZeros) - return false; + if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems) + return SDValue(); + } + + if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) + return (Index == 0) ? V.getOperand(0) + : DAG.getUNDEF(VT.getVectorElementType()); + + if (V.getOpcode() == ISD::BUILD_VECTOR) + return V.getOperand(Index); + + return SDValue(); +} + +/// getNumOfConsecutiveZeros - Return the number of elements of a vector +/// shuffle operation which come consecutively from a zero. The +/// search can start in two different directions, from left or right. +static +unsigned getNumOfConsecutiveZeros(SDNode *N, int NumElems, + bool ZerosFromLeft, SelectionDAG &DAG) { + int i = 0; + + while (i < NumElems) { + unsigned Index = ZerosFromLeft ? i : NumElems-i-1; + SDValue Elt = getShuffleScalarElt(N, Index, DAG, 0); + if (!(Elt.getNode() && + (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt)))) + break; + ++i; } + + return i; +} + +/// isShuffleMaskConsecutive - Check if the shuffle mask indices from MaskI to +/// MaskE correspond consecutively to elements from one of the vector operands, +/// starting from its index OpIdx. Also tell OpNum which source vector operand. +static +bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp, int MaskI, int MaskE, + int OpIdx, int NumElems, unsigned &OpNum) { bool SeenV1 = false; bool SeenV2 = false; - for (unsigned i = NumZeros; i < NumElems; ++i) { - unsigned Val = isLeft ? (i - NumZeros) : i; - int Idx_ = SVOp->getMaskElt(isLeft ? i : (i - NumZeros)); - if (Idx_ < 0) + + for (int i = MaskI; i <= MaskE; ++i, ++OpIdx) { + int Idx = SVOp->getMaskElt(i); + // Ignore undef indices + if (Idx < 0) continue; - unsigned Idx = (unsigned) Idx_; + if (Idx < NumElems) SeenV1 = true; - else { - Idx -= NumElems; + else SeenV2 = true; - } - if (Idx != Val) + + // Only accept consecutive elements from the same vector + if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2)) return false; } - if (SeenV1 && SeenV2) + + OpNum = SeenV1 ? 0 : 1; + return true; +} + +/// isVectorShiftRight - Returns true if the shuffle can be implemented as a +/// logical right shift of a vector. +static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, + bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { + unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); + unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, + false /* check zeros from right */, DAG); + unsigned OpSrc; + + if (!NumZeros) + return false; + + // Considering the elements in the mask that are not consecutive zeros, + // check if they consecutively come from only one of the source vectors.
+ // + // V1 = {X, A, B, C} 0 + // \ \ \ / + // vector_shuffle V1, V2 <1, 2, 3, X> + // + if (!isShuffleMaskConsecutive(SVOp, + 0, // Mask Start Index + NumElems-NumZeros-1, // Mask End Index + NumZeros, // Where to start looking in the src vector + NumElems, // Number of elements in vector + OpSrc)) // Which source operand ? + return false; + + isLeft = false; + ShAmt = NumZeros; + ShVal = SVOp->getOperand(OpSrc); + return true; +} + +/// isVectorShiftLeft - Returns true if the shuffle can be implemented as a +/// logical left shift of a vector. +static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, + bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { + unsigned NumElems = SVOp->getValueType(0).getVectorNumElements(); + unsigned NumZeros = getNumOfConsecutiveZeros(SVOp, NumElems, + true /* check zeros from left */, DAG); + unsigned OpSrc; + + if (!NumZeros) + return false; + + // Considering the elements in the mask that are not consecutive zeros, + // check if they consecutively come from only one of the source vectors. + // + // 0 { A, B, X, X } = V2 + // / \ / / + // vector_shuffle V1, V2 <X, X, 4, 5> + // + if (!isShuffleMaskConsecutive(SVOp, + NumZeros, // Mask Start Index + NumElems-1, // Mask End Index + 0, // Where to start looking in the src vector + NumElems, // Number of elements in vector + OpSrc)) // Which source operand ? return false; - ShVal = SeenV1 ? SVOp->getOperand(0) : SVOp->getOperand(1); + isLeft = true; ShAmt = NumZeros; + ShVal = SVOp->getOperand(OpSrc); return true; } +/// isVectorShift - Returns true if the shuffle can be implemented as a +/// logical left or right shift of a vector. +static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG, + bool &isLeft, SDValue &ShVal, unsigned &ShAmt) { + if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) || + isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt)) + return true; + + return false; +} /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8. /// @@ -3779,9 +4162,13 @@ static SDValue EltsFromConsecutiveLoads(EVT VT, SmallVectorImpl<SDValue> &Elts, SDValue X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { DebugLoc dl = Op.getDebugLoc(); - // All zero's are handled with pxor, all one's are handled with pcmpeqd. - if (ISD::isBuildVectorAllZeros(Op.getNode()) - || ISD::isBuildVectorAllOnes(Op.getNode())) { + // All zero's are handled with pxor in SSE2 and above, xorps in SSE1. + // All one's are handled with pcmpeqd. In AVX, zero's are handled with + // vpxor in 128-bit and xor{pd,ps} in 256-bit, but no 256 version of pcmpeqd + // is present, so AllOnes is ignored. + if (ISD::isBuildVectorAllZeros(Op.getNode()) || + (Op.getValueType().getSizeInBits() != 256 && + ISD::isBuildVectorAllOnes(Op.getNode()))) { // Canonicalize this to either <4 x i32> or <2 x i32> (SSE vs MMX) to // 1) ensure the zero vectors are CSE'd, and 2) ensure that i64 scalars are // eliminated on x86-32 hosts. @@ -3819,10 +4206,9 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { } } - if (NumNonZero == 0) { - // All undef vector. Return an UNDEF. All zero vectors were handled above. + // All undef vector. Return an UNDEF. All zero vectors were handled above. + if (NumNonZero == 0) return DAG.getUNDEF(VT); - } // Special case for single non-zero, non-undef, element. 
if (NumNonZero == 1) { @@ -3960,7 +4346,7 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { if (EVTBits == 16 && NumElems == 8) { SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG, - *this); + *this); if (V.getNode()) return V; } @@ -4014,28 +4400,51 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const { if (LD.getNode()) return LD; - // For SSE 4.1, use inserts into undef. + // For SSE 4.1, use insertps to put the high elements into the low element. if (getSubtarget()->hasSSE41()) { - V[0] = DAG.getUNDEF(VT); - for (unsigned i = 0; i < NumElems; ++i) - if (Op.getOperand(i).getOpcode() != ISD::UNDEF) - V[0] = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, V[0], + SDValue Result; + if (Op.getOperand(0).getOpcode() != ISD::UNDEF) + Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0)); + else + Result = DAG.getUNDEF(VT); + + for (unsigned i = 1; i < NumElems; ++i) { + if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue; + Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result, Op.getOperand(i), DAG.getIntPtrConstant(i)); - return V[0]; + } + return Result; } - // Otherwise, expand into a number of unpckl* - // e.g. for v4f32 + // Otherwise, expand into a number of unpckl*, start by extending each of + // our (non-undef) elements to the full vector width with the element in the + // bottom slot of the vector (which generates no code for SSE). + for (unsigned i = 0; i < NumElems; ++i) { + if (Op.getOperand(i).getOpcode() != ISD::UNDEF) + V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); + else + V[i] = DAG.getUNDEF(VT); + } + + // Next, we iteratively mix elements, e.g. for v4f32: // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0> // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1> // Step 2: unpcklps X, Y ==> <3, 2, 1, 0> - for (unsigned i = 0; i < NumElems; ++i) - V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i)); - NumElems >>= 1; - while (NumElems != 0) { - for (unsigned i = 0; i < NumElems; ++i) - V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + NumElems]); - NumElems >>= 1; + unsigned EltStride = NumElems >> 1; + while (EltStride != 0) { + for (unsigned i = 0; i < EltStride; ++i) { + // If V[i+EltStride] is undef and this is the first round of mixing, + // then it is safe to just drop this shuffle: V[i] is already in the + // right place, the one element (since it's the first round) being + // inserted as undef can be dropped. This isn't safe for successive + // rounds because they will permute elements within both vectors. + if (V[i+EltStride].getOpcode() == ISD::UNDEF && + EltStride == NumElems/2) + continue; + + V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]); + } + EltStride >>= 1; } return V[0]; } @@ -4074,10 +4483,10 @@ X86TargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const { // 2. [ssse3] 1 x pshufb // 3. [ssse3] 2 x pshufb + 1 x por // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw) -static -SDValue LowerVECTOR_SHUFFLEv8i16(ShuffleVectorSDNode *SVOp, - SelectionDAG &DAG, - const X86TargetLowering &TLI) { +SDValue +X86TargetLowering::LowerVECTOR_SHUFFLEv8i16(SDValue Op, + SelectionDAG &DAG) const { + ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); SDValue V1 = SVOp->getOperand(0); SDValue V2 = SVOp->getOperand(1); DebugLoc dl = SVOp->getDebugLoc(); @@ -4128,7 +4537,7 @@ SDValue LowerVECTOR_SHUFFLEv8i16(ShuffleVectorSDNode *SVOp, // quads, disable the next transformation since it does not help SSSE3. 
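Cases 2 and 3 in the list above build a byte-level control vector for pshufb from the word-level mask; a standalone sketch of that mapping (my own names, not the patch's code) shows why lanes taken from the other source become 0x80, which pshufb turns into zero so the two per-source results can simply be OR'd together:

#include <vector>

// WordMask[i] in 0..7 selects a word of V1, 8..15 a word of V2, -1 is undef.
// Each selected word expands to its two byte indices; anything not supplied by
// this source gets 0x80 (pshufb writes zero when the control byte's high bit is set).
std::vector<unsigned char> pshufbControl(const int WordMask[8], bool ForV1) {
  std::vector<unsigned char> Ctl(16);
  for (int i = 0; i != 8; ++i) {
    int M = WordMask[i];
    bool FromThisSource = M >= 0 && (ForV1 ? M < 8 : M >= 8);
    int Word = ForV1 ? M : M - 8;
    Ctl[2*i]   = FromThisSource ? (unsigned char)(2*Word)     : 0x80;
    Ctl[2*i+1] = FromThisSource ? (unsigned char)(2*Word + 1) : 0x80;
  }
  return Ctl;
}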
bool V1Used = InputQuads[0] || InputQuads[1]; bool V2Used = InputQuads[2] || InputQuads[3]; - if (TLI.getSubtarget()->hasSSSE3()) { + if (Subtarget->hasSSSE3()) { if (InputQuads.count() == 2 && V1Used && V2Used) { BestLoQuad = InputQuads.find_first(); BestHiQuad = InputQuads.find_next(BestLoQuad); @@ -4187,15 +4596,21 @@ SDValue LowerVECTOR_SHUFFLEv8i16(ShuffleVectorSDNode *SVOp, // If we've eliminated the use of V2, and the new mask is a pshuflw or // pshufhw, that's as cheap as it gets. Return the new shuffle. if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) { - return DAG.getVectorShuffle(MVT::v8i16, dl, NewV, + unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW; + unsigned TargetMask = 0; + NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), &MaskVals[0]); + TargetMask = pshufhw ? X86::getShufflePSHUFHWImmediate(NewV.getNode()): + X86::getShufflePSHUFLWImmediate(NewV.getNode()); + V1 = NewV.getOperand(0); + return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG); } } // If we have SSSE3, and all words of the result are from 1 input vector, // case 2 is generated, otherwise case 3 is generated. If no SSSE3 // is present, fall back to case 4. - if (TLI.getSubtarget()->hasSSSE3()) { + if (Subtarget->hasSSSE3()) { SmallVector<SDValue,16> pshufbMask; // If we have elements from both input vectors, set the high bit of the @@ -4262,6 +4677,12 @@ SDValue LowerVECTOR_SHUFFLEv8i16(ShuffleVectorSDNode *SVOp, MaskV.push_back(i); NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), &MaskV[0]); + + if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) + NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16, + NewV.getOperand(0), + X86::getShufflePSHUFLWImmediate(NewV.getNode()), + DAG); } // If BestHi >= 0, generate a pshufhw to put the high elements in order, @@ -4284,6 +4705,12 @@ SDValue LowerVECTOR_SHUFFLEv8i16(ShuffleVectorSDNode *SVOp, } NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16), &MaskV[0]); + + if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSSE3()) + NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16, + NewV.getOperand(0), + X86::getShufflePSHUFHWImmediate(NewV.getNode()), + DAG); } // In case BestHi & BestLo were both -1, which means each quadword has a word @@ -4473,7 +4900,7 @@ SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp, SDValue V2 = SVOp->getOperand(1); unsigned NumElems = VT.getVectorNumElements(); unsigned NewWidth = (NumElems == 4) ? 2 : 4; - EVT MaskVT = MVT::getIntVectorWithNumElements(NewWidth); + EVT MaskVT = (NewWidth == 4) ? 
MVT::v4i16 : MVT::v2i32; EVT NewVT = MaskVT; switch (VT.getSimpleVT().SimpleTy) { default: assert(false && "Unexpected!"); @@ -4697,6 +5124,129 @@ LowerVECTOR_SHUFFLE_4wide(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) { return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]); } +static bool MayFoldVectorLoad(SDValue V) { + if (V.hasOneUse() && V.getOpcode() == ISD::BIT_CONVERT) + V = V.getOperand(0); + if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR) + V = V.getOperand(0); + if (MayFoldLoad(V)) + return true; + return false; +} + +static +SDValue getMOVLowToHigh(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, + bool HasSSE2) { + SDValue V1 = Op.getOperand(0); + SDValue V2 = Op.getOperand(1); + EVT VT = Op.getValueType(); + + assert(VT != MVT::v2i64 && "unsupported shuffle type"); + + if (HasSSE2 && VT == MVT::v2f64) + return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG); + + // v4f32 or v4i32 + return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V2, DAG); +} + +static +SDValue getMOVHighToLow(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG) { + SDValue V1 = Op.getOperand(0); + SDValue V2 = Op.getOperand(1); + EVT VT = Op.getValueType(); + + assert((VT == MVT::v4i32 || VT == MVT::v4f32) && + "unsupported shuffle type"); + + if (V2.getOpcode() == ISD::UNDEF) + V2 = V1; + + // v4i32 or v4f32 + return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG); +} + +static +SDValue getMOVLP(SDValue &Op, DebugLoc &dl, SelectionDAG &DAG, bool HasSSE2) { + SDValue V1 = Op.getOperand(0); + SDValue V2 = Op.getOperand(1); + EVT VT = Op.getValueType(); + unsigned NumElems = VT.getVectorNumElements(); + + // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second + // operand of these instructions is only memory, so check if there's a + // potencial load folding here, otherwise use SHUFPS or MOVSD to match the + // same masks. + bool CanFoldLoad = false; + + // Trivial case, when V2 comes from a load. + if (MayFoldVectorLoad(V2)) + CanFoldLoad = true; + + // When V1 is a load, it can be folded later into a store in isel, example: + // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1) + // turns into: + // (MOVLPSmr addr:$src1, VR128:$src2) + // So, recognize this potential and also use MOVLPS or MOVLPD + if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op)) + CanFoldLoad = true; + + if (CanFoldLoad) { + if (HasSSE2 && NumElems == 2) + return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG); + + if (NumElems == 4) + return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG); + } + + ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); + // movl and movlp will both match v2i64, but v2i64 is never matched by + // movl earlier because we make it strict to avoid messing with the movlp load + // folding logic (see the code above getMOVLP call). Match it here then, + // this is horrible, but will stay like this until we move all shuffle + // matching to x86 specific nodes. Note that for the 1st condition all + // types are matched with movsd. + if ((HasSSE2 && NumElems == 2) || !X86::isMOVLMask(SVOp)) + return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); + else if (HasSSE2) + return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); + + + assert(VT != MVT::v4i32 && "unsupported shuffle type"); + + // Invert the operand order and use SHUFPS to match it. 
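For reference on the SSSE3 cases of LowerVECTOR_SHUFFLEv8i16 a little further up (one pshufb, or two pshufbs plus a por): the per-byte rule of pshufb is what makes the two-input case work, because a control byte with bit 7 set writes zero, so the two half-results can simply be ORed together. The scalar model below is included only to make that mask building easier to follow; it is not the LLVM code.

#include <array>
#include <cstdint>

using Bytes16 = std::array<uint8_t, 16>;

// One 128-bit pshufb: result byte i is zero when bit 7 of the control byte
// is set, otherwise the source byte selected by the low four bits.
static Bytes16 pshufb(const Bytes16 &Src, const Bytes16 &Control) {
  Bytes16 R{};
  for (int i = 0; i < 16; ++i)
    R[i] = (Control[i] & 0x80) ? 0 : Src[Control[i] & 0x0F];
  return R;
}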
+ return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V2, V1, + X86::getShuffleSHUFImmediate(SVOp), DAG); +} + +static inline unsigned getUNPCKLOpcode(EVT VT) { + switch(VT.getSimpleVT().SimpleTy) { + case MVT::v4i32: return X86ISD::PUNPCKLDQ; + case MVT::v2i64: return X86ISD::PUNPCKLQDQ; + case MVT::v4f32: return X86ISD::UNPCKLPS; + case MVT::v2f64: return X86ISD::UNPCKLPD; + case MVT::v16i8: return X86ISD::PUNPCKLBW; + case MVT::v8i16: return X86ISD::PUNPCKLWD; + default: + llvm_unreachable("Unknow type for unpckl"); + } + return 0; +} + +static inline unsigned getUNPCKHOpcode(EVT VT) { + switch(VT.getSimpleVT().SimpleTy) { + case MVT::v4i32: return X86ISD::PUNPCKHDQ; + case MVT::v2i64: return X86ISD::PUNPCKHQDQ; + case MVT::v4f32: return X86ISD::UNPCKHPS; + case MVT::v2f64: return X86ISD::UNPCKHPD; + case MVT::v16i8: return X86ISD::PUNPCKHBW; + case MVT::v8i16: return X86ISD::PUNPCKHWD; + default: + llvm_unreachable("Unknow type for unpckh"); + } + return 0; +} + SDValue X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op); @@ -4710,6 +5260,10 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { bool V2IsUndef = V2.getOpcode() == ISD::UNDEF; bool V1IsSplat = false; bool V2IsSplat = false; + bool HasSSE2 = Subtarget->hasSSE2() || Subtarget->hasAVX(); + bool HasSSE3 = Subtarget->hasSSE3() || Subtarget->hasAVX(); + MachineFunction &MF = DAG.getMachineFunction(); + bool OptForSize = MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize); if (isZeroShuffle(SVOp)) return getZeroVector(VT, Subtarget->hasSSE2(), DAG, dl); @@ -4718,7 +5272,7 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { if (SVOp->isSplat()) { if (isMMX || NumElems < 4) return Op; - return PromoteSplat(SVOp, DAG, Subtarget->hasSSE2()); + return PromoteSplat(SVOp, DAG); } // If the shuffle can be profitably rewritten as a narrower shuffle, then @@ -4746,8 +5300,35 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { } } - if (X86::isPSHUFDMask(SVOp)) - return Op; + // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and + // unpckh_undef). Only use pshufd if speed is more important than size. + if (OptForSize && X86::isUNPCKL_v_undef_Mask(SVOp)) + if (VT != MVT::v2i64 && VT != MVT::v2f64) + return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG); + if (OptForSize && X86::isUNPCKH_v_undef_Mask(SVOp)) + if (VT != MVT::v2i64 && VT != MVT::v2f64) + return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG); + + if (X86::isPSHUFDMask(SVOp)) { + // The actual implementation will match the mask in the if above and then + // during isel it can match several different instructions, not only pshufd + // as its name says, sad but true, emulate the behavior for now... + if (X86::isMOVDDUPMask(SVOp) && ((VT == MVT::v4f32 || VT == MVT::v2i64))) + return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG); + + unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp); + + if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32)) + return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG); + + if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64)) + return getTargetShuffleNode(X86ISD::SHUFPD, dl, VT, V1, V1, + TargetMask, DAG); + + if (VT == MVT::v4f32) + return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V1, V1, + TargetMask, DAG); + } // Check if this can be converted into a logical shift. 
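The PSHUFD/SHUFPS/SHUFPD nodes emitted above carry the usual 8-bit shuffle immediate (the value X86::getShuffleSHUFImmediate returns): two bits per destination lane selecting a source lane, with pshuflw/pshufhw applying the same encoding to the low or high four words. A tiny sketch of that encoding for the 4-lane, single-source case; packShufImm is a hypothetical helper, and SHUFPS differs only in reading its two high lanes from the second operand.

#include <array>
#include <cstdint>

using V4i = std::array<uint32_t, 4>;

// Pack a 4-lane permutation into the immediate, e.g. {0,1,2,3} -> 0xE4.
static uint8_t packShufImm(const std::array<unsigned, 4> &Lane) {
  return uint8_t(Lane[0] | (Lane[1] << 2) | (Lane[2] << 4) | (Lane[3] << 6));
}

// Apply the immediate the way pshufd does: every lane comes from Src.
static V4i pshufd(const V4i &Src, uint8_t Imm) {
  V4i R;
  for (int i = 0; i < 4; ++i)
    R[i] = Src[(Imm >> (2 * i)) & 3];
  return R;
}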
bool isLeft = false; @@ -4768,17 +5349,32 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { return V2; if (ISD::isBuildVectorAllZeros(V1.getNode())) return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl); - if (!isMMX) - return Op; + if (!isMMX && !X86::isMOVLPMask(SVOp)) { + if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64)) + return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG); + + if (VT == MVT::v4i32 || VT == MVT::v4f32) + return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG); + } } // FIXME: fold these into legal mask. - if (!isMMX && (X86::isMOVSHDUPMask(SVOp) || - X86::isMOVSLDUPMask(SVOp) || - X86::isMOVHLPSMask(SVOp) || - X86::isMOVLHPSMask(SVOp) || - X86::isMOVLPMask(SVOp))) - return Op; + if (!isMMX) { + if (X86::isMOVLHPSMask(SVOp) && !X86::isUNPCKLMask(SVOp)) + return getMOVLowToHigh(Op, dl, DAG, HasSSE2); + + if (X86::isMOVHLPSMask(SVOp)) + return getMOVHighToLow(Op, dl, DAG); + + if (X86::isMOVSHDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4) + return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG); + + if (X86::isMOVSLDUPMask(SVOp) && HasSSE3 && V2IsUndef && NumElems == 4) + return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG); + + if (X86::isMOVLPMask(SVOp)) + return getMOVLP(Op, dl, DAG, HasSSE2); + } if (ShouldXformToMOVHLPS(SVOp) || ShouldXformToMOVLP(V1.getNode(), V2.getNode(), SVOp)) @@ -4818,11 +5414,13 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { return getMOVL(DAG, dl, VT, V2, V1); } - if (X86::isUNPCKL_v_undef_Mask(SVOp) || - X86::isUNPCKH_v_undef_Mask(SVOp) || - X86::isUNPCKLMask(SVOp) || - X86::isUNPCKHMask(SVOp)) - return Op; + if (X86::isUNPCKLMask(SVOp)) + return (isMMX) ? + Op : getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V2, DAG); + + if (X86::isUNPCKHMask(SVOp)) + return (isMMX) ? + Op : getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V2, DAG); if (V2IsSplat) { // Normalize mask so all entries that point to V2 points to its first @@ -4844,11 +5442,14 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { // FIXME: this seems wrong. SDValue NewOp = CommuteVectorShuffle(SVOp, DAG); ShuffleVectorSDNode *NewSVOp = cast<ShuffleVectorSDNode>(NewOp); - if (X86::isUNPCKL_v_undef_Mask(NewSVOp) || - X86::isUNPCKH_v_undef_Mask(NewSVOp) || - X86::isUNPCKLMask(NewSVOp) || - X86::isUNPCKHMask(NewSVOp)) - return NewOp; + + if (X86::isUNPCKLMask(NewSVOp)) + return (isMMX) ? + NewOp : getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V2, V1, DAG); + + if (X86::isUNPCKHMask(NewSVOp)) + return (isMMX) ? + NewOp : getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V2, V1, DAG); } // FIXME: for mmx, bitcast v2i32 to v4i16 for shuffle. @@ -4857,15 +5458,52 @@ X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const { if (!isMMX && V2.getOpcode() != ISD::UNDEF && isCommutedSHUFP(SVOp)) return CommuteVectorShuffle(SVOp, DAG); - // Check for legal shuffle and return? - SmallVector<int, 16> PermMask; - SVOp->getMask(PermMask); - if (isShuffleMaskLegal(PermMask, VT)) + // The checks below are all present in isShuffleMaskLegal, but they are + // inlined here right now to enable us to directly emit target specific + // nodes, and remove one by one until they don't return Op anymore. + SmallVector<int, 16> M; + SVOp->getMask(M); + + // Very little shuffling can be done for 64-bit vectors right now. + if (VT.getSizeInBits() == 64) + return isPALIGNRMask(M, VT, Subtarget->hasSSSE3()) ? 
Op : SDValue(); + + // FIXME: pshufb, blends, shifts. + if (VT.getVectorNumElements() == 2 || + ShuffleVectorSDNode::isSplatMask(&M[0], VT) || + isPALIGNRMask(M, VT, Subtarget->hasSSSE3())) return Op; + if (isPSHUFHWMask(M, VT)) + return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1, + X86::getShufflePSHUFHWImmediate(SVOp), + DAG); + + if (isPSHUFLWMask(M, VT)) + return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1, + X86::getShufflePSHUFLWImmediate(SVOp), + DAG); + + if (isSHUFPMask(M, VT)) { + unsigned TargetMask = X86::getShuffleSHUFImmediate(SVOp); + if (VT == MVT::v4f32 || VT == MVT::v4i32) + return getTargetShuffleNode(X86ISD::SHUFPS, dl, VT, V1, V2, + TargetMask, DAG); + if (VT == MVT::v2f64 || VT == MVT::v2i64) + return getTargetShuffleNode(X86ISD::SHUFPD, dl, VT, V1, V2, + TargetMask, DAG); + } + + if (X86::isUNPCKL_v_undef_Mask(SVOp)) + if (VT != MVT::v2i64 && VT != MVT::v2f64) + return getTargetShuffleNode(getUNPCKLOpcode(VT), dl, VT, V1, V1, DAG); + if (X86::isUNPCKH_v_undef_Mask(SVOp)) + if (VT != MVT::v2i64 && VT != MVT::v2f64) + return getTargetShuffleNode(getUNPCKHOpcode(VT), dl, VT, V1, V1, DAG); + // Handle v8i16 specifically since SSE can do byte extraction and insertion. if (VT == MVT::v8i16) { - SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(SVOp, DAG, *this); + SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, DAG); if (NewOp.getNode()) return NewOp; } @@ -6922,24 +7560,58 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const DAG.getConstant(X86CC, MVT::i8), Cond); return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); } - // ptest intrinsics. The intrinsic these come from are designed to return - // an integer value, not just an instruction so lower it to the ptest - // pattern and a setcc for the result. + // ptest and testp intrinsics. The intrinsic these come from are designed to + // return an integer value, not just an instruction so lower it to the ptest + // or testp pattern and a setcc for the result. 
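The ptest/testp intrinsic cases handled just below all boil down to one flag-setting node (PTEST for the integer forms, the new TESTP node for the AVX packed-FP forms) followed by a setcc on E, B or A. For PTEST the documented flag rules are simple enough to state scalar-wise, and they explain the testz/testc/testnzc to COND_E/COND_B/COND_A mapping used below; the sketch models 128 bits as two 64-bit halves purely for illustration.

#include <cstdint>

struct V128 { uint64_t Lo, Hi; };

// PTEST: ZF is set when (A & B) is all zeros, CF when (~A & B) is all zeros.
static bool ptestZF(V128 A, V128 B) { return ((A.Lo & B.Lo) | (A.Hi & B.Hi)) == 0; }
static bool ptestCF(V128 A, V128 B) { return ((~A.Lo & B.Lo) | (~A.Hi & B.Hi)) == 0; }

static int testz  (V128 A, V128 B) { return ptestZF(A, B); }   // ZF == 1 -> COND_E
static int testc  (V128 A, V128 B) { return ptestCF(A, B); }   // CF == 1 -> COND_B
static int testnzc(V128 A, V128 B) {                            // ZF == 0 && CF == 0 -> COND_A
  return !ptestZF(A, B) && !ptestCF(A, B);
}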
case Intrinsic::x86_sse41_ptestz: case Intrinsic::x86_sse41_ptestc: - case Intrinsic::x86_sse41_ptestnzc:{ + case Intrinsic::x86_sse41_ptestnzc: + case Intrinsic::x86_avx_ptestz_256: + case Intrinsic::x86_avx_ptestc_256: + case Intrinsic::x86_avx_ptestnzc_256: + case Intrinsic::x86_avx_vtestz_ps: + case Intrinsic::x86_avx_vtestc_ps: + case Intrinsic::x86_avx_vtestnzc_ps: + case Intrinsic::x86_avx_vtestz_pd: + case Intrinsic::x86_avx_vtestc_pd: + case Intrinsic::x86_avx_vtestnzc_pd: + case Intrinsic::x86_avx_vtestz_ps_256: + case Intrinsic::x86_avx_vtestc_ps_256: + case Intrinsic::x86_avx_vtestnzc_ps_256: + case Intrinsic::x86_avx_vtestz_pd_256: + case Intrinsic::x86_avx_vtestc_pd_256: + case Intrinsic::x86_avx_vtestnzc_pd_256: { + bool IsTestPacked = false; unsigned X86CC = 0; switch (IntNo) { default: llvm_unreachable("Bad fallthrough in Intrinsic lowering."); + case Intrinsic::x86_avx_vtestz_ps: + case Intrinsic::x86_avx_vtestz_pd: + case Intrinsic::x86_avx_vtestz_ps_256: + case Intrinsic::x86_avx_vtestz_pd_256: + IsTestPacked = true; // Fallthrough case Intrinsic::x86_sse41_ptestz: + case Intrinsic::x86_avx_ptestz_256: // ZF = 1 X86CC = X86::COND_E; break; + case Intrinsic::x86_avx_vtestc_ps: + case Intrinsic::x86_avx_vtestc_pd: + case Intrinsic::x86_avx_vtestc_ps_256: + case Intrinsic::x86_avx_vtestc_pd_256: + IsTestPacked = true; // Fallthrough case Intrinsic::x86_sse41_ptestc: + case Intrinsic::x86_avx_ptestc_256: // CF = 1 X86CC = X86::COND_B; break; + case Intrinsic::x86_avx_vtestnzc_ps: + case Intrinsic::x86_avx_vtestnzc_pd: + case Intrinsic::x86_avx_vtestnzc_ps_256: + case Intrinsic::x86_avx_vtestnzc_pd_256: + IsTestPacked = true; // Fallthrough case Intrinsic::x86_sse41_ptestnzc: + case Intrinsic::x86_avx_ptestnzc_256: // ZF and CF = 0 X86CC = X86::COND_A; break; @@ -6947,7 +7619,8 @@ X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const SDValue LHS = Op.getOperand(1); SDValue RHS = Op.getOperand(2); - SDValue Test = DAG.getNode(X86ISD::PTEST, dl, MVT::i32, LHS, RHS); + unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST; + SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS); SDValue CC = DAG.getConstant(X86CC, MVT::i8); SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test); return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC); @@ -7110,12 +7783,13 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const { SDValue Handler = Op.getOperand(2); DebugLoc dl = Op.getDebugLoc(); - SDValue Frame = DAG.getRegister(Subtarget->is64Bit() ? X86::RBP : X86::EBP, - getPointerTy()); + SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, + Subtarget->is64Bit() ? X86::RBP : X86::EBP, + getPointerTy()); unsigned StoreAddrReg = (Subtarget->is64Bit() ? 
X86::RCX : X86::ECX); - SDValue StoreAddr = DAG.getNode(ISD::SUB, dl, getPointerTy(), Frame, - DAG.getIntPtrConstant(-TD->getPointerSize())); + SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), Frame, + DAG.getIntPtrConstant(TD->getPointerSize())); StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(), StoreAddr, Offset); Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, NULL, 0, false, false, 0); Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr); @@ -7218,7 +7892,8 @@ SDValue X86TargetLowering::LowerTRAMPOLINE(SDValue Op, InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32; if (InRegCount > 2) { - report_fatal_error("Nest register in use - reduce number of inreg parameters!"); + report_fatal_error("Nest register in use - reduce number of inreg" + " parameters!"); } } break; @@ -7439,6 +8114,86 @@ SDValue X86TargetLowering::LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const { return Res; } +SDValue X86TargetLowering::LowerSHL(SDValue Op, SelectionDAG &DAG) const { + EVT VT = Op.getValueType(); + DebugLoc dl = Op.getDebugLoc(); + SDValue R = Op.getOperand(0); + + LLVMContext *Context = DAG.getContext(); + + assert(Subtarget->hasSSE41() && "Cannot lower SHL without SSE4.1 or later"); + + if (VT == MVT::v4i32) { + Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, + DAG.getConstant(Intrinsic::x86_sse2_pslli_d, MVT::i32), + Op.getOperand(1), DAG.getConstant(23, MVT::i32)); + + ConstantInt *CI = ConstantInt::get(*Context, APInt(32, 0x3f800000U)); + + std::vector<Constant*> CV(4, CI); + Constant *C = ConstantVector::get(CV); + SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); + SDValue Addend = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, + PseudoSourceValue::getConstantPool(), 0, + false, false, 16); + + Op = DAG.getNode(ISD::ADD, dl, VT, Op, Addend); + Op = DAG.getNode(ISD::BIT_CONVERT, dl, MVT::v4f32, Op); + Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op); + return DAG.getNode(ISD::MUL, dl, VT, Op, R); + } + if (VT == MVT::v16i8) { + // a = a << 5; + Op = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, + DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), + Op.getOperand(1), DAG.getConstant(5, MVT::i32)); + + ConstantInt *CM1 = ConstantInt::get(*Context, APInt(8, 15)); + ConstantInt *CM2 = ConstantInt::get(*Context, APInt(8, 63)); + + std::vector<Constant*> CVM1(16, CM1); + std::vector<Constant*> CVM2(16, CM2); + Constant *C = ConstantVector::get(CVM1); + SDValue CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); + SDValue M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, + PseudoSourceValue::getConstantPool(), 0, + false, false, 16); + + // r = pblendv(r, psllw(r & (char16)15, 4), a); + M = DAG.getNode(ISD::AND, dl, VT, R, M); + M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, + DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M, + DAG.getConstant(4, MVT::i32)); + R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, + DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32), + R, M, Op); + // a += a + Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); + + C = ConstantVector::get(CVM2); + CPIdx = DAG.getConstantPool(C, getPointerTy(), 16); + M = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx, + PseudoSourceValue::getConstantPool(), 0, false, false, 16); + + // r = pblendv(r, psllw(r & (char16)63, 2), a); + M = DAG.getNode(ISD::AND, dl, VT, R, M); + M = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, + DAG.getConstant(Intrinsic::x86_sse2_pslli_w, MVT::i32), M, + DAG.getConstant(2, MVT::i32)); + R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, + 
DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32), + R, M, Op); + // a += a + Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op); + + // return pblendv(r, r+r, a); + R = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, VT, + DAG.getConstant(Intrinsic::x86_sse41_pblendvb, MVT::i32), + R, DAG.getNode(ISD::ADD, dl, VT, R, R), Op); + return R; + } + return SDValue(); +} SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const { // Lower the "add/sub/mul with overflow" instruction into a regular ins plus @@ -7508,6 +8263,50 @@ SDValue X86TargetLowering::LowerXALUO(SDValue Op, SelectionDAG &DAG) const { return Sum; } +SDValue X86TargetLowering::LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const{ + DebugLoc dl = Op.getDebugLoc(); + + if (!Subtarget->hasSSE2()) { + SDValue Chain = Op.getOperand(0); + SDValue Zero = DAG.getConstant(0, + Subtarget->is64Bit() ? MVT::i64 : MVT::i32); + SDValue Ops[] = { + DAG.getRegister(X86::ESP, MVT::i32), // Base + DAG.getTargetConstant(1, MVT::i8), // Scale + DAG.getRegister(0, MVT::i32), // Index + DAG.getTargetConstant(0, MVT::i32), // Disp + DAG.getRegister(0, MVT::i32), // Segment. + Zero, + Chain + }; + SDNode *Res = + DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops, + array_lengthof(Ops)); + return SDValue(Res, 0); + } + + unsigned isDev = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue(); + if (!isDev) + return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0)); + + unsigned Op1 = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue(); + unsigned Op2 = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); + unsigned Op3 = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue(); + unsigned Op4 = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue(); + + // def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>; + if (!Op1 && !Op2 && !Op3 && Op4) + return DAG.getNode(X86ISD::SFENCE, dl, MVT::Other, Op.getOperand(0)); + + // def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>; + if (Op1 && !Op2 && !Op3 && !Op4) + return DAG.getNode(X86ISD::LFENCE, dl, MVT::Other, Op.getOperand(0)); + + // def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), (i8 1)), + // (MFENCE)>; + return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0)); +} + SDValue X86TargetLowering::LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const { EVT T = Op.getValueType(); DebugLoc dl = Op.getDebugLoc(); @@ -7597,6 +8396,7 @@ SDValue X86TargetLowering::LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const { SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { switch (Op.getOpcode()) { default: llvm_unreachable("Should not custom lower this!"); + case ISD::MEMBARRIER: return LowerMEMBARRIER(Op,DAG); case ISD::ATOMIC_CMP_SWAP: return LowerCMP_SWAP(Op,DAG); case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG); case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG); @@ -7640,6 +8440,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const { case ISD::CTLZ: return LowerCTLZ(Op, DAG); case ISD::CTTZ: return LowerCTTZ(Op, DAG); case ISD::MUL: return LowerMUL_V2I64(Op, DAG); + case ISD::SHL: return LowerSHL(Op, DAG); case ISD::SADDO: case ISD::UADDO: case ISD::SSUBO: @@ -7852,6 +8653,40 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const { case X86ISD::AND: return "X86ISD::AND"; case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM"; case X86ISD::PTEST: return "X86ISD::PTEST"; + case X86ISD::TESTP: return "X86ISD::TESTP"; + case 
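Stepping back to the LowerSHL hunk above: the v4i32 path synthesizes 1 << amt per lane without a variable vector shift instruction by sliding each shift amount into the IEEE-754 exponent field (pslld by 23), adding the bias pattern 0x3f800000 so the lane reads as the float 2.0^amt, converting back to integer, and multiplying. Below is a standalone check of that per-lane arithmetic, assuming shift amounts already in 0..31 (all ISD::SHL has to handle); it is an illustration of the trick, not the lowering itself.

#include <cassert>
#include <cstdint>
#include <cstring>

// (Amt << 23) + 0x3f800000, viewed as an IEEE-754 single, is exactly 2^Amt
// (exponent field Amt + 127, zero mantissa). Converting back to an integer
// and multiplying therefore implements X << Amt, lane by lane.
static uint32_t shlViaFloat(uint32_t X, uint32_t Amt) { // Amt in [0, 31]
  uint32_t Bits = (Amt << 23) + 0x3f800000u;
  float Pow2;
  std::memcpy(&Pow2, &Bits, sizeof(Pow2)); // bit_cast: value is 2.0^Amt
  return X * uint32_t(Pow2);               // X * 2^Amt == X << Amt (mod 2^32)
}

int main() {
  const uint32_t Vals[] = {0u, 1u, 0x12345678u, 0xffffffffu};
  for (uint32_t Amt = 0; Amt < 32; ++Amt)
    for (uint32_t X : Vals)
      assert(shlViaFloat(X, Amt) == (X << Amt));
  return 0;
}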
X86ISD::PALIGN: return "X86ISD::PALIGN"; + case X86ISD::PSHUFD: return "X86ISD::PSHUFD"; + case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW"; + case X86ISD::PSHUFHW_LD: return "X86ISD::PSHUFHW_LD"; + case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW"; + case X86ISD::PSHUFLW_LD: return "X86ISD::PSHUFLW_LD"; + case X86ISD::SHUFPS: return "X86ISD::SHUFPS"; + case X86ISD::SHUFPD: return "X86ISD::SHUFPD"; + case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS"; + case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD"; + case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS"; + case X86ISD::MOVHLPD: return "X86ISD::MOVHLPD"; + case X86ISD::MOVLPS: return "X86ISD::MOVLPS"; + case X86ISD::MOVLPD: return "X86ISD::MOVLPD"; + case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP"; + case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP"; + case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP"; + case X86ISD::MOVSHDUP_LD: return "X86ISD::MOVSHDUP_LD"; + case X86ISD::MOVSLDUP_LD: return "X86ISD::MOVSLDUP_LD"; + case X86ISD::MOVSD: return "X86ISD::MOVSD"; + case X86ISD::MOVSS: return "X86ISD::MOVSS"; + case X86ISD::UNPCKLPS: return "X86ISD::UNPCKLPS"; + case X86ISD::UNPCKLPD: return "X86ISD::UNPCKLPD"; + case X86ISD::UNPCKHPS: return "X86ISD::UNPCKHPS"; + case X86ISD::UNPCKHPD: return "X86ISD::UNPCKHPD"; + case X86ISD::PUNPCKLBW: return "X86ISD::PUNPCKLBW"; + case X86ISD::PUNPCKLWD: return "X86ISD::PUNPCKLWD"; + case X86ISD::PUNPCKLDQ: return "X86ISD::PUNPCKLDQ"; + case X86ISD::PUNPCKLQDQ: return "X86ISD::PUNPCKLQDQ"; + case X86ISD::PUNPCKHBW: return "X86ISD::PUNPCKHBW"; + case X86ISD::PUNPCKHWD: return "X86ISD::PUNPCKHWD"; + case X86ISD::PUNPCKHDQ: return "X86ISD::PUNPCKHDQ"; + case X86ISD::PUNPCKHQDQ: return "X86ISD::PUNPCKHQDQ"; case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS"; case X86ISD::MINGW_ALLOCA: return "X86ISD::MINGW_ALLOCA"; } @@ -7863,6 +8698,7 @@ bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, const Type *Ty) const { // X86 supports extremely general addressing modes. CodeModel::Model M = getTargetMachine().getCodeModel(); + Reloc::Model R = getTargetMachine().getRelocationModel(); // X86 allows a sign-extended 32-bit immediate field as a displacement. if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != NULL)) @@ -7882,7 +8718,8 @@ bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM, return false; // If lower 4G is not available, then we must use rip-relative addressing. - if (Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) + if ((M != CodeModel::Small || R != Reloc::Static) && + Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1)) return false; } @@ -8368,19 +9205,31 @@ X86TargetLowering::EmitAtomicMinMaxWithCustomInserter(MachineInstr *mInstr, } // FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8 -// all of this code can be replaced with that in the .td file. +// or XMM0_V32I8 in AVX all of this code can be replaced with that +// in the .td file. MachineBasicBlock * X86TargetLowering::EmitPCMP(MachineInstr *MI, MachineBasicBlock *BB, unsigned numArgs, bool memArg) const { + assert((Subtarget->hasSSE42() || Subtarget->hasAVX()) && + "Target must have SSE4.2 or AVX features enabled"); + DebugLoc dl = MI->getDebugLoc(); const TargetInstrInfo *TII = getTargetMachine().getInstrInfo(); unsigned Opc; - if (memArg) - Opc = numArgs == 3 ? X86::PCMPISTRM128rm : X86::PCMPESTRM128rm; - else - Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr; + + if (!Subtarget->hasAVX()) { + if (memArg) + Opc = numArgs == 3 ? 
X86::PCMPISTRM128rm : X86::PCMPESTRM128rm; + else + Opc = numArgs == 3 ? X86::PCMPISTRM128rr : X86::PCMPESTRM128rr; + } else { + if (memArg) + Opc = numArgs == 3 ? X86::VPCMPISTRM128rm : X86::VPCMPESTRM128rm; + else + Opc = numArgs == 3 ? X86::VPCMPISTRM128rr : X86::VPCMPESTRM128rr; + } MachineInstrBuilder MIB = BuildMI(BB, dl, TII->get(Opc)); @@ -8562,7 +9411,8 @@ X86TargetLowering::EmitLoweredMingwAlloca(MachineInstr *MI, .addReg(X86::EAX, RegState::Implicit) .addReg(X86::ESP, RegState::Implicit) .addReg(X86::EAX, RegState::Define | RegState::Implicit) - .addReg(X86::ESP, RegState::Define | RegState::Implicit); + .addReg(X86::ESP, RegState::Define | RegState::Implicit) + .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); MI->eraseFromParent(); // The pseudo instruction is gone now. return BB; @@ -8579,6 +9429,7 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI, = static_cast<const X86InstrInfo*>(getTargetMachine().getInstrInfo()); DebugLoc DL = MI->getDebugLoc(); MachineFunction *F = BB->getParent(); + bool IsWin64 = Subtarget->isTargetWin64(); assert(MI->getOperand(3).isGlobal() && "This should be a global"); @@ -8590,7 +9441,7 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI, .addGlobalAddress(MI->getOperand(3).getGlobal(), 0, MI->getOperand(3).getTargetFlags()) .addReg(0); - MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m)); + MIB = BuildMI(*BB, MI, DL, TII->get(IsWin64 ? X86::WINCALL64m : X86::CALL64m)); addDirectMem(MIB, X86::RDI); } else if (getTargetMachine().getRelocationModel() != Reloc::PIC_) { MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, @@ -8727,12 +9578,16 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI, } // String/text processing lowering. case X86::PCMPISTRM128REG: + case X86::VPCMPISTRM128REG: return EmitPCMP(MI, BB, 3, false /* in-mem */); case X86::PCMPISTRM128MEM: + case X86::VPCMPISTRM128MEM: return EmitPCMP(MI, BB, 3, true /* in-mem */); case X86::PCMPESTRM128REG: + case X86::VPCMPESTRM128REG: return EmitPCMP(MI, BB, 5, false /* in mem */); case X86::PCMPESTRM128MEM: + case X86::VPCMPESTRM128MEM: return EmitPCMP(MI, BB, 5, true /* in mem */); // Atomic Lowering. @@ -8966,21 +9821,20 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI) { DebugLoc dl = N->getDebugLoc(); EVT VT = N->getValueType(0); - ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(N); if (VT.getSizeInBits() != 128) return SDValue(); SmallVector<SDValue, 16> Elts; for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i) - Elts.push_back(DAG.getShuffleScalarElt(SVN, i)); - + Elts.push_back(getShuffleScalarElt(N, i, DAG, 0)); + return EltsFromConsecutiveLoads(VT, Elts, dl, DAG); } -/// PerformShuffleCombine - Detect vector gather/scatter index generation -/// and convert it from being a bunch of shuffles and extracts to a simple -/// store and scalar loads to extract the elements. +/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index +/// generation and convert it from being a bunch of shuffles and extracts +/// to a simple store and scalar loads to extract the elements. static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, const TargetLowering &TLI) { SDValue InputVector = N->getOperand(0); @@ -9030,8 +9884,8 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, // Store the value to a temporary stack slot. 
SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType()); - SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr, NULL, 0, - false, false, 0); + SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr, NULL, + 0, false, false, 0); // Replace each use (extract) with a load of the appropriate element. for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(), @@ -9045,11 +9899,12 @@ static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG, uint64_t Offset = EltSize * cast<ConstantSDNode>(Idx)->getZExtValue(); SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy()); - SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), OffsetVal, StackPtr); + SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, Idx.getValueType(), + OffsetVal, StackPtr); // Load the scalar. - SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch, ScalarAddr, - NULL, 0, false, false, 0); + SDValue LoadScalar = DAG.getLoad(Extract->getValueType(0), dl, Ch, + ScalarAddr, NULL, 0, false, false, 0); // Replace the exact with the load. DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), LoadScalar); @@ -9087,8 +9942,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, // Converting this to a min would handle NaNs incorrectly, and swapping // the operands would cause it to handle comparisons between positive // and negative zero incorrectly. - if (!FiniteOnlyFPMath() && - (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) { + if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { if (!UnsafeFPMath && !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) break; @@ -9126,8 +9980,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, // Converting this to a max would handle NaNs incorrectly, and swapping // the operands would cause it to handle comparisons between positive // and negative zero incorrectly. - if (!FiniteOnlyFPMath() && - (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) { + if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) { if (!UnsafeFPMath && !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) break; @@ -9156,8 +10009,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, // cause it to handle NaNs incorrectly. if (!UnsafeFPMath && !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) { - if (!FiniteOnlyFPMath() && - (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) + if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) break; std::swap(LHS, RHS); } @@ -9182,8 +10034,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, case ISD::SETULT: // Converting this to a max would handle NaNs incorrectly. - if (!FiniteOnlyFPMath() && - (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) + if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) break; Opcode = X86ISD::FMAX; break; @@ -9193,8 +10044,7 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG, // cause it to handle NaNs incorrectly. 
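On the PerformSELECTCombine hunks here: SSE MINPS/MAXPS compute "a < b ? a : b" (respectively "a > b ? a : b"), so they return the second operand whenever the comparison is unordered or both inputs are zeros of either sign. That is why the combine bails out, or must swap operands, unless NaNs and signed zeros are provably absent or UnsafeFPMath allows ignoring them. A two-call scalar illustration of the operand-order sensitivity:

#include <cmath>
#include <cstdio>

// What MINSS/MINPS actually compute per lane.
static float sseMin(float A, float B) { return A < B ? A : B; }

int main() {
  float QNaN = std::nanf("");
  std::printf("%g %g\n", sseMin(QNaN, 1.0f), sseMin(1.0f, QNaN));     // 1 nan
  std::printf("%g %g\n", sseMin(+0.0f, -0.0f), sseMin(-0.0f, +0.0f)); // -0 0
}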
if (!UnsafeFPMath && !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) { - if (!FiniteOnlyFPMath() && - (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))) + if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) break; std::swap(LHS, RHS); } @@ -9905,7 +10755,6 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, SelectionDAG &DAG = DCI.DAG; switch (N->getOpcode()) { default: break; - case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this); case ISD::EXTRACT_VECTOR_ELT: return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, *this); case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget); @@ -9922,6 +10771,28 @@ SDValue X86TargetLowering::PerformDAGCombine(SDNode *N, case X86ISD::BT: return PerformBTCombine(N, DAG, DCI); case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG); case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG); + case X86ISD::SHUFPS: // Handle all target specific shuffles + case X86ISD::SHUFPD: + case X86ISD::PUNPCKHBW: + case X86ISD::PUNPCKHWD: + case X86ISD::PUNPCKHDQ: + case X86ISD::PUNPCKHQDQ: + case X86ISD::UNPCKHPS: + case X86ISD::UNPCKHPD: + case X86ISD::PUNPCKLBW: + case X86ISD::PUNPCKLWD: + case X86ISD::PUNPCKLDQ: + case X86ISD::PUNPCKLQDQ: + case X86ISD::UNPCKLPS: + case X86ISD::UNPCKLPD: + case X86ISD::MOVHLPS: + case X86ISD::MOVLHPS: + case X86ISD::PSHUFD: + case X86ISD::PSHUFHW: + case X86ISD::PSHUFLW: + case X86ISD::MOVSS: + case X86ISD::MOVSD: + case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this); } return SDValue(); @@ -9956,14 +10827,6 @@ bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const { } } -static bool MayFoldLoad(SDValue Op) { - return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode()); -} - -static bool MayFoldIntoStore(SDValue Op) { - return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin()); -} - /// IsDesirableToPromoteOp - This method query the target whether it is /// beneficial for dag combiner to promote the specified node. If true, it /// should return the desired promotion type by reference. diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index 4e4daa4bc5ca..d2d9b28a0396 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -248,6 +248,44 @@ namespace llvm { // PTEST - Vector bitwise comparisons PTEST, + // TESTP - Vector packed fp sign bitwise comparisons + TESTP, + + // Several flavors of instructions with vector shuffle behaviors. + PALIGN, + PSHUFD, + PSHUFHW, + PSHUFLW, + PSHUFHW_LD, + PSHUFLW_LD, + SHUFPD, + SHUFPS, + MOVDDUP, + MOVSHDUP, + MOVSLDUP, + MOVSHDUP_LD, + MOVSLDUP_LD, + MOVLHPS, + MOVLHPD, + MOVHLPS, + MOVHLPD, + MOVLPS, + MOVLPD, + MOVSD, + MOVSS, + UNPCKLPS, + UNPCKLPD, + UNPCKHPS, + UNPCKHPD, + PUNPCKLBW, + PUNPCKLWD, + PUNPCKLDQ, + PUNPCKLQDQ, + PUNPCKHBW, + PUNPCKHWD, + PUNPCKHDQ, + PUNPCKHQDQ, + // VASTART_SAVE_XMM_REGS - Save xmm argument registers to the stack, // according to %al. An operator is needed so that this can be expanded // with control flow. @@ -265,7 +303,13 @@ namespace llvm { ATOMXOR64_DAG, ATOMAND64_DAG, ATOMNAND64_DAG, - ATOMSWAP64_DAG + ATOMSWAP64_DAG, + + // Memory barrier + MEMBARRIER, + MFENCE, + SFENCE, + LFENCE // WARNING: Do not add anything in the end unless you want the node to // have memop! In fact, starting from ATOMADD64_DAG all opcodes will be @@ -584,12 +628,19 @@ namespace llvm { /// getFunctionAlignment - Return the Log2 alignment of this function. 
virtual unsigned getFunctionAlignment(const Function *F) const; + unsigned getRegPressureLimit(const TargetRegisterClass *RC, + MachineFunction &MF) const; + /// getStackCookieLocation - Return true if the target stores stack /// protector cookies at a fixed offset in some non-standard address /// space, and populates the address space and offset as /// appropriate. virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const; + protected: + std::pair<const TargetRegisterClass*, uint8_t> + findRepresentativeClass(EVT VT) const; + private: /// Subtarget - Keep a pointer to the X86Subtarget around so that we can /// make the right decision when generating code for different targets. @@ -710,11 +761,16 @@ namespace llvm { SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) const; SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) const; SDValue LowerMUL_V2I64(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerSHL(SDValue Op, SelectionDAG &DAG) const; SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) const; SDValue LowerCMP_SWAP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) const; SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerMEMBARRIER(SDValue Op, SelectionDAG &DAG) const; + + // Utility functions to help LowerVECTOR_SHUFFLE + SDValue LowerVECTOR_SHUFFLEv8i16(SDValue Op, SelectionDAG &DAG) const; virtual SDValue LowerFormalArguments(SDValue Chain, diff --git a/lib/Target/X86/X86Instr64bit.td b/lib/Target/X86/X86Instr64bit.td index 42d0e7f9778a..0884b61425e9 100644 --- a/lib/Target/X86/X86Instr64bit.td +++ b/lib/Target/X86/X86Instr64bit.td @@ -73,11 +73,7 @@ def GetLo32XForm : SDNodeXForm<imm, [{ return getI32Imm((unsigned)N->getZExtValue()); }]>; -def i64immSExt32 : PatLeaf<(i64 imm), [{ - // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit - // sign extended field. - return (int64_t)N->getZExtValue() == (int32_t)N->getZExtValue(); -}]>; +def i64immSExt32 : PatLeaf<(i64 imm), [{ return i64immSExt32(N); }]>; def i64immZExt32 : PatLeaf<(i64 imm), [{ @@ -158,7 +154,7 @@ let isCall = 1 in // FIXME: We need to teach codegen about single list of call-clobbered // registers. -let isCall = 1 in +let isCall = 1, isCodeGenOnly = 1 in // All calls clobber the non-callee saved registers. RSP is marked as // a use to prevent stack-pointer assignments that appear immediately // before calls from potentially appearing dead. 
Uses for argument @@ -168,7 +164,7 @@ let isCall = 1 in MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7, XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, EFLAGS], Uses = [RSP] in { - def WINCALL64pcrel32 : I<0xE8, RawFrm, + def WINCALL64pcrel32 : Ii32PCRel<0xE8, RawFrm, (outs), (ins i64i32imm_pcrel:$dst, variable_ops), "call\t$dst", []>, Requires<[IsWin64]>; @@ -182,7 +178,8 @@ let isCall = 1 in } -let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in +let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, + isCodeGenOnly = 1 in let Defs = [RAX, RCX, RDX, RSI, RDI, R8, R9, R10, R11, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, ST1, MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7, @@ -216,9 +213,9 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { def JMP64pcrel32 : I<0xE9, RawFrm, (outs), (ins brtarget:$dst), "jmp{q}\t$dst", []>; def JMP64r : I<0xFF, MRM4r, (outs), (ins GR64:$dst), "jmp{q}\t{*}$dst", - [(brind GR64:$dst)]>; + [(brind GR64:$dst)]>, Requires<[In64BitMode]>; def JMP64m : I<0xFF, MRM4m, (outs), (ins i64mem:$dst), "jmp{q}\t{*}$dst", - [(brind (loadi64 addr:$dst))]>; + [(brind (loadi64 addr:$dst))]>, Requires<[In64BitMode]>; def FARJMP64 : RI<0xFF, MRM5m, (outs), (ins opaque80mem:$dst), "ljmp{q}\t{*}$dst", []>; } @@ -246,7 +243,7 @@ def POPCNT64rm : RI<0xB8, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), let Defs = [RBP,RSP], Uses = [RBP,RSP], mayLoad = 1, neverHasSideEffects = 1 in def LEAVE64 : I<0xC9, RawFrm, - (outs), (ins), "leave", []>; + (outs), (ins), "leave", []>, Requires<[In64BitMode]>; let Defs = [RSP], Uses = [RSP], neverHasSideEffects=1 in { let mayLoad = 1 in { def POP64r : I<0x58, AddRegFrm, @@ -330,7 +327,7 @@ def CMPS64 : RI<0xA7, RawFrm, (outs), (ins), "cmpsq", []>; // Fast system-call instructions def SYSEXIT64 : RI<0x35, RawFrm, - (outs), (ins), "sysexit", []>, TB; + (outs), (ins), "sysexit", []>, TB, Requires<[In64BitMode]>; //===----------------------------------------------------------------------===// // Move Instructions... @@ -374,6 +371,7 @@ def MOV64mi32 : RIi32<0xC7, MRM0m, (outs), (ins i64mem:$dst, i64i32imm:$src), [(store i64immSExt32:$src, addr:$dst)]>; /// Versions of MOV64rr, MOV64rm, and MOV64mr for i64mem_TC and GR64_TC. +let isCodeGenOnly = 1 in { let neverHasSideEffects = 1 in def MOV64rr_TC : RI<0x89, MRMDestReg, (outs GR64_TC:$dst), (ins GR64_TC:$src), "mov{q}\t{$src, $dst|$dst, $src}", []>; @@ -388,7 +386,13 @@ let mayStore = 1 in def MOV64mr_TC : RI<0x89, MRMDestMem, (outs), (ins i64mem_TC:$dst, GR64_TC:$src), "mov{q}\t{$src, $dst|$dst, $src}", []>; +} +// FIXME: These definitions are utterly broken +// Just leave them commented out for now because they're useless outside +// of the large code model, and most compilers won't generate the instructions +// in question. 
+/* def MOV64o8a : RIi8<0xA0, RawFrm, (outs), (ins offset8:$src), "mov{q}\t{$src, %rax|%rax, $src}", []>; def MOV64o64a : RIi32<0xA1, RawFrm, (outs), (ins offset64:$src), @@ -397,6 +401,7 @@ def MOV64ao8 : RIi8<0xA2, RawFrm, (outs offset8:$dst), (ins), "mov{q}\t{%rax, $dst|$dst, %rax}", []>; def MOV64ao64 : RIi32<0xA3, RawFrm, (outs offset64:$dst), (ins), "mov{q}\t{%rax, $dst|$dst, %rax}", []>; +*/ // Moves to and from segment registers def MOV64rs : RI<0x8C, MRMDestReg, (outs GR64:$dst), (ins SEGMENT_REG:$src), @@ -1316,14 +1321,13 @@ def BT64mr : RI<0xA3, MRMDestMem, (outs), (ins i64mem:$src1, GR64:$src2), [] >, TB; -def BT64ri8 : Ii8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2), +def BT64ri8 : RIi8<0xBA, MRM4r, (outs), (ins GR64:$src1, i64i8imm:$src2), "bt{q}\t{$src2, $src1|$src1, $src2}", - [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB, - REX_W; + [(set EFLAGS, (X86bt GR64:$src1, i64immSExt8:$src2))]>, TB; // Note that these instructions don't need FastBTMem because that // only applies when the other operand is in a register. When it's // an immediate, bt is still fast. -def BT64mi8 : Ii8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2), +def BT64mi8 : RIi8<0xBA, MRM4m, (outs), (ins i64mem:$src1, i64i8imm:$src2), "bt{q}\t{$src2, $src1|$src1, $src2}", [(set EFLAGS, (X86bt (loadi64 addr:$src1), i64immSExt8:$src2))]>, TB; @@ -1537,116 +1541,6 @@ def : Pat<(i64 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))), (SETB_C64r)>; //===----------------------------------------------------------------------===// -// Conversion Instructions... -// - -// f64 -> signed i64 -def CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src), - "cvtsd2si{q}\t{$src, $dst|$dst, $src}", []>; -def CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src), - "cvtsd2si{q}\t{$src, $dst|$dst, $src}", []>; -def Int_CVTSD2SI64rr: RSDI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), - "cvtsd2si{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, - (int_x86_sse2_cvtsd2si64 VR128:$src))]>; -def Int_CVTSD2SI64rm: RSDI<0x2D, MRMSrcMem, (outs GR64:$dst), - (ins f128mem:$src), - "cvtsd2si{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (int_x86_sse2_cvtsd2si64 - (load addr:$src)))]>; -def CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR64:$src), - "cvttsd2si{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (fp_to_sint FR64:$src))]>; -def CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f64mem:$src), - "cvttsd2si{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (fp_to_sint (loadf64 addr:$src)))]>; -def Int_CVTTSD2SI64rr: RSDI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), - "cvttsd2si{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, - (int_x86_sse2_cvttsd2si64 VR128:$src))]>; -def Int_CVTTSD2SI64rm: RSDI<0x2C, MRMSrcMem, (outs GR64:$dst), - (ins f128mem:$src), - "cvttsd2si{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, - (int_x86_sse2_cvttsd2si64 - (load addr:$src)))]>; - -// Signed i64 -> f64 -def CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src), - "cvtsi2sd{q}\t{$src, $dst|$dst, $src}", - [(set FR64:$dst, (sint_to_fp GR64:$src))]>; -def CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src), - "cvtsi2sd{q}\t{$src, $dst|$dst, $src}", - [(set FR64:$dst, (sint_to_fp (loadi64 addr:$src)))]>; - -let Constraints = "$src1 = $dst" in { -def Int_CVTSI2SD64rr: RSDI<0x2A, MRMSrcReg, - (outs VR128:$dst), (ins VR128:$src1, GR64:$src2), - "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}", - [(set VR128:$dst, - 
(int_x86_sse2_cvtsi642sd VR128:$src1, - GR64:$src2))]>; -def Int_CVTSI2SD64rm: RSDI<0x2A, MRMSrcMem, - (outs VR128:$dst), (ins VR128:$src1, i64mem:$src2), - "cvtsi2sd{q}\t{$src2, $dst|$dst, $src2}", - [(set VR128:$dst, - (int_x86_sse2_cvtsi642sd VR128:$src1, - (loadi64 addr:$src2)))]>; -} // Constraints = "$src1 = $dst" - -// Signed i64 -> f32 -def CVTSI2SS64rr: RSSI<0x2A, MRMSrcReg, (outs FR32:$dst), (ins GR64:$src), - "cvtsi2ss{q}\t{$src, $dst|$dst, $src}", - [(set FR32:$dst, (sint_to_fp GR64:$src))]>; -def CVTSI2SS64rm: RSSI<0x2A, MRMSrcMem, (outs FR32:$dst), (ins i64mem:$src), - "cvtsi2ss{q}\t{$src, $dst|$dst, $src}", - [(set FR32:$dst, (sint_to_fp (loadi64 addr:$src)))]>; - -let Constraints = "$src1 = $dst" in { - def Int_CVTSI2SS64rr : RSSI<0x2A, MRMSrcReg, - (outs VR128:$dst), (ins VR128:$src1, GR64:$src2), - "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}", - [(set VR128:$dst, - (int_x86_sse_cvtsi642ss VR128:$src1, - GR64:$src2))]>; - def Int_CVTSI2SS64rm : RSSI<0x2A, MRMSrcMem, - (outs VR128:$dst), - (ins VR128:$src1, i64mem:$src2), - "cvtsi2ss{q}\t{$src2, $dst|$dst, $src2}", - [(set VR128:$dst, - (int_x86_sse_cvtsi642ss VR128:$src1, - (loadi64 addr:$src2)))]>; -} // Constraints = "$src1 = $dst" - -// f32 -> signed i64 -def CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src), - "cvtss2si{q}\t{$src, $dst|$dst, $src}", []>; -def CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src), - "cvtss2si{q}\t{$src, $dst|$dst, $src}", []>; -def Int_CVTSS2SI64rr: RSSI<0x2D, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), - "cvtss2si{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, - (int_x86_sse_cvtss2si64 VR128:$src))]>; -def Int_CVTSS2SI64rm: RSSI<0x2D, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src), - "cvtss2si{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (int_x86_sse_cvtss2si64 - (load addr:$src)))]>; -def CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins FR32:$src), - "cvttss2si{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (fp_to_sint FR32:$src))]>; -def CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), (ins f32mem:$src), - "cvttss2si{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, (fp_to_sint (loadf32 addr:$src)))]>; -def Int_CVTTSS2SI64rr: RSSI<0x2C, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), - "cvttss2si{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, - (int_x86_sse_cvttss2si64 VR128:$src))]>; -def Int_CVTTSS2SI64rm: RSSI<0x2C, MRMSrcMem, (outs GR64:$dst), - (ins f32mem:$src), - "cvttss2si{q}\t{$src, $dst|$dst, $src}", - [(set GR64:$dst, - (int_x86_sse_cvttss2si64 (load addr:$src)))]>; - // Descriptor-table support instructions // LLDT is not interpreted specially in 64-bit mode because there is no sign @@ -1726,6 +1620,14 @@ def MOV64FSrm : RI<0x8B, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src), // Atomic Instructions //===----------------------------------------------------------------------===// +// TODO: Get this to fold the constant into the instruction. +let hasSideEffects = 1, Defs = [ESP] in +def Int_MemBarrierNoSSE64 : RI<0x09, MRM1r, (outs), (ins GR64:$zero), + "lock\n\t" + "or{q}\t{$zero, (%rsp)|(%rsp), $zero}", + [(X86MemBarrierNoSSE GR64:$zero)]>, + Requires<[In64BitMode]>, LOCK; + let Defs = [RAX, EFLAGS], Uses = [RAX] in { def LCMPXCHG64 : RI<0xB1, MRMDestMem, (outs), (ins i64mem:$ptr, GR64:$swap), "lock\n\t" @@ -1772,7 +1674,7 @@ def XCHG64ar : RI<0x90, AddRegFrm, (outs), (ins GR64:$src), // Optimized codegen when the non-memory output is not used. 
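The Int_MemBarrierNoSSE64 definition above (and the OR32mrLocked path in LowerMEMBARRIER earlier) lean on a long-standing x86 idiom: when MFENCE is unavailable (pre-SSE2), any LOCKed read-modify-write acts as a full memory fence, and or-ing zero into the top of the stack gets one without changing any data. The GCC/Clang inline-asm sketch below shows the idiom itself, not LLVM's exact encoding (the pseudo above ORs a register holding zero rather than an immediate).

static inline void fullBarrierNoSSE2(void) {
#if defined(__x86_64__)
  // Locked RMW on the stack slot: full fence, no visible side effect.
  __asm__ __volatile__("lock; orq $0, (%%rsp)" ::: "memory", "cc");
#elif defined(__i386__)
  __asm__ __volatile__("lock; orl $0, (%%esp)" ::: "memory", "cc");
#endif
}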
let Defs = [EFLAGS], mayLoad = 1, mayStore = 1 in { // FIXME: Use normal add / sub instructions and add lock prefix dynamically. -def LOCK_ADD64mr : RI<0x03, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), +def LOCK_ADD64mr : RI<0x01, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src2), "lock\n\t" "add{q}\t{$src2, $dst|$dst, $src2}", []>, LOCK; def LOCK_ADD64mi8 : RIi8<0x83, MRM0m, (outs), diff --git a/lib/Target/X86/X86InstrFMA.td b/lib/Target/X86/X86InstrFMA.td new file mode 100644 index 000000000000..d868773d2d69 --- /dev/null +++ b/lib/Target/X86/X86InstrFMA.td @@ -0,0 +1,60 @@ +//====- X86InstrFMA.td - Describe the X86 Instruction Set --*- tablegen -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This file describes FMA (Fused Multiply-Add) instructions. +// +//===----------------------------------------------------------------------===// + +//===----------------------------------------------------------------------===// +// FMA3 - Intel 3 operand Fused Multiply-Add instructions +//===----------------------------------------------------------------------===// + +multiclass fma_rm<bits<8> opc, string OpcodeStr> { + def r : FMA3<opc, MRMSrcReg, (outs VR128:$dst), + (ins VR128:$src1, VR128:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + []>; + def m : FMA3<opc, MRMSrcMem, (outs VR128:$dst), + (ins VR128:$src1, f128mem:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + []>; + def rY : FMA3<opc, MRMSrcReg, (outs VR256:$dst), + (ins VR256:$src1, VR256:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + []>; + def mY : FMA3<opc, MRMSrcMem, (outs VR256:$dst), + (ins VR256:$src1, f256mem:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + []>; +} + +multiclass fma_forms<bits<8> opc132, bits<8> opc213, bits<8> opc231, + string OpcodeStr, string PackTy> { + defm r132 : fma_rm<opc132, !strconcat(OpcodeStr, !strconcat("132", PackTy))>; + defm r213 : fma_rm<opc213, !strconcat(OpcodeStr, !strconcat("213", PackTy))>; + defm r231 : fma_rm<opc231, !strconcat(OpcodeStr, !strconcat("231", PackTy))>; +} + +let isAsmParserOnly = 1 in { + // Fused Multiply-Add + defm VFMADDPS : fma_forms<0x98, 0xA8, 0xB8, "vfmadd", "ps">; + defm VFMADDPD : fma_forms<0x98, 0xA8, 0xB8, "vfmadd", "pd">, VEX_W; + defm VFMADDSUBPS : fma_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "ps">; + defm VFMADDSUBPD : fma_forms<0x96, 0xA6, 0xB6, "vfmaddsub", "pd">, VEX_W; + defm VFMSUBADDPS : fma_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "ps">; + defm VFMSUBADDPD : fma_forms<0x97, 0xA7, 0xB7, "vfmsubadd", "pd">, VEX_W; + defm VFMSUBPS : fma_forms<0x9A, 0xAA, 0xBA, "vfmsub", "ps">; + defm VFMSUBPD : fma_forms<0x9A, 0xAA, 0xBA, "vfmsub", "pd">, VEX_W; + + // Fused Negative Multiply-Add + defm VFNMADDPS : fma_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "ps">; + defm VFNMADDPD : fma_forms<0x9C, 0xAC, 0xBC, "vfnmadd", "pd">, VEX_W; + defm VFNMSUBPS : fma_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "ps">; + defm VFNMSUBPD : fma_forms<0x9E, 0xAE, 0xBE, "vfnmsub", "pd">, VEX_W; +} diff --git a/lib/Target/X86/X86InstrFPStack.td b/lib/Target/X86/X86InstrFPStack.td index da93de988d50..9c9bcc7d0b6a 100644 --- a/lib/Target/X86/X86InstrFPStack.td +++ b/lib/Target/X86/X86InstrFPStack.td @@ -108,10 +108,6 @@ let usesCustomInserter = 1 in { // Expanded after 
instruction selection. [(X86fp_to_i64mem RFP80:$src, addr:$dst)]>; } -let isTerminator = 1 in - let Defs = [FP0, FP1, FP2, FP3, FP4, FP5, FP6] in - def FP_REG_KILL : I<0, Pseudo, (outs), (ins), "##FP_REG_KILL", []>; - // All FP Stack operations are represented with four instructions here. The // first three instructions, generated by the instruction selector, use "RFP32" // "RFP64" or "RFP80" registers: traditional register files to reference 32-bit, @@ -157,7 +153,7 @@ def FpSET_ST1_64 : FpI_<(outs), (ins RFP64:$src), SpecialFP, []>; // ST(1) = FPR def FpSET_ST1_80 : FpI_<(outs), (ins RFP80:$src), SpecialFP, []>; // ST(1) = FPR } -// FpIf32, FpIf64 - Floating Point Psuedo Instruction template. +// FpIf32, FpIf64 - Floating Point Pseudo Instruction template. // f32 instructions can use SSE1 and are predicated on FPStackf32 == !SSE1. // f64 instructions can use SSE2 and are predicated on FPStackf64 == !SSE2. // f80 instructions cannot use SSE and use neither of these. diff --git a/lib/Target/X86/X86InstrFormats.td b/lib/Target/X86/X86InstrFormats.td index cc3fdf1efd7b..79187e9a76d7 100644 --- a/lib/Target/X86/X86InstrFormats.td +++ b/lib/Target/X86/X86InstrFormats.td @@ -39,6 +39,7 @@ def MRM_E8 : Format<39>; def MRM_F0 : Format<40>; def MRM_F8 : Format<41>; def MRM_F9 : Format<42>; +def RawFrmImm16 : Format<43>; // ImmType - This specifies the immediate type used by an instruction. This is // part of the ad-hoc solution used to emit machine instruction encodings by our @@ -210,7 +211,7 @@ class Ii32PCRel<bits<8> o, Format f, dag outs, dag ins, string asm, class FPI<bits<8> o, Format F, dag outs, dag ins, string asm> : I<o, F, outs, ins, asm, []> {} -// FpI_ - Floating Point Psuedo Instruction template. Not Predicated. +// FpI_ - Floating Point Pseudo Instruction template. Not Predicated. class FpI_<dag outs, dag ins, FPFormat fp, list<dag> pattern> : X86Inst<0, Pseudo, NoImm, outs, ins, ""> { let FPForm = fp; @@ -224,13 +225,13 @@ class FpI_<dag outs, dag ins, FPFormat fp, list<dag> pattern> // Iseg32 - 16-bit segment selector, 32-bit offset class Iseg16 <bits<8> o, Format f, dag outs, dag ins, string asm, - list<dag> pattern> : X86Inst<o, f, NoImm, outs, ins, asm> { + list<dag> pattern> : X86Inst<o, f, Imm16, outs, ins, asm> { let Pattern = pattern; let CodeSize = 3; } class Iseg32 <bits<8> o, Format f, dag outs, dag ins, string asm, - list<dag> pattern> : X86Inst<o, f, NoImm, outs, ins, asm> { + list<dag> pattern> : X86Inst<o, f, Imm32, outs, ins, asm> { let Pattern = pattern; let CodeSize = 3; } @@ -411,6 +412,20 @@ class SS42AI<bits<8> o, Format F, dag outs, dag ins, string asm, : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, Requires<[HasSSE42]>; +// AVX Instruction Templates: +// Instructions introduced in AVX (no SSE equivalent forms) +// +// AVX8I - AVX instructions with T8 and OpSize prefix. +// AVXAIi8 - AVX instructions with TA, OpSize prefix and ImmT = Imm8. 
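On the new X86InstrFMA.td above: the 132/213/231 suffixes that fma_forms stamps onto each mnemonic name the operand roles in the destructive three-operand FMA3 encoding. The patch only declares the instructions (empty patterns), so the summary below comes from the ISA definition rather than from this code and is included only to make the multiclass naming readable.

#include <cassert>

// FMA3 operand orders, with Dst also acting as the first source:
//   vfmadd132: Dst = Dst  * Src3 + Src2
//   vfmadd213: Dst = Src2 * Dst  + Src3
//   vfmadd231: Dst = Src2 * Src3 + Dst
static double fmadd132(double Dst, double Src2, double Src3) { return Dst * Src3 + Src2; }
static double fmadd213(double Dst, double Src2, double Src3) { return Src2 * Dst + Src3; }
static double fmadd231(double Dst, double Src2, double Src3) { return Src2 * Src3 + Dst; }

int main() {
  // Same three values, three different roles.
  assert(fmadd132(2, 3, 4) == 2 * 4 + 3);
  assert(fmadd213(2, 3, 4) == 3 * 2 + 4);
  assert(fmadd231(2, 3, 4) == 3 * 4 + 2);
  return 0;
}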
+class AVX8I<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> + : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, OpSize, + Requires<[HasAVX]>; +class AVXAIi8<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag> pattern> + : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, OpSize, + Requires<[HasAVX]>; + // AES Instruction Templates: // // AES8I @@ -425,6 +440,18 @@ class AESAI<bits<8> o, Format F, dag outs, dag ins, string asm, : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, Requires<[HasAES]>; +// CLMUL Instruction Templates +class CLMULIi8<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag>pattern> + : Ii8<o, F, outs, ins, asm, pattern, SSEPackedInt>, TA, + OpSize, VEX_4V, Requires<[HasAVX, HasCLMUL]>; + +// FMA3 Instruction Templates +class FMA3<bits<8> o, Format F, dag outs, dag ins, string asm, + list<dag>pattern> + : I<o, F, outs, ins, asm, pattern, SSEPackedInt>, T8, + OpSize, VEX_4V, Requires<[HasFMA3]>; + // X86-64 Instruction templates... // diff --git a/lib/Target/X86/X86InstrFragmentsSIMD.td b/lib/Target/X86/X86InstrFragmentsSIMD.td index 71c4e8bc147f..01149b699213 100644 --- a/lib/Target/X86/X86InstrFragmentsSIMD.td +++ b/lib/Target/X86/X86InstrFragmentsSIMD.td @@ -117,9 +117,67 @@ def X86pcmpgtd : SDNode<"X86ISD::PCMPGTD", SDTIntBinOp>; def X86pcmpgtq : SDNode<"X86ISD::PCMPGTQ", SDTIntBinOp>; def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>, - SDTCisVT<1, v4f32>, - SDTCisVT<2, v4f32>]>; + SDTCisVec<1>, + SDTCisSameAs<2, 1>]>; def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>; +def X86testp : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>; + +// Specific shuffle nodes - At some point ISD::VECTOR_SHUFFLE will always get +// translated into one of the target nodes below during lowering. +// Note: this is a work in progress... 
+def SDTShuff1Op : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>; +def SDTShuff2Op : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisSameAs<0,1>, + SDTCisSameAs<0,2>]>; + +def SDTShuff2OpI : SDTypeProfile<1, 2, [SDTCisVec<0>, + SDTCisSameAs<0,1>, SDTCisInt<2>]>; +def SDTShuff3OpI : SDTypeProfile<1, 3, [SDTCisVec<0>, SDTCisSameAs<0,1>, + SDTCisSameAs<0,2>, SDTCisInt<3>]>; + +def SDTShuff2OpLdI : SDTypeProfile<1, 2, [SDTCisVec<0>, SDTCisPtrTy<1>, + SDTCisInt<2>]>; + +def X86PAlign : SDNode<"X86ISD::PALIGN", SDTShuff3OpI>; + +def X86PShufd : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>; +def X86PShufhw : SDNode<"X86ISD::PSHUFHW", SDTShuff2OpI>; +def X86PShuflw : SDNode<"X86ISD::PSHUFLW", SDTShuff2OpI>; + +def X86PShufhwLd : SDNode<"X86ISD::PSHUFHW_LD", SDTShuff2OpLdI>; +def X86PShuflwLd : SDNode<"X86ISD::PSHUFLW_LD", SDTShuff2OpLdI>; + +def X86Shufpd : SDNode<"X86ISD::SHUFPD", SDTShuff3OpI>; +def X86Shufps : SDNode<"X86ISD::SHUFPS", SDTShuff3OpI>; + +def X86Movddup : SDNode<"X86ISD::MOVDDUP", SDTShuff1Op>; +def X86Movshdup : SDNode<"X86ISD::MOVSHDUP", SDTShuff1Op>; +def X86Movsldup : SDNode<"X86ISD::MOVSLDUP", SDTShuff1Op>; + +def X86Movsd : SDNode<"X86ISD::MOVSD", SDTShuff2Op>; +def X86Movss : SDNode<"X86ISD::MOVSS", SDTShuff2Op>; + +def X86Movlhps : SDNode<"X86ISD::MOVLHPS", SDTShuff2Op>; +def X86Movlhpd : SDNode<"X86ISD::MOVLHPD", SDTShuff2Op>; +def X86Movhlps : SDNode<"X86ISD::MOVHLPS", SDTShuff2Op>; +def X86Movhlpd : SDNode<"X86ISD::MOVHLPD", SDTShuff2Op>; + +def X86Movlps : SDNode<"X86ISD::MOVLPS", SDTShuff2Op>; +def X86Movlpd : SDNode<"X86ISD::MOVLPD", SDTShuff2Op>; + +def X86Unpcklps : SDNode<"X86ISD::UNPCKLPS", SDTShuff2Op>; +def X86Unpcklpd : SDNode<"X86ISD::UNPCKLPD", SDTShuff2Op>; +def X86Unpckhps : SDNode<"X86ISD::UNPCKHPS", SDTShuff2Op>; +def X86Unpckhpd : SDNode<"X86ISD::UNPCKHPD", SDTShuff2Op>; + +def X86Punpcklbw : SDNode<"X86ISD::PUNPCKLBW", SDTShuff2Op>; +def X86Punpcklwd : SDNode<"X86ISD::PUNPCKLWD", SDTShuff2Op>; +def X86Punpckldq : SDNode<"X86ISD::PUNPCKLDQ", SDTShuff2Op>; +def X86Punpcklqdq : SDNode<"X86ISD::PUNPCKLQDQ", SDTShuff2Op>; + +def X86Punpckhbw : SDNode<"X86ISD::PUNPCKHBW", SDTShuff2Op>; +def X86Punpckhwd : SDNode<"X86ISD::PUNPCKHWD", SDTShuff2Op>; +def X86Punpckhdq : SDNode<"X86ISD::PUNPCKHDQ", SDTShuff2Op>; +def X86Punpckhqdq : SDNode<"X86ISD::PUNPCKHQDQ", SDTShuff2Op>; //===----------------------------------------------------------------------===// // SSE Complex Patterns @@ -148,12 +206,13 @@ def sdmem : Operand<v2f64> { // SSE pattern fragments //===----------------------------------------------------------------------===// +// 128-bit load pattern fragments def loadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (load node:$ptr))>; def loadv2f64 : PatFrag<(ops node:$ptr), (v2f64 (load node:$ptr))>; def loadv4i32 : PatFrag<(ops node:$ptr), (v4i32 (load node:$ptr))>; def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>; -// FIXME: move this to a more appropriate place after all AVX is done. 
+// 256-bit load pattern fragments def loadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (load node:$ptr))>; def loadv4f64 : PatFrag<(ops node:$ptr), (v4f64 (load node:$ptr))>; def loadv8i32 : PatFrag<(ops node:$ptr), (v8i32 (load node:$ptr))>; @@ -174,6 +233,8 @@ def alignedloadfsf32 : PatFrag<(ops node:$ptr), (f32 (alignedload node:$ptr))>; def alignedloadfsf64 : PatFrag<(ops node:$ptr), (f64 (alignedload node:$ptr))>; + +// 128-bit aligned load pattern fragments def alignedloadv4f32 : PatFrag<(ops node:$ptr), (v4f32 (alignedload node:$ptr))>; def alignedloadv2f64 : PatFrag<(ops node:$ptr), @@ -183,7 +244,7 @@ def alignedloadv4i32 : PatFrag<(ops node:$ptr), def alignedloadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (alignedload node:$ptr))>; -// FIXME: move this to a more appropriate place after all AVX is done. +// 256-bit aligned load pattern fragments def alignedloadv8f32 : PatFrag<(ops node:$ptr), (v8f32 (alignedload node:$ptr))>; def alignedloadv4f64 : PatFrag<(ops node:$ptr), @@ -206,15 +267,20 @@ def memop : PatFrag<(ops node:$ptr), (load node:$ptr), [{ def memopfsf32 : PatFrag<(ops node:$ptr), (f32 (memop node:$ptr))>; def memopfsf64 : PatFrag<(ops node:$ptr), (f64 (memop node:$ptr))>; + +// 128-bit memop pattern fragments def memopv4f32 : PatFrag<(ops node:$ptr), (v4f32 (memop node:$ptr))>; def memopv2f64 : PatFrag<(ops node:$ptr), (v2f64 (memop node:$ptr))>; def memopv4i32 : PatFrag<(ops node:$ptr), (v4i32 (memop node:$ptr))>; def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>; def memopv16i8 : PatFrag<(ops node:$ptr), (v16i8 (memop node:$ptr))>; -// FIXME: move this to a more appropriate place after all AVX is done. +// 256-bit memop pattern fragments +def memopv32i8 : PatFrag<(ops node:$ptr), (v32i8 (memop node:$ptr))>; def memopv8f32 : PatFrag<(ops node:$ptr), (v8f32 (memop node:$ptr))>; def memopv4f64 : PatFrag<(ops node:$ptr), (v4f64 (memop node:$ptr))>; +def memopv4i64 : PatFrag<(ops node:$ptr), (v4i64 (memop node:$ptr))>; +def memopv8i32 : PatFrag<(ops node:$ptr), (v8i32 (memop node:$ptr))>; // SSSE3 uses MMX registers for some instructions. They aren't aligned on a // 16-byte boundary. 
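The split between 128-bit and 256-bit pattern fragments above mirrors the XMM and YMM operand widths. A small C++ sketch of the layouts these fragments correspond to (illustrative struct names, not part of the patch):

  // Element layouts behind the 128-bit (XMM) and 256-bit (YMM) fragments.
  struct v4f32 { float  e[4]; };   // loadv4f32 / memopv4f32: 16 bytes
  struct v2f64 { double e[2]; };   // loadv2f64 / memopv2f64: 16 bytes
  struct v8f32 { float  e[8]; };   // loadv8f32 / memopv8f32: 32 bytes
  struct v4f64 { double e[4]; };   // loadv4f64 / memopv4f64: 32 bytes

  static_assert(sizeof(v4f32) == 16 && sizeof(v2f64) == 16, "XMM width");
  static_assert(sizeof(v8f32) == 32 && sizeof(v4f64) == 32, "YMM width");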
@@ -254,6 +320,7 @@ def unalignednontemporalstore : PatFrag<(ops node:$val, node:$ptr), return false; }]>; +// 128-bit bitconvert pattern fragments def bc_v4f32 : PatFrag<(ops node:$in), (v4f32 (bitconvert node:$in))>; def bc_v2f64 : PatFrag<(ops node:$in), (v2f64 (bitconvert node:$in))>; def bc_v16i8 : PatFrag<(ops node:$in), (v16i8 (bitconvert node:$in))>; @@ -261,6 +328,9 @@ def bc_v8i16 : PatFrag<(ops node:$in), (v8i16 (bitconvert node:$in))>; def bc_v4i32 : PatFrag<(ops node:$in), (v4i32 (bitconvert node:$in))>; def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>; +// 256-bit bitconvert pattern fragments +def bc_v8i32 : PatFrag<(ops node:$in), (v8i32 (bitconvert node:$in))>; + def vzmovl_v2i64 : PatFrag<(ops node:$src), (bitconvert (v2i64 (X86vzmovl (v2i64 (scalar_to_vector (loadi64 node:$src))))))>; diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index ce471eadd78b..5280940cf437 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -235,6 +235,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) { X86::BT64ri8, X86::BT64mi8, 1, 0 }, { X86::CALL32r, X86::CALL32m, 1, 0 }, { X86::CALL64r, X86::CALL64m, 1, 0 }, + { X86::WINCALL64r, X86::WINCALL64m, 1, 0 }, { X86::CMP16ri, X86::CMP16mi, 1, 0 }, { X86::CMP16ri8, X86::CMP16mi8, 1, 0 }, { X86::CMP16rr, X86::CMP16mr, 1, 0 }, @@ -667,46 +668,6 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) assert(AmbEntries.empty() && "Duplicated entries in unfolding maps?"); } -bool X86InstrInfo::isMoveInstr(const MachineInstr& MI, - unsigned &SrcReg, unsigned &DstReg, - unsigned &SrcSubIdx, unsigned &DstSubIdx) const { - switch (MI.getOpcode()) { - default: - return false; - case X86::MOV8rr: - case X86::MOV8rr_NOREX: - case X86::MOV16rr: - case X86::MOV32rr: - case X86::MOV64rr: - case X86::MOV32rr_TC: - case X86::MOV64rr_TC: - - // FP Stack register class copies - case X86::MOV_Fp3232: case X86::MOV_Fp6464: case X86::MOV_Fp8080: - case X86::MOV_Fp3264: case X86::MOV_Fp3280: - case X86::MOV_Fp6432: case X86::MOV_Fp8032: - - // Note that MOVSSrr and MOVSDrr are not considered copies. FR32 and FR64 - // copies are done with FsMOVAPSrr and FsMOVAPDrr. 
- - case X86::FsMOVAPSrr: - case X86::FsMOVAPDrr: - case X86::MOVAPSrr: - case X86::MOVAPDrr: - case X86::MOVDQArr: - case X86::MMX_MOVQ64rr: - assert(MI.getNumOperands() >= 2 && - MI.getOperand(0).isReg() && - MI.getOperand(1).isReg() && - "invalid register-register move instruction"); - SrcReg = MI.getOperand(1).getReg(); - DstReg = MI.getOperand(0).getReg(); - SrcSubIdx = MI.getOperand(1).getSubReg(); - DstSubIdx = MI.getOperand(0).getSubReg(); - return true; - } -} - bool X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg, unsigned &DstReg, @@ -827,7 +788,7 @@ static bool isFrameStoreOpcode(int Opcode) { unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI, int &FrameIndex) const { if (isFrameLoadOpcode(MI->getOpcode())) - if (isFrameOperand(MI, 1, FrameIndex)) + if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex)) return MI->getOperand(0).getReg(); return 0; } @@ -866,7 +827,8 @@ bool X86InstrInfo::hasLoadFromStackSlot(const MachineInstr *MI, unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI, int &FrameIndex) const { if (isFrameStoreOpcode(MI->getOpcode())) - if (isFrameOperand(MI, 0, FrameIndex)) + if (MI->getOperand(X86::AddrNumOperands).getSubReg() == 0 && + isFrameOperand(MI, 0, FrameIndex)) return MI->getOperand(X86::AddrNumOperands).getReg(); return 0; } @@ -1664,14 +1626,6 @@ bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const { return !isPredicated(MI); } -// For purposes of branch analysis do not count FP_REG_KILL as a terminator. -static bool isBrAnalysisUnpredicatedTerminator(const MachineInstr *MI, - const X86InstrInfo &TII) { - if (MI->getOpcode() == X86::FP_REG_KILL) - return false; - return TII.isUnpredicatedTerminator(MI); -} - bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, @@ -1688,7 +1642,7 @@ bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB, // Working from the bottom, when we see a non-terminator instruction, we're // done. - if (!isBrAnalysisUnpredicatedTerminator(I, *this)) + if (!isUnpredicatedTerminator(I)) break; // A terminator that isn't a branch can't easily be handled by this @@ -1891,6 +1845,33 @@ static bool isHReg(unsigned Reg) { return X86::GR8_ABCD_HRegClass.contains(Reg); } +// Try and copy between VR128/VR64 and GR64 registers. +static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg) { + // SrcReg(VR128) -> DestReg(GR64) + // SrcReg(VR64) -> DestReg(GR64) + // SrcReg(GR64) -> DestReg(VR128) + // SrcReg(GR64) -> DestReg(VR64) + + if (X86::GR64RegClass.contains(DestReg)) { + if (X86::VR128RegClass.contains(SrcReg)) { + // Copy from a VR128 register to a GR64 register. + return X86::MOVPQIto64rr; + } else if (X86::VR64RegClass.contains(SrcReg)) { + // Copy from a VR64 register to a GR64 register. + return X86::MOVSDto64rr; + } + } else if (X86::GR64RegClass.contains(SrcReg)) { + // Copy from a GR64 register to a VR128 register. + if (X86::VR128RegClass.contains(DestReg)) + return X86::MOV64toPQIrr; + // Copy from a GR64 register to a VR64 register. 
+ else if (X86::VR64RegClass.contains(DestReg)) + return X86::MOV64toSDrr; + } + + return 0; +} + void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, DebugLoc DL, unsigned DestReg, unsigned SrcReg, @@ -1915,6 +1896,8 @@ void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB, Opc = X86::MOVAPSrr; else if (X86::VR64RegClass.contains(DestReg, SrcReg)) Opc = X86::MMX_MOVQ64rr; + else + Opc = CopyToFromAsymmetricReg(DestReg, SrcReg); if (Opc) { BuildMI(MBB, MI, DL, get(Opc), DestReg) @@ -2046,6 +2029,8 @@ void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const { const MachineFunction &MF = *MBB.getParent(); + assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() && + "Stack slot too small for store"); bool isAligned = (RI.getStackAlignment() >= 16) || RI.canRealignStack(MF); unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM); DebugLoc DL = MBB.findDebugLoc(MI); @@ -2130,8 +2115,9 @@ bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB, CalleeFrameSize += SlotSize; BuildMI(MBB, MI, DL, get(Opc)).addReg(Reg, RegState::Kill); } else { + const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(), - &X86::VR128RegClass, &RI); + RC, &RI); } } @@ -2161,8 +2147,9 @@ bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB, if (!X86::VR128RegClass.contains(Reg) && !isWin64) { BuildMI(MBB, MI, DL, get(Opc), Reg); } else { + const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(), - &X86::VR128RegClass, &RI); + RC, &RI); } } return true; @@ -2423,10 +2410,17 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, Alignment = (*LoadMI->memoperands_begin())->getAlignment(); else switch (LoadMI->getOpcode()) { + case X86::AVX_SET0PSY: + case X86::AVX_SET0PDY: + Alignment = 32; + break; case X86::V_SET0PS: case X86::V_SET0PD: case X86::V_SET0PI: case X86::V_SETALLONES: + case X86::AVX_SET0PS: + case X86::AVX_SET0PD: + case X86::AVX_SET0PI: Alignment = 16; break; case X86::FsFLD0SD: @@ -2453,12 +2447,22 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, } else if (Ops.size() != 1) return NULL; + // Make sure the subregisters match. + // Otherwise we risk changing the size of the load. + if (LoadMI->getOperand(0).getSubReg() != MI->getOperand(Ops[0]).getSubReg()) + return NULL; + SmallVector<MachineOperand,X86::AddrNumOperands> MOs; switch (LoadMI->getOpcode()) { case X86::V_SET0PS: case X86::V_SET0PD: case X86::V_SET0PI: case X86::V_SETALLONES: + case X86::AVX_SET0PS: + case X86::AVX_SET0PD: + case X86::AVX_SET0PI: + case X86::AVX_SET0PSY: + case X86::AVX_SET0PDY: case X86::FsFLD0SD: case X86::FsFLD0SS: { // Folding a V_SET0P? or V_SETALLONES as a load, to ease register pressure. @@ -2485,10 +2489,13 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, // Create a constant-pool entry. 
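The Alignment values chosen above when folding the zero/all-ones idioms into a constant-pool load reflect the natural alignment of the materialized constant: 16 bytes for the 128-bit V_SET0*/V_SETALLONES patterns and 32 bytes for the 256-bit AVX_SET0PSY/AVX_SET0PDY ones. A minimal sketch of that kind of alignment check (hypothetical helper, not LLVM's API):

  #include <cstdint>

  // True if Addr is aligned enough to hold the folded constant:
  // 16 bytes for a 128-bit constant, 32 bytes for a 256-bit one.
  static bool isSuitablyAligned(const void *Addr, unsigned ConstBytes) {
    return reinterpret_cast<std::uintptr_t>(Addr) % ConstBytes == 0;
  }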
MachineConstantPool &MCP = *MF.getConstantPool(); const Type *Ty; - if (LoadMI->getOpcode() == X86::FsFLD0SS) + unsigned Opc = LoadMI->getOpcode(); + if (Opc == X86::FsFLD0SS) Ty = Type::getFloatTy(MF.getFunction()->getContext()); - else if (LoadMI->getOpcode() == X86::FsFLD0SD) + else if (Opc == X86::FsFLD0SD) Ty = Type::getDoubleTy(MF.getFunction()->getContext()); + else if (Opc == X86::AVX_SET0PSY || Opc == X86::AVX_SET0PDY) + Ty = VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 8); else Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4); const Constant *C = LoadMI->getOpcode() == X86::V_SETALLONES ? @@ -2991,561 +2998,6 @@ bool X86InstrInfo::isX86_64ExtendedReg(unsigned RegNo) { return false; } - -/// determineREX - Determine if the MachineInstr has to be encoded with a X86-64 -/// REX prefix which specifies 1) 64-bit instructions, 2) non-default operand -/// size, and 3) use of X86-64 extended registers. -unsigned X86InstrInfo::determineREX(const MachineInstr &MI) { - unsigned REX = 0; - const TargetInstrDesc &Desc = MI.getDesc(); - - // Pseudo instructions do not need REX prefix byte. - if ((Desc.TSFlags & X86II::FormMask) == X86II::Pseudo) - return 0; - if (Desc.TSFlags & X86II::REX_W) - REX |= 1 << 3; - - unsigned NumOps = Desc.getNumOperands(); - if (NumOps) { - bool isTwoAddr = NumOps > 1 && - Desc.getOperandConstraint(1, TOI::TIED_TO) != -1; - - // If it accesses SPL, BPL, SIL, or DIL, then it requires a 0x40 REX prefix. - unsigned i = isTwoAddr ? 1 : 0; - for (unsigned e = NumOps; i != e; ++i) { - const MachineOperand& MO = MI.getOperand(i); - if (MO.isReg()) { - unsigned Reg = MO.getReg(); - if (isX86_64NonExtLowByteReg(Reg)) - REX |= 0x40; - } - } - - switch (Desc.TSFlags & X86II::FormMask) { - case X86II::MRMInitReg: - if (isX86_64ExtendedReg(MI.getOperand(0))) - REX |= (1 << 0) | (1 << 2); - break; - case X86II::MRMSrcReg: { - if (isX86_64ExtendedReg(MI.getOperand(0))) - REX |= 1 << 2; - i = isTwoAddr ? 2 : 1; - for (unsigned e = NumOps; i != e; ++i) { - const MachineOperand& MO = MI.getOperand(i); - if (isX86_64ExtendedReg(MO)) - REX |= 1 << 0; - } - break; - } - case X86II::MRMSrcMem: { - if (isX86_64ExtendedReg(MI.getOperand(0))) - REX |= 1 << 2; - unsigned Bit = 0; - i = isTwoAddr ? 2 : 1; - for (; i != NumOps; ++i) { - const MachineOperand& MO = MI.getOperand(i); - if (MO.isReg()) { - if (isX86_64ExtendedReg(MO)) - REX |= 1 << Bit; - Bit++; - } - } - break; - } - case X86II::MRM0m: case X86II::MRM1m: - case X86II::MRM2m: case X86II::MRM3m: - case X86II::MRM4m: case X86II::MRM5m: - case X86II::MRM6m: case X86II::MRM7m: - case X86II::MRMDestMem: { - unsigned e = (isTwoAddr ? X86::AddrNumOperands+1 : X86::AddrNumOperands); - i = isTwoAddr ? 1 : 0; - if (NumOps > e && isX86_64ExtendedReg(MI.getOperand(e))) - REX |= 1 << 2; - unsigned Bit = 0; - for (; i != e; ++i) { - const MachineOperand& MO = MI.getOperand(i); - if (MO.isReg()) { - if (isX86_64ExtendedReg(MO)) - REX |= 1 << Bit; - Bit++; - } - } - break; - } - default: { - if (isX86_64ExtendedReg(MI.getOperand(0))) - REX |= 1 << 0; - i = isTwoAddr ? 
2 : 1; - for (unsigned e = NumOps; i != e; ++i) { - const MachineOperand& MO = MI.getOperand(i); - if (isX86_64ExtendedReg(MO)) - REX |= 1 << 2; - } - break; - } - } - } - return REX; -} - -/// sizePCRelativeBlockAddress - This method returns the size of a PC -/// relative block address instruction -/// -static unsigned sizePCRelativeBlockAddress() { - return 4; -} - -/// sizeGlobalAddress - Give the size of the emission of this global address -/// -static unsigned sizeGlobalAddress(bool dword) { - return dword ? 8 : 4; -} - -/// sizeConstPoolAddress - Give the size of the emission of this constant -/// pool address -/// -static unsigned sizeConstPoolAddress(bool dword) { - return dword ? 8 : 4; -} - -/// sizeExternalSymbolAddress - Give the size of the emission of this external -/// symbol -/// -static unsigned sizeExternalSymbolAddress(bool dword) { - return dword ? 8 : 4; -} - -/// sizeJumpTableAddress - Give the size of the emission of this jump -/// table address -/// -static unsigned sizeJumpTableAddress(bool dword) { - return dword ? 8 : 4; -} - -static unsigned sizeConstant(unsigned Size) { - return Size; -} - -static unsigned sizeRegModRMByte(){ - return 1; -} - -static unsigned sizeSIBByte(){ - return 1; -} - -static unsigned getDisplacementFieldSize(const MachineOperand *RelocOp) { - unsigned FinalSize = 0; - // If this is a simple integer displacement that doesn't require a relocation. - if (!RelocOp) { - FinalSize += sizeConstant(4); - return FinalSize; - } - - // Otherwise, this is something that requires a relocation. - if (RelocOp->isGlobal()) { - FinalSize += sizeGlobalAddress(false); - } else if (RelocOp->isCPI()) { - FinalSize += sizeConstPoolAddress(false); - } else if (RelocOp->isJTI()) { - FinalSize += sizeJumpTableAddress(false); - } else { - llvm_unreachable("Unknown value to relocate!"); - } - return FinalSize; -} - -static unsigned getMemModRMByteSize(const MachineInstr &MI, unsigned Op, - bool IsPIC, bool Is64BitMode) { - const MachineOperand &Op3 = MI.getOperand(Op+3); - int DispVal = 0; - const MachineOperand *DispForReloc = 0; - unsigned FinalSize = 0; - - // Figure out what sort of displacement we have to handle here. - if (Op3.isGlobal()) { - DispForReloc = &Op3; - } else if (Op3.isCPI()) { - if (Is64BitMode || IsPIC) { - DispForReloc = &Op3; - } else { - DispVal = 1; - } - } else if (Op3.isJTI()) { - if (Is64BitMode || IsPIC) { - DispForReloc = &Op3; - } else { - DispVal = 1; - } - } else { - DispVal = 1; - } - - const MachineOperand &Base = MI.getOperand(Op); - const MachineOperand &IndexReg = MI.getOperand(Op+2); - - unsigned BaseReg = Base.getReg(); - - // Is a SIB byte needed? - if ((!Is64BitMode || DispForReloc || BaseReg != 0) && - IndexReg.getReg() == 0 && - (BaseReg == 0 || X86RegisterInfo::getX86RegNum(BaseReg) != N86::ESP)) { - if (BaseReg == 0) { // Just a displacement? - // Emit special case [disp32] encoding - ++FinalSize; - FinalSize += getDisplacementFieldSize(DispForReloc); - } else { - unsigned BaseRegNo = X86RegisterInfo::getX86RegNum(BaseReg); - if (!DispForReloc && DispVal == 0 && BaseRegNo != N86::EBP) { - // Emit simple indirect register encoding... [EAX] f.e. 
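The determineREX logic being removed above assembles the REX prefix from four bits on top of the fixed 0100 high nibble: W (bit 3, 64-bit operand size), R (bit 2, extension of the ModRM reg field), X (bit 1, extension of the SIB index), and B (bit 0, extension of the ModRM r/m, SIB base, or opcode register field). A simplified sketch of that composition (not the removed code itself):

  #include <cstdint>

  // Compose an x86-64 REX prefix byte: 0100 W R X B.
  static std::uint8_t makeREX(bool W, bool R, bool X, bool B) {
    return std::uint8_t(0x40 | (W << 3) | (R << 2) | (X << 1) | (B << 0));
  }
  // Example: "addq %r8, (%r9)" needs W, R (reg field = r8) and B (base = r9):
  // makeREX(true, true, false, true) == 0x4D.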
- ++FinalSize; - // Be pessimistic and assume it's a disp32, not a disp8 - } else { - // Emit the most general non-SIB encoding: [REG+disp32] - ++FinalSize; - FinalSize += getDisplacementFieldSize(DispForReloc); - } - } - - } else { // We need a SIB byte, so start by outputting the ModR/M byte first - assert(IndexReg.getReg() != X86::ESP && - IndexReg.getReg() != X86::RSP && "Cannot use ESP as index reg!"); - - bool ForceDisp32 = false; - if (BaseReg == 0 || DispForReloc) { - // Emit the normal disp32 encoding. - ++FinalSize; - ForceDisp32 = true; - } else { - ++FinalSize; - } - - FinalSize += sizeSIBByte(); - - // Do we need to output a displacement? - if (DispVal != 0 || ForceDisp32) { - FinalSize += getDisplacementFieldSize(DispForReloc); - } - } - return FinalSize; -} - - -static unsigned GetInstSizeWithDesc(const MachineInstr &MI, - const TargetInstrDesc *Desc, - bool IsPIC, bool Is64BitMode) { - - unsigned Opcode = Desc->Opcode; - unsigned FinalSize = 0; - - // Emit the lock opcode prefix as needed. - if (Desc->TSFlags & X86II::LOCK) ++FinalSize; - - // Emit segment override opcode prefix as needed. - switch (Desc->TSFlags & X86II::SegOvrMask) { - case X86II::FS: - case X86II::GS: - ++FinalSize; - break; - default: llvm_unreachable("Invalid segment!"); - case 0: break; // No segment override! - } - - // Emit the repeat opcode prefix as needed. - if ((Desc->TSFlags & X86II::Op0Mask) == X86II::REP) ++FinalSize; - - // Emit the operand size opcode prefix as needed. - if (Desc->TSFlags & X86II::OpSize) ++FinalSize; - - // Emit the address size opcode prefix as needed. - if (Desc->TSFlags & X86II::AdSize) ++FinalSize; - - bool Need0FPrefix = false; - switch (Desc->TSFlags & X86II::Op0Mask) { - case X86II::TB: // Two-byte opcode prefix - case X86II::T8: // 0F 38 - case X86II::TA: // 0F 3A - Need0FPrefix = true; - break; - case X86II::TF: // F2 0F 38 - ++FinalSize; - Need0FPrefix = true; - break; - case X86II::REP: break; // already handled. - case X86II::XS: // F3 0F - ++FinalSize; - Need0FPrefix = true; - break; - case X86II::XD: // F2 0F - ++FinalSize; - Need0FPrefix = true; - break; - case X86II::D8: case X86II::D9: case X86II::DA: case X86II::DB: - case X86II::DC: case X86II::DD: case X86II::DE: case X86II::DF: - ++FinalSize; - break; // Two-byte opcode prefix - default: llvm_unreachable("Invalid prefix!"); - case 0: break; // No prefix! - } - - if (Is64BitMode) { - // REX prefix - unsigned REX = X86InstrInfo::determineREX(MI); - if (REX) - ++FinalSize; - } - - // 0x0F escape code must be emitted just before the opcode. - if (Need0FPrefix) - ++FinalSize; - - switch (Desc->TSFlags & X86II::Op0Mask) { - case X86II::T8: // 0F 38 - ++FinalSize; - break; - case X86II::TA: // 0F 3A - ++FinalSize; - break; - case X86II::TF: // F2 0F 38 - ++FinalSize; - break; - } - - // If this is a two-address instruction, skip one of the register operands. - unsigned NumOps = Desc->getNumOperands(); - unsigned CurOp = 0; - if (NumOps > 1 && Desc->getOperandConstraint(1, TOI::TIED_TO) != -1) - CurOp++; - else if (NumOps > 2 && Desc->getOperandConstraint(NumOps-1, TOI::TIED_TO)== 0) - // Skip the last source operand that is tied_to the dest reg. e.g. LXADD32 - --NumOps; - - switch (Desc->TSFlags & X86II::FormMask) { - default: llvm_unreachable("Unknown FormMask value in X86 MachineCodeEmitter!"); - case X86II::Pseudo: - // Remember the current PC offset, this is the PIC relocation - // base address. 
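The size estimator being deleted here walks the same rules the encoder applies to a 32-bit memory operand: one ModRM byte, a SIB byte when an index register is present or the base is ESP, and either no displacement or (pessimistically) a full disp32. A simplified sketch of that accounting, assuming 32-bit addressing and ignoring RIP-relative forms and disp8 shortening:

  #include <cstdint>

  // Pessimistic byte count for a 32-bit ModRM memory operand, mirroring the
  // removed estimator: ModRM, optional SIB, and 0 or 4 displacement bytes.
  // Base register numbers use the usual encoding (ESP = 4, EBP = 5).
  static unsigned memOperandBytes(bool HasBase, unsigned BaseRegNo,
                                  bool HasIndex, std::int32_t Disp) {
    unsigned Bytes = 1;                                  // ModRM
    bool NeedSIB = HasIndex || (HasBase && BaseRegNo == 4 /*ESP*/);
    if (NeedSIB)
      ++Bytes;                                           // SIB
    if (!HasBase)
      Bytes += 4;                                        // [disp32] only
    else if (Disp != 0 || BaseRegNo == 5 /*EBP requires a disp*/)
      Bytes += 4;                                        // assume disp32
    return Bytes;
  }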
- switch (Opcode) { - default: - break; - case TargetOpcode::INLINEASM: { - const MachineFunction *MF = MI.getParent()->getParent(); - const TargetInstrInfo &TII = *MF->getTarget().getInstrInfo(); - FinalSize += TII.getInlineAsmLength(MI.getOperand(0).getSymbolName(), - *MF->getTarget().getMCAsmInfo()); - break; - } - case TargetOpcode::DBG_LABEL: - case TargetOpcode::EH_LABEL: - case TargetOpcode::DBG_VALUE: - break; - case TargetOpcode::IMPLICIT_DEF: - case TargetOpcode::KILL: - case X86::FP_REG_KILL: - break; - case X86::MOVPC32r: { - // This emits the "call" portion of this pseudo instruction. - ++FinalSize; - FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags)); - break; - } - } - CurOp = NumOps; - break; - case X86II::RawFrm: - ++FinalSize; - - if (CurOp != NumOps) { - const MachineOperand &MO = MI.getOperand(CurOp++); - if (MO.isMBB()) { - FinalSize += sizePCRelativeBlockAddress(); - } else if (MO.isGlobal()) { - FinalSize += sizeGlobalAddress(false); - } else if (MO.isSymbol()) { - FinalSize += sizeExternalSymbolAddress(false); - } else if (MO.isImm()) { - FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags)); - } else { - llvm_unreachable("Unknown RawFrm operand!"); - } - } - break; - - case X86II::AddRegFrm: - ++FinalSize; - ++CurOp; - - if (CurOp != NumOps) { - const MachineOperand &MO1 = MI.getOperand(CurOp++); - unsigned Size = X86II::getSizeOfImm(Desc->TSFlags); - if (MO1.isImm()) - FinalSize += sizeConstant(Size); - else { - bool dword = false; - if (Opcode == X86::MOV64ri) - dword = true; - if (MO1.isGlobal()) { - FinalSize += sizeGlobalAddress(dword); - } else if (MO1.isSymbol()) - FinalSize += sizeExternalSymbolAddress(dword); - else if (MO1.isCPI()) - FinalSize += sizeConstPoolAddress(dword); - else if (MO1.isJTI()) - FinalSize += sizeJumpTableAddress(dword); - } - } - break; - - case X86II::MRMDestReg: { - ++FinalSize; - FinalSize += sizeRegModRMByte(); - CurOp += 2; - if (CurOp != NumOps) { - ++CurOp; - FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags)); - } - break; - } - case X86II::MRMDestMem: { - ++FinalSize; - FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode); - CurOp += X86::AddrNumOperands + 1; - if (CurOp != NumOps) { - ++CurOp; - FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags)); - } - break; - } - - case X86II::MRMSrcReg: - ++FinalSize; - FinalSize += sizeRegModRMByte(); - CurOp += 2; - if (CurOp != NumOps) { - ++CurOp; - FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags)); - } - break; - - case X86II::MRMSrcMem: { - ++FinalSize; - FinalSize += getMemModRMByteSize(MI, CurOp+1, IsPIC, Is64BitMode); - CurOp += X86::AddrNumOperands + 1; - if (CurOp != NumOps) { - ++CurOp; - FinalSize += sizeConstant(X86II::getSizeOfImm(Desc->TSFlags)); - } - break; - } - - case X86II::MRM0r: case X86II::MRM1r: - case X86II::MRM2r: case X86II::MRM3r: - case X86II::MRM4r: case X86II::MRM5r: - case X86II::MRM6r: case X86II::MRM7r: - ++FinalSize; - if (Desc->getOpcode() == X86::LFENCE || - Desc->getOpcode() == X86::MFENCE) { - // Special handling of lfence and mfence; - FinalSize += sizeRegModRMByte(); - } else if (Desc->getOpcode() == X86::MONITOR || - Desc->getOpcode() == X86::MWAIT) { - // Special handling of monitor and mwait. - FinalSize += sizeRegModRMByte() + 1; // +1 for the opcode. 
- } else { - ++CurOp; - FinalSize += sizeRegModRMByte(); - } - - if (CurOp != NumOps) { - const MachineOperand &MO1 = MI.getOperand(CurOp++); - unsigned Size = X86II::getSizeOfImm(Desc->TSFlags); - if (MO1.isImm()) - FinalSize += sizeConstant(Size); - else { - bool dword = false; - if (Opcode == X86::MOV64ri32) - dword = true; - if (MO1.isGlobal()) { - FinalSize += sizeGlobalAddress(dword); - } else if (MO1.isSymbol()) - FinalSize += sizeExternalSymbolAddress(dword); - else if (MO1.isCPI()) - FinalSize += sizeConstPoolAddress(dword); - else if (MO1.isJTI()) - FinalSize += sizeJumpTableAddress(dword); - } - } - break; - - case X86II::MRM0m: case X86II::MRM1m: - case X86II::MRM2m: case X86II::MRM3m: - case X86II::MRM4m: case X86II::MRM5m: - case X86II::MRM6m: case X86II::MRM7m: { - - ++FinalSize; - FinalSize += getMemModRMByteSize(MI, CurOp, IsPIC, Is64BitMode); - CurOp += X86::AddrNumOperands; - - if (CurOp != NumOps) { - const MachineOperand &MO = MI.getOperand(CurOp++); - unsigned Size = X86II::getSizeOfImm(Desc->TSFlags); - if (MO.isImm()) - FinalSize += sizeConstant(Size); - else { - bool dword = false; - if (Opcode == X86::MOV64mi32) - dword = true; - if (MO.isGlobal()) { - FinalSize += sizeGlobalAddress(dword); - } else if (MO.isSymbol()) - FinalSize += sizeExternalSymbolAddress(dword); - else if (MO.isCPI()) - FinalSize += sizeConstPoolAddress(dword); - else if (MO.isJTI()) - FinalSize += sizeJumpTableAddress(dword); - } - } - break; - - case X86II::MRM_C1: - case X86II::MRM_C8: - case X86II::MRM_C9: - case X86II::MRM_E8: - case X86II::MRM_F0: - FinalSize += 2; - break; - } - - case X86II::MRMInitReg: - ++FinalSize; - // Duplicate register, used by things like MOV8r0 (aka xor reg,reg). - FinalSize += sizeRegModRMByte(); - ++CurOp; - break; - } - - if (!Desc->isVariadic() && CurOp != NumOps) { - std::string msg; - raw_string_ostream Msg(msg); - Msg << "Cannot determine size: " << MI; - report_fatal_error(Msg.str()); - } - - - return FinalSize; -} - - -unsigned X86InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const { - const TargetInstrDesc &Desc = MI->getDesc(); - bool IsPIC = TM.getRelocationModel() == Reloc::PIC_; - bool Is64BitMode = TM.getSubtargetImpl()->is64Bit(); - unsigned Size = GetInstSizeWithDesc(*MI, &Desc, IsPIC, Is64BitMode); - if (Desc.getOpcode() == X86::MOVPC32r) - Size += GetInstSizeWithDesc(*MI, &get(X86::POP32r), IsPIC, Is64BitMode); - return Size; -} - /// getGlobalBaseReg - Return a virtual register initialized with the /// the global base register value. Output instructions required to /// initialize the register in the function entry block, if necessary. @@ -3573,7 +3025,7 @@ unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const { // that we don't include here. We don't want to replace instructions selected // by intrinsics. 
static const unsigned ReplaceableInstrs[][3] = { - //PackedInt PackedSingle PackedDouble + //PackedSingle PackedDouble PackedInt { X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr }, { X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm }, { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr }, @@ -3589,6 +3041,22 @@ static const unsigned ReplaceableInstrs[][3] = { { X86::V_SET0PS, X86::V_SET0PD, X86::V_SET0PI }, { X86::XORPSrm, X86::XORPDrm, X86::PXORrm }, { X86::XORPSrr, X86::XORPDrr, X86::PXORrr }, + // AVX 128-bit support + { X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr }, + { X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm }, + { X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr }, + { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr }, + { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm }, + { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr }, + { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm }, + { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr }, + { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm }, + { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr }, + { X86::VORPSrm, X86::VORPDrm, X86::VPORrm }, + { X86::VORPSrr, X86::VORPDrr, X86::VPORrr }, + { X86::AVX_SET0PS, X86::AVX_SET0PD, X86::AVX_SET0PI }, + { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm }, + { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr }, }; // FIXME: Some shuffle and unpack instructions have equivalents in different @@ -3627,7 +3095,7 @@ namespace { /// global base register for x86-32. struct CGBR : public MachineFunctionPass { static char ID; - CGBR() : MachineFunctionPass(&ID) {} + CGBR() : MachineFunctionPass(ID) {} virtual bool runOnMachineFunction(MachineFunction &MF) { const X86TargetMachine *TM = diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h index ad0217adb475..f33620641e88 100644 --- a/lib/Target/X86/X86InstrInfo.h +++ b/lib/Target/X86/X86InstrInfo.h @@ -311,6 +311,12 @@ namespace X86II { MRM_F0 = 40, MRM_F8 = 41, MRM_F9 = 42, + + /// RawFrmImm16 - This is used for CALL FAR instructions, which have two + /// immediates, the first of which is a 16 or 32-bit immediate (specified by + /// the imm encoding) and the second is a 16-bit fixed value. In the AMD + /// manual, this operand is described as pntr16:32 and pntr16:16 + RawFrmImm16 = 43, FormMask = 63, @@ -439,27 +445,27 @@ namespace X86II { //===------------------------------------------------------------------===// // VEX - The opcode prefix used by AVX instructions - VEX = 1ULL << 32, + VEX = 1U << 0, // VEX_W - Has a opcode specific functionality, but is used in the same // way as REX_W is for regular SSE instructions. - VEX_W = 1ULL << 33, + VEX_W = 1U << 1, // VEX_4V - Used to specify an additional AVX/SSE register. Several 2 // address instructions in SSE are represented as 3 address ones in AVX // and the additional register is encoded in VEX_VVVV prefix. - VEX_4V = 1ULL << 34, + VEX_4V = 1U << 2, // VEX_I8IMM - Specifies that the last register used in a AVX instruction, // must be encoded in the i8 immediate field. This usually happens in // instructions with 4 operands. - VEX_I8IMM = 1ULL << 35, + VEX_I8IMM = 1U << 3, // VEX_L - Stands for a bit in the VEX opcode prefix meaning the current // instruction uses 256-bit wide registers. This is usually auto detected if // a VR256 register is used, but some AVX instructions also have this field // marked when using a f256 memory references. 
- VEX_L = 1ULL << 36 + VEX_L = 1U << 4 }; // getBaseOpcodeFor - This function returns the "base" X86 opcode for the @@ -522,11 +528,12 @@ namespace X86II { case X86II::AddRegFrm: case X86II::MRMDestReg: case X86II::MRMSrcReg: + case X86II::RawFrmImm16: return -1; case X86II::MRMDestMem: return 0; case X86II::MRMSrcMem: { - bool HasVEX_4V = TSFlags & X86II::VEX_4V; + bool HasVEX_4V = (TSFlags >> 32) & X86II::VEX_4V; unsigned FirstMemOp = 1; if (HasVEX_4V) ++FirstMemOp;// Skip the register source (which is encoded in VEX_VVVV). @@ -610,12 +617,6 @@ public: /// virtual const X86RegisterInfo &getRegisterInfo() const { return RI; } - /// Return true if the instruction is a register to register move and return - /// the source and dest operands and their sub-register indices by reference. - virtual bool isMoveInstr(const MachineInstr &MI, - unsigned &SrcReg, unsigned &DstReg, - unsigned &SrcSubIdx, unsigned &DstSubIdx) const; - /// isCoalescableExtInstr - Return true if the instruction is a "coalescable" /// extension instruction. That is, it's like a copy where it's legal for the /// source to overlap the destination. e.g. X86::MOVSX64rr32. If this returns @@ -826,16 +827,11 @@ public: if (!MO.isReg()) return false; return isX86_64ExtendedReg(MO.getReg()); } - static unsigned determineREX(const MachineInstr &MI); /// isX86_64ExtendedReg - Is the MachineOperand a x86-64 extended (r8 or /// higher) register? e.g. r8, xmm8, xmm13, etc. static bool isX86_64ExtendedReg(unsigned RegNo); - /// GetInstSize - Returns the size of the specified MachineInstr. - /// - virtual unsigned GetInstSizeInBytes(const MachineInstr *MI) const; - /// getGlobalBaseReg - Return a virtual register initialized with the /// the global base register value. Output instructions required to /// initialize the register in the function entry block, if necessary. diff --git a/lib/Target/X86/X86InstrInfo.td b/lib/Target/X86/X86InstrInfo.td index 1efef5a80b1b..09b7721a621d 100644 --- a/lib/Target/X86/X86InstrInfo.td +++ b/lib/Target/X86/X86InstrInfo.td @@ -80,6 +80,21 @@ def SDT_X86EHRET : SDTypeProfile<0, 1, [SDTCisInt<0>]>; def SDT_X86TCRET : SDTypeProfile<0, 2, [SDTCisPtrTy<0>, SDTCisVT<1, i32>]>; +def SDT_X86MEMBARRIER : SDTypeProfile<0, 0, []>; +def SDT_X86MEMBARRIERNoSSE : SDTypeProfile<0, 1, [SDTCisInt<0>]>; + +def X86MemBarrier : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIER, + [SDNPHasChain]>; +def X86MemBarrierNoSSE : SDNode<"X86ISD::MEMBARRIER", SDT_X86MEMBARRIERNoSSE, + [SDNPHasChain]>; +def X86MFence : SDNode<"X86ISD::MFENCE", SDT_X86MEMBARRIER, + [SDNPHasChain]>; +def X86SFence : SDNode<"X86ISD::SFENCE", SDT_X86MEMBARRIER, + [SDNPHasChain]>; +def X86LFence : SDNode<"X86ISD::LFENCE", SDT_X86MEMBARRIER, + [SDNPHasChain]>; + + def X86bsf : SDNode<"X86ISD::BSF", SDTUnaryArithWithFlags>; def X86bsr : SDNode<"X86ISD::BSR", SDTUnaryArithWithFlags>; def X86shld : SDNode<"X86ISD::SHLD", SDTIntShiftDOp>; @@ -222,7 +237,7 @@ def i16mem : X86MemOperand<"printi16mem">; def i32mem : X86MemOperand<"printi32mem">; def i64mem : X86MemOperand<"printi64mem">; def i128mem : X86MemOperand<"printi128mem">; -//def i256mem : X86MemOperand<"printi256mem">; +def i256mem : X86MemOperand<"printi256mem">; def f32mem : X86MemOperand<"printf32mem">; def f64mem : X86MemOperand<"printf64mem">; def f80mem : X86MemOperand<"printf80mem">; @@ -333,15 +348,21 @@ def tls32addr : ComplexPattern<i32, 5, "SelectTLSADDRAddr", // X86 Instruction Predicate Definitions. 
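With the repacking above, the VEX attribute bits become small values again and live in the upper half of the 64-bit TSFlags word, which is why getMemoryOperandNo now tests them as (TSFlags >> 32) & X86II::VEX_4V. A minimal sketch of that packing scheme (illustrative constants only, not the full flag set):

  #include <cstdint>

  namespace sketch {
    // Low-valued attribute bits, stored in the upper 32 bits of TSFlags.
    enum { VEX = 1u << 0, VEX_W = 1u << 1, VEX_4V = 1u << 2 };

    inline std::uint64_t packVEX(std::uint64_t TSFlags, std::uint32_t VEXBits) {
      return TSFlags | (std::uint64_t(VEXBits) << 32);   // keep low word intact
    }
    inline bool hasVEX_4V(std::uint64_t TSFlags) {
      return (TSFlags >> 32) & VEX_4V;                   // matches the new accessor style
    }
  }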
def HasCMov : Predicate<"Subtarget->hasCMov()">; def NoCMov : Predicate<"!Subtarget->hasCMov()">; -def HasMMX : Predicate<"Subtarget->hasMMX()">; -def HasSSE1 : Predicate<"Subtarget->hasSSE1()">; -def HasSSE2 : Predicate<"Subtarget->hasSSE2()">; -def HasSSE3 : Predicate<"Subtarget->hasSSE3()">; -def HasSSSE3 : Predicate<"Subtarget->hasSSSE3()">; -def HasSSE41 : Predicate<"Subtarget->hasSSE41()">; -def HasSSE42 : Predicate<"Subtarget->hasSSE42()">; -def HasSSE4A : Predicate<"Subtarget->hasSSE4A()">; + +// FIXME: temporary hack to let codegen assert or generate poor code in case +// no AVX version of the desired intructions is present, this is better for +// incremental dev (without fallbacks it's easier to spot what's missing) +def HasMMX : Predicate<"Subtarget->hasMMX() && !Subtarget->hasAVX()">; +def HasSSE1 : Predicate<"Subtarget->hasSSE1() && !Subtarget->hasAVX()">; +def HasSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">; +def HasSSE3 : Predicate<"Subtarget->hasSSE3() && !Subtarget->hasAVX()">; +def HasSSSE3 : Predicate<"Subtarget->hasSSSE3() && !Subtarget->hasAVX()">; +def HasSSE41 : Predicate<"Subtarget->hasSSE41() && !Subtarget->hasAVX()">; +def HasSSE42 : Predicate<"Subtarget->hasSSE42() && !Subtarget->hasAVX()">; +def HasSSE4A : Predicate<"Subtarget->hasSSE4A() && !Subtarget->hasAVX()">; + def HasAVX : Predicate<"Subtarget->hasAVX()">; +def HasCLMUL : Predicate<"Subtarget->hasCLMUL()">; def HasFMA3 : Predicate<"Subtarget->hasFMA3()">; def HasFMA4 : Predicate<"Subtarget->hasFMA4()">; def FPStackf32 : Predicate<"!Subtarget->hasSSE1()">; @@ -393,9 +414,7 @@ def X86_COND_O : PatLeaf<(i8 13)>; def X86_COND_P : PatLeaf<(i8 14)>; // alt. COND_PE def X86_COND_S : PatLeaf<(i8 15)>; -def immSext8 : PatLeaf<(imm), [{ - return N->getSExtValue() == (int8_t)N->getSExtValue(); -}]>; +def immSext8 : PatLeaf<(imm), [{ return immSext8(N); }]>; def i16immSExt8 : PatLeaf<(i16 immSext8)>; def i32immSExt8 : PatLeaf<(i32 immSext8)>; @@ -559,9 +578,10 @@ def VASTART_SAVE_XMM_REGS : I<0, Pseudo, // The main point of having separate instruction are extra unmodelled effects // (compared to ordinary calls) like stack pointer change. 
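The reworked immSext8 predicate above accepts exactly those immediates that survive a round trip through a signed 8-bit value, i.e. the ones the sign-extended imm8 instruction forms can encode. A standalone sketch of the same test:

  #include <cstdint>

  // True if V can be encoded as a sign-extended 8-bit immediate,
  // i.e. -128 <= V <= 127.
  static bool isImmSExt8(std::int64_t V) {
    return V == static_cast<std::int8_t>(V);
  }
  // isImmSExt8(127) == true, isImmSExt8(-128) == true, isImmSExt8(128) == false.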
-def MINGW_ALLOCA : I<0, Pseudo, (outs), (ins), - "# dynamic stack allocation", - [(X86MingwAlloca)]>; +let Defs = [EAX, ESP, EFLAGS], Uses = [ESP] in + def MINGW_ALLOCA : I<0, Pseudo, (outs), (ins), + "# dynamic stack allocation", + [(X86MingwAlloca)]>; } // Nop @@ -574,10 +594,14 @@ let neverHasSideEffects = 1 in { } // Trap -def INTO : I<0xce, RawFrm, (outs), (ins), "into", []>; -def INT3 : I<0xcc, RawFrm, (outs), (ins), "int3", []>; +let Uses = [EFLAGS] in { + def INTO : I<0xce, RawFrm, (outs), (ins), "into", []>; +} +def INT3 : I<0xcc, RawFrm, (outs), (ins), "int3", + [(int_x86_int (i8 3))]>; // FIXME: need to make sure that "int $3" matches int3 -def INT : Ii8<0xcd, RawFrm, (outs), (ins i8imm:$trap), "int\t$trap", []>; +def INT : Ii8<0xcd, RawFrm, (outs), (ins i8imm:$trap), "int\t$trap", + [(int_x86_int imm:$trap)]>; def IRET16 : I<0xcf, RawFrm, (outs), (ins), "iret{w}", []>, OpSize; def IRET32 : I<0xcf, RawFrm, (outs), (ins), "iret{l}", []>; @@ -650,16 +674,16 @@ let Uses = [ECX], isBranch = 1, isTerminator = 1 in // Indirect branches let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { def JMP32r : I<0xFF, MRM4r, (outs), (ins GR32:$dst), "jmp{l}\t{*}$dst", - [(brind GR32:$dst)]>; + [(brind GR32:$dst)]>, Requires<[In32BitMode]>; def JMP32m : I<0xFF, MRM4m, (outs), (ins i32mem:$dst), "jmp{l}\t{*}$dst", - [(brind (loadi32 addr:$dst))]>; + [(brind (loadi32 addr:$dst))]>, Requires<[In32BitMode]>; - def FARJMP16i : Iseg16<0xEA, RawFrm, (outs), - (ins i16imm:$seg, i16imm:$off), - "ljmp{w}\t$seg, $off", []>, OpSize; - def FARJMP32i : Iseg32<0xEA, RawFrm, (outs), - (ins i16imm:$seg, i32imm:$off), - "ljmp{l}\t$seg, $off", []>; + def FARJMP16i : Iseg16<0xEA, RawFrmImm16, (outs), + (ins i16imm:$off, i16imm:$seg), + "ljmp{w}\t{$seg, $off|$off, $seg}", []>, OpSize; + def FARJMP32i : Iseg32<0xEA, RawFrmImm16, (outs), + (ins i32imm:$off, i16imm:$seg), + "ljmp{l}\t{$seg, $off|$off, $seg}", []>; def FARJMP16m : I<0xFF, MRM5m, (outs), (ins opaque32mem:$dst), "ljmp{w}\t{*}$dst", []>, OpSize; @@ -670,9 +694,9 @@ let isBranch = 1, isTerminator = 1, isBarrier = 1, isIndirectBranch = 1 in { // Loop instructions -def LOOP : I<0xE2, RawFrm, (outs), (ins brtarget8:$dst), "loop\t$dst", []>; -def LOOPE : I<0xE1, RawFrm, (outs), (ins brtarget8:$dst), "loope\t$dst", []>; -def LOOPNE : I<0xE0, RawFrm, (outs), (ins brtarget8:$dst), "loopne\t$dst", []>; +def LOOP : Ii8PCRel<0xE2, RawFrm, (outs), (ins brtarget8:$dst), "loop\t$dst", []>; +def LOOPE : Ii8PCRel<0xE1, RawFrm, (outs), (ins brtarget8:$dst), "loope\t$dst", []>; +def LOOPNE : Ii8PCRel<0xE0, RawFrm, (outs), (ins brtarget8:$dst), "loopne\t$dst", []>; //===----------------------------------------------------------------------===// // Call Instructions... 
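The FARJMP16i/FARJMP32i definitions above move to RawFrmImm16, the format for instructions that carry a 16- or 32-bit offset immediate followed by a fixed 16-bit segment selector (the ptr16:16 / ptr16:32 operands); note that the operand list now puts the offset first and the selector second, matching the encoding order. A sketch of the 32-bit direct far-jump encoding (EA ptr16:32), emitting little-endian bytes:

  #include <cstdint>
  #include <vector>

  // Encode "ljmpl $selector, $offset" in 32-bit mode: opcode EA, then the
  // 32-bit offset, then the 16-bit segment selector (little-endian).
  static std::vector<std::uint8_t> encodeFarJmp32(std::uint16_t Sel,
                                                  std::uint32_t Off) {
    std::vector<std::uint8_t> Bytes{0xEA};
    for (int i = 0; i < 4; ++i) Bytes.push_back(std::uint8_t(Off >> (8 * i)));
    for (int i = 0; i < 2; ++i) Bytes.push_back(std::uint8_t(Sel >> (8 * i)));
    return Bytes;                              // 7 bytes total
  }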
@@ -695,12 +719,12 @@ let isCall = 1 in def CALL32m : I<0xFF, MRM2m, (outs), (ins i32mem:$dst, variable_ops), "call\t{*}$dst", [(X86call (loadi32 addr:$dst))]>; - def FARCALL16i : Iseg16<0x9A, RawFrm, (outs), - (ins i16imm:$seg, i16imm:$off), - "lcall{w}\t$seg, $off", []>, OpSize; - def FARCALL32i : Iseg32<0x9A, RawFrm, (outs), - (ins i16imm:$seg, i32imm:$off), - "lcall{l}\t$seg, $off", []>; + def FARCALL16i : Iseg16<0x9A, RawFrmImm16, (outs), + (ins i16imm:$off, i16imm:$seg), + "lcall{w}\t{$seg, $off|$off, $seg}", []>, OpSize; + def FARCALL32i : Iseg32<0x9A, RawFrmImm16, (outs), + (ins i32imm:$off, i16imm:$seg), + "lcall{l}\t{$seg, $off|$off, $seg}", []>; def FARCALL16m : I<0xFF, MRM3m, (outs), (ins opaque32mem:$dst), "lcall{w}\t{*}$dst", []>, OpSize; @@ -721,7 +745,8 @@ def ENTER : I<0xC8, RawFrm, (outs), (ins i16imm:$len, i8imm:$lvl), // Tail call stuff. -let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in +let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, + isCodeGenOnly = 1 in let Defs = [EAX, ECX, EDX, FP0, FP1, FP2, FP3, FP4, FP5, FP6, ST0, MM0, MM1, MM2, MM3, MM4, MM5, MM6, MM7, XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7, @@ -756,7 +781,7 @@ let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1 in // let Defs = [EBP, ESP], Uses = [EBP, ESP], mayLoad = 1, neverHasSideEffects=1 in def LEAVE : I<0xC9, RawFrm, - (outs), (ins), "leave", []>; + (outs), (ins), "leave", []>, Requires<[In32BitMode]>; def POPCNT16rr : I<0xB8, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src), "popcnt{w}\t{$src, $dst|$dst, $src}", []>, OpSize, XS; @@ -934,7 +959,7 @@ def SYSRET : I<0x07, RawFrm, def SYSENTER : I<0x34, RawFrm, (outs), (ins), "sysenter", []>, TB; def SYSEXIT : I<0x35, RawFrm, - (outs), (ins), "sysexit", []>, TB; + (outs), (ins), "sysexit", []>, TB, Requires<[In32BitMode]>; def WAIT : I<0x9B, RawFrm, (outs), (ins), "wait", []>; @@ -1025,17 +1050,23 @@ def MOV32mi : Ii32<0xC7, MRM0m, (outs), (ins i32mem:$dst, i32imm:$src), /// moffs8, moffs16 and moffs32 versions of moves. The immediate is a /// 32-bit offset from the PC. These are only valid in x86-32 mode. def MOV8o8a : Ii32 <0xA0, RawFrm, (outs), (ins offset8:$src), - "mov{b}\t{$src, %al|%al, $src}", []>; + "mov{b}\t{$src, %al|%al, $src}", []>, + Requires<[In32BitMode]>; def MOV16o16a : Ii32 <0xA1, RawFrm, (outs), (ins offset16:$src), - "mov{w}\t{$src, %ax|%ax, $src}", []>, OpSize; + "mov{w}\t{$src, %ax|%ax, $src}", []>, OpSize, + Requires<[In32BitMode]>; def MOV32o32a : Ii32 <0xA1, RawFrm, (outs), (ins offset32:$src), - "mov{l}\t{$src, %eax|%eax, $src}", []>; + "mov{l}\t{$src, %eax|%eax, $src}", []>, + Requires<[In32BitMode]>; def MOV8ao8 : Ii32 <0xA2, RawFrm, (outs offset8:$dst), (ins), - "mov{b}\t{%al, $dst|$dst, %al}", []>; + "mov{b}\t{%al, $dst|$dst, %al}", []>, + Requires<[In32BitMode]>; def MOV16ao16 : Ii32 <0xA3, RawFrm, (outs offset16:$dst), (ins), - "mov{w}\t{%ax, $dst|$dst, %ax}", []>, OpSize; + "mov{w}\t{%ax, $dst|$dst, %ax}", []>, OpSize, + Requires<[In32BitMode]>; def MOV32ao32 : Ii32 <0xA3, RawFrm, (outs offset32:$dst), (ins), - "mov{l}\t{%eax, $dst|$dst, %eax}", []>; + "mov{l}\t{%eax, $dst|$dst, %eax}", []>, + Requires<[In32BitMode]>; // Moves to and from segment registers def MOV16rs : I<0x8C, MRMDestReg, (outs GR16:$dst), (ins SEGMENT_REG:$src), @@ -1087,6 +1118,7 @@ def MOV32mr : I<0x89, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src), [(store GR32:$src, addr:$dst)]>; /// Versions of MOV32rr, MOV32rm, and MOV32mr for i32mem_TC and GR32_TC. 
+let isCodeGenOnly = 1 in { let neverHasSideEffects = 1 in def MOV32rr_TC : I<0x89, MRMDestReg, (outs GR32_TC:$dst), (ins GR32_TC:$src), "mov{l}\t{$src, $dst|$dst, $src}", []>; @@ -1101,10 +1133,12 @@ let mayStore = 1 in def MOV32mr_TC : I<0x89, MRMDestMem, (outs), (ins i32mem_TC:$dst, GR32_TC:$src), "mov{l}\t{$src, $dst|$dst, $src}", []>; +} // Versions of MOV8rr, MOV8mr, and MOV8rm that use i8mem_NOREX and GR8_NOREX so // that they can be used for copying and storing h registers, which can't be // encoded when a REX prefix is present. +let isCodeGenOnly = 1 in { let neverHasSideEffects = 1 in def MOV8rr_NOREX : I<0x88, MRMDestReg, (outs GR8_NOREX:$dst), (ins GR8_NOREX:$src), @@ -1118,6 +1152,7 @@ let mayLoad = 1, def MOV8rm_NOREX : I<0x8A, MRMSrcMem, (outs GR8_NOREX:$dst), (ins i8mem_NOREX:$src), "mov{b}\t{$src, $dst|$dst, $src} # NOREX", []>; +} // Moves to and from debug registers def MOV32rd : I<0x21, MRMDestReg, (outs GR32:$dst), (ins DEBUG_REG:$src), @@ -1137,7 +1172,7 @@ def MOV32cr : I<0x22, MRMSrcReg, (outs CONTROL_REG:$dst), (ins GR32:$src), // Extra precision multiplication -// AL is really implied by AX, by the registers in Defs must match the +// AL is really implied by AX, but the registers in Defs must match the // SDNode results (i8, i32). let Defs = [AL,EFLAGS,AX], Uses = [AL] in def MUL8r : I<0xF6, MRM4r, (outs), (ins GR8:$src), "mul{b}\t$src", @@ -3895,6 +3930,20 @@ def EH_RETURN : I<0xC3, RawFrm, (outs), (ins GR32:$addr), // Atomic support // +// Memory barriers + +// TODO: Get this to fold the constant into the instruction. +def OR32mrLocked : I<0x09, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$zero), + "lock\n\t" + "or{l}\t{$zero, $dst|$dst, $zero}", + []>, Requires<[In32BitMode]>, LOCK; + +let hasSideEffects = 1 in { +def Int_MemBarrier : I<0, Pseudo, (outs), (ins), + "#MEMBARRIER", + [(X86MemBarrier)]>, Requires<[HasSSE2]>; +} + // Atomic swap. These are just normal xchg instructions. But since a memory // operand is referenced, the atomicity is ensured. 
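The memory-barrier definitions above give two lowerings for a full barrier: MFENCE when SSE2 is available, and a lock-prefixed "or" of zero into a stack slot (OR32mrLocked) when it is not, since any locked read-modify-write also serializes memory on x86. A sketch of both idioms using GCC-style inline assembly (assumes a 32-bit x86 target and a GCC/Clang-compatible compiler):

  // Full memory barrier, SSE2 form.
  static inline void barrier_mfence() {
    __asm__ __volatile__("mfence" ::: "memory");
  }

  // Full memory barrier without SSE2: a locked no-op RMW on the stack.
  static inline void barrier_lock_or() {
    __asm__ __volatile__("lock; orl $0, (%%esp)" ::: "memory");
  }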
let Constraints = "$val = $dst" in { @@ -4928,6 +4977,12 @@ include "X86Instr64bit.td" include "X86InstrFragmentsSIMD.td" //===----------------------------------------------------------------------===// +// FMA - Fused Multiply-Add support (requires FMA) +//===----------------------------------------------------------------------===// + +include "X86InstrFMA.td" + +//===----------------------------------------------------------------------===// // XMM Floating point support (requires SSE / SSE2) //===----------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86InstrMMX.td b/lib/Target/X86/X86InstrMMX.td index 6cf7ac83620e..11d4179534dc 100644 --- a/lib/Target/X86/X86InstrMMX.td +++ b/lib/Target/X86/X86InstrMMX.td @@ -164,7 +164,7 @@ let neverHasSideEffects = 1 in def MMX_MOVQ2FR64rr: SSDIi8<0xD6, MRMSrcReg, (outs FR64:$dst), (ins VR64:$src), "movq2dq\t{$src, $dst|$dst, $src}", []>; -def MMX_MOVFR642Qrr: SSDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), (ins FR64:$src), +def MMX_MOVFR642Qrr: SDIi8<0xD6, MRMSrcReg, (outs VR64:$dst), (ins FR64:$src), "movdq2q\t{$src, $dst|$dst, $src}", []>; def MMX_MOVNTQmr : MMXI<0xE7, MRMDestMem, (outs), (ins i64mem:$dst, VR64:$src), diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td index ebe161b46bdc..f5466f83f519 100644 --- a/lib/Target/X86/X86InstrSSE.td +++ b/lib/Target/X86/X86InstrSSE.td @@ -142,7 +142,7 @@ multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC, !if(Is2Addr, !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"), !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse", + [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_", !strconcat(SSEVer, !strconcat("_", !strconcat(OpcodeStr, FPSizeStr)))) RC:$src1, RC:$src2))], d>; @@ -150,7 +150,7 @@ multiclass sse12_fp_packed_int<bits<8> opc, string OpcodeStr, RegisterClass RC, !if(Is2Addr, !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"), !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_sse", + [(set RC:$dst, (!nameconcat<Intrinsic>("int_x86_", !strconcat(SSEVer, !strconcat("_", !strconcat(OpcodeStr, FPSizeStr)))) RC:$src1, (mem_frag addr:$src2)))], d>; @@ -256,10 +256,10 @@ def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src), let isAsmParserOnly = 1 in { def VMOVSSmr : SI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src), "movss\t{$src, $dst|$dst, $src}", - [(store FR32:$src, addr:$dst)]>, XS, VEX_4V; + [(store FR32:$src, addr:$dst)]>, XS, VEX; def VMOVSDmr : SI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src), "movsd\t{$src, $dst|$dst, $src}", - [(store FR64:$src, addr:$dst)]>, XD, VEX_4V; + [(store FR64:$src, addr:$dst)]>, XD, VEX; } // Extract and store. 
@@ -340,6 +340,15 @@ def VMOVUPDYmr : VPDI<0x11, MRMDestMem, (outs), (ins f256mem:$dst, VR256:$src), "movupd\t{$src, $dst|$dst, $src}", [(store (v4f64 VR256:$src), addr:$dst)]>, VEX; } + +def : Pat<(int_x86_avx_loadu_ps_256 addr:$src), (VMOVUPSYrm addr:$src)>; +def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src), + (VMOVUPSYmr addr:$dst, VR256:$src)>; + +def : Pat<(int_x86_avx_loadu_pd_256 addr:$src), (VMOVUPDYrm addr:$src)>; +def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src), + (VMOVUPDYmr addr:$dst, VR256:$src)>; + def MOVAPSmr : PSI<0x29, MRMDestMem, (outs), (ins f128mem:$dst, VR128:$src), "movaps\t{$src, $dst|$dst, $src}", [(alignedstore (v4f32 VR128:$src), addr:$dst)]>; @@ -516,6 +525,14 @@ multiclass sse12_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC, [(set DstRC:$dst, (OpNode (ld_frag addr:$src)))]>; } +multiclass sse12_cvt_s_np<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC, + X86MemOperand x86memop, string asm> { + def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm, + []>; + def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm, + []>; +} + multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC, SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag, string asm, Domain d> { @@ -526,35 +543,58 @@ multiclass sse12_cvt_p<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC, } multiclass sse12_vcvt_avx<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC, - SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag, - string asm> { + X86MemOperand x86memop, string asm> { def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src), - asm, []>; + !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>; def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), - (ins DstRC:$src1, x86memop:$src), asm, []>; + (ins DstRC:$src1, x86memop:$src), + !strconcat(asm,"\t{$src, $src1, $dst|$dst, $src1, $src}"), []>; } let isAsmParserOnly = 1 in { -defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32, - "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX; -defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64, - "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX; -defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32, - "cvtsi2ss\t{$src, $src1, $dst|$dst, $src1, $src}">, XS, - VEX_4V; -defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32, - "cvtsi2sd\t{$src, $src1, $dst|$dst, $src1, $src}">, XD, - VEX_4V; +defm VCVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32, + "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX; +defm VCVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32, + "cvttss2si\t{$src, $dst|$dst, $src}">, XS, VEX, + VEX_W; +defm VCVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64, + "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, VEX; +defm VCVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64, + "cvttsd2si\t{$src, $dst|$dst, $src}">, XD, + VEX, VEX_W; + +// The assembler can recognize rr 64-bit instructions by seeing a rxx +// register, but the same isn't true when only using memory operands, +// provide other assembly "l" and "q" forms to address this explicitly +// where appropriate to do so. 
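As the comment above notes, a memory-only operand leaves the source width ambiguous, so explicit "l" and "q" mnemonic suffixes are provided. The two widths are genuinely different operations; a scalar C++ sketch of the distinction:

  #include <cstdint>

  // cvtsi2ssl: signed 32-bit integer to single precision.
  static float cvtsi2ss_l(std::int32_t X) { return static_cast<float>(X); }
  // cvtsi2ssq: signed 64-bit integer to single precision.
  static float cvtsi2ss_q(std::int64_t X) { return static_cast<float>(X); }
  // E.g. the 64-bit value 0x100000001 converts to roughly 4.29e9f, while the
  // same memory read as 32 bits yields 1.0f, hence the need for a suffix.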
+defm VCVTSI2SS : sse12_vcvt_avx<0x2A, GR32, FR32, i32mem, "cvtsi2ss">, XS, + VEX_4V; +defm VCVTSI2SS64 : sse12_vcvt_avx<0x2A, GR64, FR32, i64mem, "cvtsi2ss{q}">, XS, + VEX_4V, VEX_W; +defm VCVTSI2SD : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd">, XD, + VEX_4V; +defm VCVTSI2SDL : sse12_vcvt_avx<0x2A, GR32, FR64, i32mem, "cvtsi2sd{l}">, XD, + VEX_4V; +defm VCVTSI2SD64 : sse12_vcvt_avx<0x2A, GR64, FR64, i64mem, "cvtsi2sd{q}">, XD, + VEX_4V, VEX_W; } defm CVTTSS2SI : sse12_cvt_s<0x2C, FR32, GR32, fp_to_sint, f32mem, loadf32, "cvttss2si\t{$src, $dst|$dst, $src}">, XS; +defm CVTTSS2SI64 : sse12_cvt_s<0x2C, FR32, GR64, fp_to_sint, f32mem, loadf32, + "cvttss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W; defm CVTTSD2SI : sse12_cvt_s<0x2C, FR64, GR32, fp_to_sint, f64mem, loadf64, "cvttsd2si\t{$src, $dst|$dst, $src}">, XD; +defm CVTTSD2SI64 : sse12_cvt_s<0x2C, FR64, GR64, fp_to_sint, f64mem, loadf64, + "cvttsd2si{q}\t{$src, $dst|$dst, $src}">, XD, REX_W; defm CVTSI2SS : sse12_cvt_s<0x2A, GR32, FR32, sint_to_fp, i32mem, loadi32, "cvtsi2ss\t{$src, $dst|$dst, $src}">, XS; +defm CVTSI2SS64 : sse12_cvt_s<0x2A, GR64, FR32, sint_to_fp, i64mem, loadi64, + "cvtsi2ss{q}\t{$src, $dst|$dst, $src}">, XS, REX_W; defm CVTSI2SD : sse12_cvt_s<0x2A, GR32, FR64, sint_to_fp, i32mem, loadi32, "cvtsi2sd\t{$src, $dst|$dst, $src}">, XD; +defm CVTSI2SD64 : sse12_cvt_s<0x2A, GR64, FR64, sint_to_fp, i64mem, loadi64, + "cvtsi2sd{q}\t{$src, $dst|$dst, $src}">, XD, REX_W; // Conversion Instructions Intrinsics - Match intrinsics which expect MM // and/or XMM operand(s). @@ -570,10 +610,12 @@ multiclass sse12_cvt_pint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC, multiclass sse12_cvt_sint<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop, PatFrag ld_frag, string asm> { - def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), asm, - [(set DstRC:$dst, (Int SrcRC:$src))]>; - def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), asm, - [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>; + def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins SrcRC:$src), + !strconcat(asm, "\t{$src, $dst|$dst, $src}"), + [(set DstRC:$dst, (Int SrcRC:$src))]>; + def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), (ins x86memop:$src), + !strconcat(asm, "\t{$src, $dst|$dst, $src}"), + [(set DstRC:$dst, (Int (ld_frag addr:$src)))]>; } multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC, @@ -588,35 +630,79 @@ multiclass sse12_cvt_pint_3addr<bits<8> opc, RegisterClass SrcRC, multiclass sse12_cvt_sint_3addr<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC, Intrinsic Int, X86MemOperand x86memop, - PatFrag ld_frag, string asm> { + PatFrag ld_frag, string asm, bit Is2Addr = 1> { def rr : SI<opc, MRMSrcReg, (outs DstRC:$dst), (ins DstRC:$src1, SrcRC:$src2), - asm, [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>; + !if(Is2Addr, + !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"), + !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), + [(set DstRC:$dst, (Int DstRC:$src1, SrcRC:$src2))]>; def rm : SI<opc, MRMSrcMem, (outs DstRC:$dst), - (ins DstRC:$src1, x86memop:$src2), asm, + (ins DstRC:$src1, x86memop:$src2), + !if(Is2Addr, + !strconcat(asm, "\t{$src2, $dst|$dst, $src2}"), + !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), [(set DstRC:$dst, (Int DstRC:$src1, (ld_frag addr:$src2)))]>; } let isAsmParserOnly = 1 in { defm Int_VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si, - f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS, - 
VEX;
+                  f32mem, load, "cvtss2si">, XS, VEX;
+  defm Int_VCVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
+                  int_x86_sse_cvtss2si64, f32mem, load, "cvtss2si">,
+                  XS, VEX, VEX_W;
  defm Int_VCVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
-                  f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD,
-                  VEX;
+                  f128mem, load, "cvtsd2si">, XD, VEX;
+  defm Int_VCVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64,
+                  int_x86_sse2_cvtsd2si64, f128mem, load, "cvtsd2si">,
+                  XD, VEX, VEX_W;
+
+  // FIXME: The asm matcher has a hack to ignore instructions with _Int and Int_.
+  // Get rid of this hack or rename the intrinsics; there are several
+  // instructions that only match with the intrinsic form, so why create
+  // duplicates just to let them be recognized by the assembler?
+  defm VCVTSD2SI_alt : sse12_cvt_s_np<0x2D, FR64, GR32, f64mem,
+                  "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX;
+  defm VCVTSD2SI64 : sse12_cvt_s_np<0x2D, FR64, GR64, f64mem,
+                  "cvtsd2si\t{$src, $dst|$dst, $src}">, XD, VEX, VEX_W;
}
defm Int_CVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
-                  f32mem, load, "cvtss2si\t{$src, $dst|$dst, $src}">, XS;
+                  f32mem, load, "cvtss2si">, XS;
+defm Int_CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
+                  f32mem, load, "cvtss2si{q}">, XS, REX_W;
defm Int_CVTSD2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse2_cvtsd2si,
-                  f128mem, load, "cvtsd2si\t{$src, $dst|$dst, $src}">, XD;
+                  f128mem, load, "cvtsd2si">, XD;
+defm Int_CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
+                  f128mem, load, "cvtsd2si">, XD, REX_W;
+defm CVTSD2SI64 : sse12_cvt_s_np<0x2D, VR128, GR64, f64mem, "cvtsd2si{q}">, XD,
+                  REX_W;
+
+let isAsmParserOnly = 1 in {
+  defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+          int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss", 0>, XS, VEX_4V;
+  defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+          int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss", 0>, XS, VEX_4V,
+          VEX_W;
+  defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
+          int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd", 0>, XD, VEX_4V;
+  defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+          int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd", 0>, XD,
+          VEX_4V, VEX_W;
+}
let Constraints = "$src1 = $dst" in {
  defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse_cvtsi2ss, i32mem, loadi32,
-                        "cvtsi2ss\t{$src2, $dst|$dst, $src2}">, XS;
+                        "cvtsi2ss">, XS;
+  defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+                        int_x86_sse_cvtsi642ss, i64mem, loadi64,
+                        "cvtsi2ss{q}">, XS, REX_W;
  defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
                        int_x86_sse2_cvtsi2sd, i32mem, loadi32,
-                        "cvtsi2ss\t{$src2, $dst|$dst, $src2}">, XD;
+                        "cvtsi2sd">, XD;
+  defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
+                        int_x86_sse2_cvtsi642sd, i64mem, loadi64,
+                        "cvtsi2sd">, XD, REX_W;
}
// Instructions below don't have an AVX form.
@@ -645,35 +731,48 @@ let Constraints = "$src1 = $dst" in {
/// SSE 1 Only
// Aliases for intrinsics
-let isAsmParserOnly = 1, Pattern = []<dag> in {
-defm Int_VCVTTSS2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
-                int_x86_sse_cvttss2si, f32mem, load,
-                "cvttss2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XS;
-defm Int_VCVTTSD2SI : sse12_cvt_sint_3addr<0x2C, VR128, GR32,
-                int_x86_sse2_cvttsd2si, f128mem, load,
-                "cvttss2si\t{$src2, $src1, $dst|$dst, $src1, $src2}">, XD;
+let isAsmParserOnly = 1 in {
+defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
+                f32mem, load, "cvttss2si">, XS, VEX;
+defm Int_VCVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
+                int_x86_sse_cvttss2si64, f32mem, load,
+                "cvttss2si">, XS, VEX, VEX_W;
+defm Int_VCVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
+                f128mem, load, "cvttss2si">, XD, VEX;
+defm Int_VCVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
+                int_x86_sse2_cvttsd2si64, f128mem, load,
+                "cvttss2si">, XD, VEX, VEX_W;
}
defm Int_CVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
-                f32mem, load, "cvttss2si\t{$src, $dst|$dst, $src}">,
-                XS;
+                f32mem, load, "cvttss2si">, XS;
+defm Int_CVTTSS2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
+                int_x86_sse_cvttss2si64, f32mem, load,
+                "cvttss2si{q}">, XS, REX_W;
defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
-                f128mem, load, "cvttss2si\t{$src, $dst|$dst, $src}">,
-                XD;
+                f128mem, load, "cvttss2si">, XD;
+defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
+                int_x86_sse2_cvttsd2si64, f128mem, load,
+                "cvttss2si{q}">, XD, REX_W;
let isAsmParserOnly = 1, Pattern = []<dag> in {
-defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
-                "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
-defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load,
-                "cvtdq2ps\t{$src, $dst|$dst, $src}",
-                SSEPackedSingle>, TB, VEX;
-defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, f256mem, load,
-                "cvtdq2ps\t{$src, $dst|$dst, $src}",
-                SSEPackedSingle>, TB, VEX;
+defm VCVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load,
+                "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS, VEX;
+defm VCVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load,
+                "cvtss2si\t{$src, $dst|$dst, $src}">, XS, VEX,
+                VEX_W;
+defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load,
+                "cvtdq2ps\t{$src, $dst|$dst, $src}",
+                SSEPackedSingle>, TB, VEX;
+defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, undef, i256mem, load,
+                "cvtdq2ps\t{$src, $dst|$dst, $src}",
+                SSEPackedSingle>, TB, VEX;
}
let Pattern = []<dag> in {
defm CVTSS2SI : sse12_cvt_s<0x2D, FR32, GR32, undef, f32mem, load /*dummy*/,
                "cvtss2si{l}\t{$src, $dst|$dst, $src}">, XS;
-defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, f128mem, load /*dummy*/,
+defm CVTSS2SI64 : sse12_cvt_s<0x2D, FR32, GR64, undef, f32mem, load /*dummy*/,
+                "cvtss2si{q}\t{$src, $dst|$dst, $src}">, XS, REX_W;
+defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, undef, i128mem, load /*dummy*/,
                "cvtdq2ps\t{$src, $dst|$dst, $src}",
                SSEPackedSingle>, TB; /* PD SSE3 form is available */
}
@@ -701,13 +800,11 @@ def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
let isAsmParserOnly = 1 in
defm Int_VCVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128,
-                    int_x86_sse2_cvtsd2ss, f64mem, load,
-                    "cvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}">,
-                    XS, VEX_4V;
+                    int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss", 0>,
+                    XS, VEX_4V;
let Constraints = "$src1 = $dst" in defm
Int_CVTSD2SS: sse12_cvt_sint_3addr<0x5A, VR128, VR128, - int_x86_sse2_cvtsd2ss, f64mem, load, - "cvtsd2ss\t{$src2, $dst|$dst, $src2}">, XS; + int_x86_sse2_cvtsd2ss, f64mem, load, "cvtsd2ss">, XS; // Convert scalar single to scalar double let isAsmParserOnly = 1 in { // SSE2 instructions with XS prefix @@ -806,6 +903,7 @@ def Int_CVTDQ2PDrm : I<0xE6, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src), (bitconvert (memopv2i64 addr:$src))))]>, XS, Requires<[HasSSE2]>; + // Convert packed single/double fp to doubleword let isAsmParserOnly = 1 in { def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), @@ -964,11 +1062,11 @@ def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), let isAsmParserOnly = 1 in { def Int_VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "cvtps2pd\t{$src, $dst|$dst, $src}", + "vcvtps2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))]>, VEX, Requires<[HasAVX]>; def Int_VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), - "cvtps2pd\t{$src, $dst|$dst, $src}", + "vcvtps2pd\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse2_cvtps2pd (load addr:$src)))]>, VEX, Requires<[HasAVX]>; @@ -1029,6 +1127,39 @@ def Int_CVTPD2PSrm : PDI<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), [(set VR128:$dst, (int_x86_sse2_cvtpd2ps (memop addr:$src)))]>; +// AVX 256-bit register conversion intrinsics +// FIXME: Migrate SSE conversion intrinsics matching to use patterns as below +// whenever possible to avoid declaring two versions of each one. +def : Pat<(int_x86_avx_cvtdq2_ps_256 VR256:$src), + (VCVTDQ2PSYrr VR256:$src)>; +def : Pat<(int_x86_avx_cvtdq2_ps_256 (memopv8i32 addr:$src)), + (VCVTDQ2PSYrm addr:$src)>; + +def : Pat<(int_x86_avx_cvt_pd2_ps_256 VR256:$src), + (VCVTPD2PSYrr VR256:$src)>; +def : Pat<(int_x86_avx_cvt_pd2_ps_256 (memopv4f64 addr:$src)), + (VCVTPD2PSYrm addr:$src)>; + +def : Pat<(int_x86_avx_cvt_ps2dq_256 VR256:$src), + (VCVTPS2DQYrr VR256:$src)>; +def : Pat<(int_x86_avx_cvt_ps2dq_256 (memopv8f32 addr:$src)), + (VCVTPS2DQYrm addr:$src)>; + +def : Pat<(int_x86_avx_cvt_ps2_pd_256 VR128:$src), + (VCVTPS2PDYrr VR128:$src)>; +def : Pat<(int_x86_avx_cvt_ps2_pd_256 (memopv4f32 addr:$src)), + (VCVTPS2PDYrm addr:$src)>; + +def : Pat<(int_x86_avx_cvtt_pd2dq_256 VR256:$src), + (VCVTTPD2DQYrr VR256:$src)>; +def : Pat<(int_x86_avx_cvtt_pd2dq_256 (memopv4f64 addr:$src)), + (VCVTTPD2DQYrm addr:$src)>; + +def : Pat<(int_x86_avx_cvtt_ps2dq_256 VR256:$src), + (VCVTTPS2DQYrr VR256:$src)>; +def : Pat<(int_x86_avx_cvtt_ps2dq_256 (memopv8f32 addr:$src)), + (VCVTTPS2DQYrm addr:$src)>; + //===----------------------------------------------------------------------===// // SSE 1 & 2 - Compare Instructions //===----------------------------------------------------------------------===// @@ -1193,16 +1324,14 @@ let isAsmParserOnly = 1 in { "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}", "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}", SSEPackedDouble>, OpSize, VEX_4V; - let Pattern = []<dag> in { - defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, int_x86_sse_cmp_ps, - "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}", - "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}", - SSEPackedSingle>, VEX_4V; - defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_sse2_cmp_pd, - "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}", - "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}", - SSEPackedDouble>, OpSize, VEX_4V; - } + defm VCMPPSY : 
sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_ps_256, + "cmp${cc}ps\t{$src, $src1, $dst|$dst, $src1, $src}", + "cmpps\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}", + SSEPackedSingle>, VEX_4V; + defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, int_x86_avx_cmp_pd_256, + "cmp${cc}pd\t{$src, $src1, $dst|$dst, $src1, $src}", + "cmppd\t{$src2, $src, $src1, $dst|$dst, $src1, $src, $src2}", + SSEPackedDouble>, OpSize, VEX_4V; } let Constraints = "$src1 = $dst" in { defm CMPPS : sse12_cmp_packed<VR128, f128mem, int_x86_sse_cmp_ps, @@ -1232,24 +1361,30 @@ def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)), multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop, ValueType vt, string asm, PatFrag mem_frag, Domain d, bit IsConvertibleToThreeAddress = 0> { - def rmi : PIi8<0xC6, MRMSrcMem, (outs VR128:$dst), - (ins VR128:$src1, f128mem:$src2, i8imm:$src3), asm, - [(set VR128:$dst, (vt (shufp:$src3 - VR128:$src1, (mem_frag addr:$src2))))], d>; + def rmi : PIi8<0xC6, MRMSrcMem, (outs RC:$dst), + (ins RC:$src1, f128mem:$src2, i8imm:$src3), asm, + [(set RC:$dst, (vt (shufp:$src3 + RC:$src1, (mem_frag addr:$src2))))], d>; let isConvertibleToThreeAddress = IsConvertibleToThreeAddress in - def rri : PIi8<0xC6, MRMSrcReg, (outs VR128:$dst), - (ins VR128:$src1, VR128:$src2, i8imm:$src3), asm, - [(set VR128:$dst, - (vt (shufp:$src3 VR128:$src1, VR128:$src2)))], d>; + def rri : PIi8<0xC6, MRMSrcReg, (outs RC:$dst), + (ins RC:$src1, RC:$src2, i8imm:$src3), asm, + [(set RC:$dst, + (vt (shufp:$src3 RC:$src1, RC:$src2)))], d>; } let isAsmParserOnly = 1 in { - defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32, - "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", - memopv4f32, SSEPackedSingle>, VEX_4V; - defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64, - "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}", - memopv2f64, SSEPackedDouble>, OpSize, VEX_4V; + defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32, + "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + memopv4f32, SSEPackedSingle>, VEX_4V; + defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32, + "shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + memopv8f32, SSEPackedSingle>, VEX_4V; + defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64, + "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}", + memopv2f64, SSEPackedDouble>, OpSize, VEX_4V; + defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64, + "shufpd\t{$src3, $src2, $src1, $dst|$dst, $src2, $src2, $src3}", + memopv4f64, SSEPackedDouble>, OpSize, VEX_4V; } let Constraints = "$src1 = $dst" in { @@ -1351,12 +1486,23 @@ let isAsmParserOnly = 1 in { defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd", SSEPackedDouble>, OpSize, VEX; - // FIXME: merge with multiclass above when the intrinsics come. 
- def VMOVMSKPSYrr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src), + defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256, + "movmskps", SSEPackedSingle>, VEX; + defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256, + "movmskpd", SSEPackedDouble>, OpSize, + VEX; + + // Assembler Only + def VMOVMSKPSr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), + "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX; + def VMOVMSKPDr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), + "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize, + VEX; + def VMOVMSKPSYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src), "movmskps\t{$src, $dst|$dst, $src}", [], SSEPackedSingle>, VEX; - def VMOVMSKPDYrr : PI<0x50, MRMSrcReg, (outs GR32:$dst), (ins VR256:$src), + def VMOVMSKPDYr64r : PI<0x50, MRMSrcReg, (outs GR64:$dst), (ins VR256:$src), "movmskpd\t{$src, $dst|$dst, $src}", [], SSEPackedDouble>, OpSize, - VEX; + VEX; } //===----------------------------------------------------------------------===// @@ -1536,6 +1682,9 @@ let isCommutable = 0 in /// /// These three forms can each be reg+reg or reg+mem. /// + +/// FIXME: once all 256-bit intrinsics are matched, cleanup and refactor those +/// classes below multiclass basic_sse12_fp_binop_s<bits<8> opc, string OpcodeStr, SDNode OpNode, bit Is2Addr = 1> { defm SS : sse12_fp_scalar<opc, !strconcat(OpcodeStr, "ss"), @@ -1565,7 +1714,7 @@ multiclass basic_sse12_fp_binop_p_y<bits<8> opc, string OpcodeStr, } multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr, - bit Is2Addr = 1> { + bit Is2Addr = 1> { defm SS : sse12_fp_scalar_int<opc, OpcodeStr, VR128, !strconcat(OpcodeStr, "ss"), "", "_ss", ssmem, sse_load_f32, Is2Addr>, XS; defm SD : sse12_fp_scalar_int<opc, OpcodeStr, VR128, @@ -1573,37 +1722,57 @@ multiclass basic_sse12_fp_binop_s_int<bits<8> opc, string OpcodeStr, } multiclass basic_sse12_fp_binop_p_int<bits<8> opc, string OpcodeStr, - bit Is2Addr = 1> { + bit Is2Addr = 1> { defm PS : sse12_fp_packed_int<opc, OpcodeStr, VR128, - !strconcat(OpcodeStr, "ps"), "", "_ps", f128mem, memopv4f32, + !strconcat(OpcodeStr, "ps"), "sse", "_ps", f128mem, memopv4f32, SSEPackedSingle, Is2Addr>, TB; defm PD : sse12_fp_packed_int<opc, OpcodeStr, VR128, - !strconcat(OpcodeStr, "pd"), "2", "_pd", f128mem, memopv2f64, + !strconcat(OpcodeStr, "pd"), "sse2", "_pd", f128mem, memopv2f64, SSEPackedDouble, Is2Addr>, TB, OpSize; } +multiclass basic_sse12_fp_binop_p_y_int<bits<8> opc, string OpcodeStr> { + defm PSY : sse12_fp_packed_int<opc, OpcodeStr, VR256, + !strconcat(OpcodeStr, "ps"), "avx", "_ps_256", f256mem, memopv8f32, + SSEPackedSingle, 0>, TB; + + defm PDY : sse12_fp_packed_int<opc, OpcodeStr, VR256, + !strconcat(OpcodeStr, "pd"), "avx", "_pd_256", f256mem, memopv4f64, + SSEPackedDouble, 0>, TB, OpSize; +} + // Binary Arithmetic instructions let isAsmParserOnly = 1 in { defm VADD : basic_sse12_fp_binop_s<0x58, "add", fadd, 0>, + basic_sse12_fp_binop_s_int<0x58, "add", 0>, basic_sse12_fp_binop_p<0x58, "add", fadd, 0>, basic_sse12_fp_binop_p_y<0x58, "add", fadd>, VEX_4V; defm VMUL : basic_sse12_fp_binop_s<0x59, "mul", fmul, 0>, + basic_sse12_fp_binop_s_int<0x59, "mul", 0>, basic_sse12_fp_binop_p<0x59, "mul", fmul, 0>, basic_sse12_fp_binop_p_y<0x59, "mul", fmul>, VEX_4V; let isCommutable = 0 in { defm VSUB : basic_sse12_fp_binop_s<0x5C, "sub", fsub, 0>, + basic_sse12_fp_binop_s_int<0x5C, "sub", 0>, basic_sse12_fp_binop_p<0x5C, "sub", fsub, 0>, 
basic_sse12_fp_binop_p_y<0x5C, "sub", fsub>, VEX_4V; defm VDIV : basic_sse12_fp_binop_s<0x5E, "div", fdiv, 0>, + basic_sse12_fp_binop_s_int<0x5E, "div", 0>, basic_sse12_fp_binop_p<0x5E, "div", fdiv, 0>, basic_sse12_fp_binop_p_y<0x5E, "div", fdiv>, VEX_4V; defm VMAX : basic_sse12_fp_binop_s<0x5F, "max", X86fmax, 0>, + basic_sse12_fp_binop_s_int<0x5F, "max", 0>, basic_sse12_fp_binop_p<0x5F, "max", X86fmax, 0>, - basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>, VEX_4V; + basic_sse12_fp_binop_p_int<0x5F, "max", 0>, + basic_sse12_fp_binop_p_y<0x5F, "max", X86fmax>, + basic_sse12_fp_binop_p_y_int<0x5F, "max">, VEX_4V; defm VMIN : basic_sse12_fp_binop_s<0x5D, "min", X86fmin, 0>, + basic_sse12_fp_binop_s_int<0x5D, "min", 0>, basic_sse12_fp_binop_p<0x5D, "min", X86fmin, 0>, + basic_sse12_fp_binop_p_int<0x5D, "min", 0>, + basic_sse12_fp_binop_p_y_int<0x5D, "min">, basic_sse12_fp_binop_p_y<0x5D, "min", X86fmin>, VEX_4V; } } @@ -1668,20 +1837,20 @@ multiclass sse1_fp_unop_s<bits<8> opc, string OpcodeStr, multiclass sse1_fp_unop_s_avx<bits<8> opc, string OpcodeStr, SDNode OpNode, Intrinsic F32Int> { def SSr : SSI<opc, MRMSrcReg, (outs FR32:$dst), (ins FR32:$src1, FR32:$src2), - !strconcat(!strconcat("v", OpcodeStr), + !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>; def SSm : I<opc, MRMSrcMem, (outs FR32:$dst), (ins FR32:$src1, f32mem:$src2), - !strconcat(!strconcat("v", OpcodeStr), + !strconcat(OpcodeStr, "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, XS, Requires<[HasAVX, OptForSize]>; - def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), - (ins VR128:$src1, VR128:$src2), - !strconcat(!strconcat("v", OpcodeStr), - "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>; - def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), - (ins VR128:$src1, ssmem:$src2), - !strconcat(!strconcat("v", OpcodeStr), - "ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>; + def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), + !strconcat(OpcodeStr, + "ss\t{$src, $dst, $dst|$dst, $dst, $src}"), + [(set VR128:$dst, (F32Int VR128:$src))]>; + def SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst), (ins ssmem:$src), + !strconcat(OpcodeStr, + "ss\t{$src, $dst, $dst|$dst, $dst, $src}"), + [(set VR128:$dst, (F32Int sse_load_f32:$src))]>; } /// sse1_fp_unop_p - SSE1 unops in packed form. @@ -1715,6 +1884,16 @@ multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr, [(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))]>; } +/// sse1_fp_unop_p_y_int - AVX 256-bit intrinsics unops in packed forms. +multiclass sse1_fp_unop_p_y_int<bits<8> opc, string OpcodeStr, + Intrinsic V4F32Int> { + def PSYr_Int : PSI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src), + !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"), + [(set VR256:$dst, (V4F32Int VR256:$src))]>; + def PSYm_Int : PSI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src), + !strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"), + [(set VR256:$dst, (V4F32Int (memopv8f32 addr:$src)))]>; +} /// sse2_fp_unop_s - SSE2 unops in scalar form. multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr, @@ -1738,21 +1917,19 @@ multiclass sse2_fp_unop_s<bits<8> opc, string OpcodeStr, /// sse2_fp_unop_s_avx - AVX SSE2 unops in scalar form. 
multiclass sse2_fp_unop_s_avx<bits<8> opc, string OpcodeStr, SDNode OpNode, Intrinsic F64Int> { - def SDr : VSDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2), - !strconcat(OpcodeStr, - "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>; - def SDm : VSDI<opc, MRMSrcMem, (outs FR64:$dst), - (ins FR64:$src1, f64mem:$src2), - !strconcat(OpcodeStr, - "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>; - def SDr_Int : VSDI<opc, MRMSrcReg, (outs VR128:$dst), - (ins VR128:$src1, VR128:$src2), - !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), - []>; - def SDm_Int : VSDI<opc, MRMSrcMem, (outs VR128:$dst), - (ins VR128:$src1, sdmem:$src2), - !strconcat(OpcodeStr, "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), - []>; + def SDr : SDI<opc, MRMSrcReg, (outs FR64:$dst), (ins FR64:$src1, FR64:$src2), + !strconcat(OpcodeStr, + "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>; + def SDm : SDI<opc, MRMSrcMem, (outs FR64:$dst), + (ins FR64:$src1, f64mem:$src2), + !strconcat(OpcodeStr, + "sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>; + def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), + !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"), + [(set VR128:$dst, (F64Int VR128:$src))]>; + def SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst), (ins sdmem:$src), + !strconcat(OpcodeStr, "sd\t{$src, $dst, $dst|$dst, $dst, $src}"), + [(set VR128:$dst, (F64Int sse_load_f64:$src))]>; } /// sse2_fp_unop_p - SSE2 unops in vector forms. @@ -1787,29 +1964,48 @@ multiclass sse2_fp_unop_p_int<bits<8> opc, string OpcodeStr, [(set VR128:$dst, (V2F64Int (memopv2f64 addr:$src)))]>; } +/// sse2_fp_unop_p_y_int - AVX 256-bit intrinsic unops in vector forms. +multiclass sse2_fp_unop_p_y_int<bits<8> opc, string OpcodeStr, + Intrinsic V2F64Int> { + def PDYr_Int : PDI<opc, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src), + !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"), + [(set VR256:$dst, (V2F64Int VR256:$src))]>; + def PDYm_Int : PDI<opc, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src), + !strconcat(OpcodeStr, "pd\t{$src, $dst|$dst, $src}"), + [(set VR256:$dst, (V2F64Int (memopv4f64 addr:$src)))]>; +} + let isAsmParserOnly = 1, Predicates = [HasAVX] in { // Square root. - defm VSQRT : sse1_fp_unop_s_avx<0x51, "sqrt", fsqrt, int_x86_sse_sqrt_ss>, - sse2_fp_unop_s_avx<0x51, "sqrt", fsqrt, int_x86_sse2_sqrt_sd>, + defm VSQRT : sse1_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse_sqrt_ss>, + sse2_fp_unop_s_avx<0x51, "vsqrt", fsqrt, int_x86_sse2_sqrt_sd>, VEX_4V; defm VSQRT : sse1_fp_unop_p<0x51, "vsqrt", fsqrt>, sse2_fp_unop_p<0x51, "vsqrt", fsqrt>, sse1_fp_unop_p_y<0x51, "vsqrt", fsqrt>, sse2_fp_unop_p_y<0x51, "vsqrt", fsqrt>, + sse1_fp_unop_p_int<0x51, "vsqrt", int_x86_sse_sqrt_ps>, + sse2_fp_unop_p_int<0x51, "vsqrt", int_x86_sse2_sqrt_pd>, + sse1_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_ps_256>, + sse2_fp_unop_p_y_int<0x51, "vsqrt", int_x86_avx_sqrt_pd_256>, VEX; // Reciprocal approximations. Note that these typically require refinement // in order to obtain suitable precision. 
-  defm VRSQRT : sse1_fp_unop_s_avx<0x52, "rsqrt", X86frsqrt,
+  defm VRSQRT : sse1_fp_unop_s_avx<0x52, "vrsqrt", X86frsqrt,
                int_x86_sse_rsqrt_ss>, VEX_4V;
  defm VRSQRT : sse1_fp_unop_p<0x52, "vrsqrt", X86frsqrt>,
-                sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>, VEX;
+                sse1_fp_unop_p_y<0x52, "vrsqrt", X86frsqrt>,
+                sse1_fp_unop_p_y_int<0x52, "vrsqrt", int_x86_avx_rsqrt_ps_256>,
+                sse1_fp_unop_p_int<0x52, "vrsqrt", int_x86_sse_rsqrt_ps>, VEX;
-  defm VRCP : sse1_fp_unop_s_avx<0x53, "rcp", X86frcp, int_x86_sse_rcp_ss>,
+  defm VRCP : sse1_fp_unop_s_avx<0x53, "vrcp", X86frcp, int_x86_sse_rcp_ss>,
                VEX_4V;
  defm VRCP : sse1_fp_unop_p<0x53, "vrcp", X86frcp>,
-                sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>, VEX;
+                sse1_fp_unop_p_y<0x53, "vrcp", X86frcp>,
+                sse1_fp_unop_p_y_int<0x53, "vrcp", int_x86_avx_rcp_ps_256>,
+                sse1_fp_unop_p_int<0x53, "vrcp", int_x86_sse_rcp_ps>, VEX;
}
// Square root.
@@ -1898,6 +2094,13 @@ let isAsmParserOnly = 1 in {
  }
}
+def : Pat<(int_x86_avx_movnt_dq_256 addr:$dst, VR256:$src),
+          (VMOVNTDQYmr addr:$dst, VR256:$src)>;
+def : Pat<(int_x86_avx_movnt_pd_256 addr:$dst, VR256:$src),
+          (VMOVNTPDYmr addr:$dst, VR256:$src)>;
+def : Pat<(int_x86_avx_movnt_ps_256 addr:$dst, VR256:$src),
+          (VMOVNTPSYmr addr:$dst, VR256:$src)>;
+
def MOVNTPSmr_Int : PSI<0x2B, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
                    "movntps\t{$src, $dst|$dst, $src}",
                    [(int_x86_sse_movnt_ps addr:$dst, VR128:$src)]>;
@@ -1961,11 +2164,14 @@ def PREFETCHNTA : PSI<0x18, MRM0m, (outs), (ins i8mem:$src),
// Load, store, and memory fence
def SFENCE : I<0xAE, MRM_F8, (outs), (ins), "sfence", [(int_x86_sse_sfence)]>, TB, Requires<[HasSSE1]>;
+def : Pat<(X86SFence), (SFENCE)>;
// Alias instructions that map zero vector to pxor / xorp* for sse.
// We set canFoldAsLoad because this can be converted to a constant-pool
// load of an all-zeros value if folding it would be beneficial.
-// FIXME: Change encoding to pseudo!
+// FIXME: Change encoding to pseudo! This is blocked right now by the x86
+// JIT implementation; it does not expand the instructions below like
+// X86MCInstLower does.
let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1,
    isCodeGenOnly = 1 in {
def V_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "",
@@ -1977,6 +2183,26 @@ def V_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "",
               [(set VR128:$dst, (v4i32 immAllZerosV))]>;
}
+// The same as done above but for AVX. The 128-bit versions are the
+// same, but re-encoded. The 256-bit version does not support the PI form.
+// FIXME: Change encoding to pseudo! This is blocked right now by the x86
+// JIT implementation; it does not expand the instructions below like
+// X86MCInstLower does.
+let isReMaterializable = 1, isAsCheapAsAMove = 1, canFoldAsLoad = 1, + isCodeGenOnly = 1, Predicates = [HasAVX] in { +def AVX_SET0PS : PSI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "", + [(set VR128:$dst, (v4f32 immAllZerosV))]>, VEX_4V; +def AVX_SET0PD : PDI<0x57, MRMInitReg, (outs VR128:$dst), (ins), "", + [(set VR128:$dst, (v2f64 immAllZerosV))]>, VEX_4V; +def AVX_SET0PSY : PSI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "", + [(set VR256:$dst, (v8f32 immAllZerosV))]>, VEX_4V; +def AVX_SET0PDY : PDI<0x57, MRMInitReg, (outs VR256:$dst), (ins), "", + [(set VR256:$dst, (v4f64 immAllZerosV))]>, VEX_4V; +let ExeDomain = SSEPackedInt in +def AVX_SET0PI : PDI<0xEF, MRMInitReg, (outs VR128:$dst), (ins), "", + [(set VR128:$dst, (v4i32 immAllZerosV))]>; +} + def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>; def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>; def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>; @@ -2003,35 +2229,47 @@ def STMXCSR : PSI<0xAE, MRM3m, (outs), (ins i32mem:$dst), //===---------------------------------------------------------------------===// // SSE2 - Move Aligned/Unaligned Packed Integer Instructions //===---------------------------------------------------------------------===// + let ExeDomain = SSEPackedInt in { // SSE integer instructions let isAsmParserOnly = 1 in { - let neverHasSideEffects = 1 in - def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "movdqa\t{$src, $dst|$dst, $src}", []>, VEX; - def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX; + let neverHasSideEffects = 1 in { + def VMOVDQArr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), + "movdqa\t{$src, $dst|$dst, $src}", []>, VEX; + def VMOVDQAYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src), + "movdqa\t{$src, $dst|$dst, $src}", []>, VEX; + } + def VMOVDQUrr : VPDI<0x6F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), + "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX; + def VMOVDQUYrr : VPDI<0x6F, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src), + "movdqu\t{$src, $dst|$dst, $src}", []>, XS, VEX; let canFoldAsLoad = 1, mayLoad = 1 in { - def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), - "movdqa\t{$src, $dst|$dst, $src}", - [/*(set VR128:$dst, (alignedloadv2i64 addr:$src))*/]>, - VEX; - def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), - "vmovdqu\t{$src, $dst|$dst, $src}", - [/*(set VR128:$dst, (loadv2i64 addr:$src))*/]>, - XS, VEX, Requires<[HasAVX]>; + def VMOVDQArm : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), + "movdqa\t{$src, $dst|$dst, $src}", []>, VEX; + def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src), + "movdqa\t{$src, $dst|$dst, $src}", []>, VEX; + let Predicates = [HasAVX] in { + def VMOVDQUrm : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), + "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX; + def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src), + "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX; + } } let mayStore = 1 in { - def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs), - (ins i128mem:$dst, VR128:$src), - "movdqa\t{$src, $dst|$dst, $src}", - [/*(alignedstore (v2i64 VR128:$src), addr:$dst)*/]>, VEX; - def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), - "vmovdqu\t{$src, $dst|$dst, $src}", - [/*(store (v2i64 VR128:$src), addr:$dst)*/]>, - XS, VEX, Requires<[HasAVX]>; + def VMOVDQAmr : VPDI<0x7F, MRMDestMem, (outs), + (ins i128mem:$dst, 
VR128:$src), + "movdqa\t{$src, $dst|$dst, $src}", []>, VEX; + def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs), + (ins i256mem:$dst, VR256:$src), + "movdqa\t{$src, $dst|$dst, $src}", []>, VEX; + let Predicates = [HasAVX] in { + def VMOVDQUmr : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), + "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX; + def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src), + "vmovdqu\t{$src, $dst|$dst, $src}",[]>, XS, VEX; + } } } @@ -2084,6 +2322,10 @@ def MOVDQUmr_Int : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src), } // ExeDomain = SSEPackedInt +def : Pat<(int_x86_avx_loadu_dq_256 addr:$src), (VMOVDQUYrm addr:$src)>; +def : Pat<(int_x86_avx_storeu_dq_256 addr:$dst, VR256:$src), + (VMOVDQUYmr addr:$dst, VR256:$src)>; + //===---------------------------------------------------------------------===// // SSE2 - Packed Integer Arithmetic Instructions //===---------------------------------------------------------------------===// @@ -2376,6 +2618,25 @@ let ExeDomain = SSEPackedInt in { } } // Constraints = "$src1 = $dst" +let Predicates = [HasAVX] in { + def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2), + (v2i64 (VPSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>; + def : Pat<(int_x86_sse2_psrl_dq VR128:$src1, imm:$src2), + (v2i64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>; + def : Pat<(int_x86_sse2_psll_dq_bs VR128:$src1, imm:$src2), + (v2i64 (VPSLLDQri VR128:$src1, imm:$src2))>; + def : Pat<(int_x86_sse2_psrl_dq_bs VR128:$src1, imm:$src2), + (v2i64 (VPSRLDQri VR128:$src1, imm:$src2))>; + def : Pat<(v2f64 (X86fsrl VR128:$src1, i32immSExt8:$src2)), + (v2f64 (VPSRLDQri VR128:$src1, (BYTE_imm imm:$src2)))>; + + // Shift up / down and insert zero's. + def : Pat<(v2i64 (X86vshl VR128:$src, (i8 imm:$amt))), + (v2i64 (VPSLLDQri VR128:$src, (BYTE_imm imm:$amt)))>; + def : Pat<(v2i64 (X86vshr VR128:$src, (i8 imm:$amt))), + (v2i64 (VPSRLDQri VR128:$src, (BYTE_imm imm:$amt)))>; +} + let Predicates = [HasSSE2] in { def : Pat<(int_x86_sse2_psll_dq VR128:$src1, imm:$src2), (v2i64 (PSLLDQri VR128:$src1, (BYTE_imm imm:$src2)))>; @@ -2662,11 +2923,16 @@ def PEXTRWri : PDIi8<0xC5, MRMSrcReg, imm:$src2))]>; // Insert -let isAsmParserOnly = 1, Predicates = [HasAVX] in - defm PINSRW : sse2_pinsrw<0>, OpSize, VEX_4V; +let isAsmParserOnly = 1, Predicates = [HasAVX] in { + defm VPINSRW : sse2_pinsrw<0>, OpSize, VEX_4V; + def VPINSRWrr64i : Ii8<0xC4, MRMSrcReg, (outs VR128:$dst), + (ins VR128:$src1, GR64:$src2, i32i8imm:$src3), + "vpinsrw\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>, OpSize, VEX_4V; +} let Constraints = "$src1 = $dst" in - defm VPINSRW : sse2_pinsrw, TB, OpSize; + defm PINSRW : sse2_pinsrw, TB, OpSize, Requires<[HasSSE2]>; } // ExeDomain = SSEPackedInt @@ -2676,10 +2942,13 @@ let Constraints = "$src1 = $dst" in let ExeDomain = SSEPackedInt in { -let isAsmParserOnly = 1 in -def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), +let isAsmParserOnly = 1 in { +def VPMOVMSKBrr : VPDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), "pmovmskb\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>, VEX; +def VPMOVMSKBr64r : VPDI<0xD7, MRMSrcReg, (outs GR64:$dst), (ins VR128:$src), + "pmovmskb\t{$src, $dst|$dst, $src}", []>, VEX; +} def PMOVMSKBrr : PDI<0xD7, MRMSrcReg, (outs GR32:$dst), (ins VR128:$src), "pmovmskb\t{$src, $dst|$dst, $src}", [(set GR32:$dst, (int_x86_sse2_pmovmskb_128 VR128:$src))]>; @@ -2939,18 +3208,20 @@ def : Pat<(v2i64 (X86vzmovl 
(bc_v2i64 (loadv4i32 addr:$src)))), // Instructions to match in the assembler let isAsmParserOnly = 1 in { -// This instructions is in fact an alias to movd with 64 bit dst def VMOVQs64rr : VPDI<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR64:$src), "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W; def VMOVQd64rr : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), "movq\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W; +// Recognize "movd" with GR64 destination, but encode as a "movq" +def VMOVQd64rr_alt : VPDI<0x7E, MRMDestReg, (outs GR64:$dst), (ins VR128:$src), + "movd\t{$src, $dst|$dst, $src}", []>, VEX, VEX_W; } // Instructions for the disassembler // xr = XMM register // xm = mem64 -let isAsmParserOnly = 1 in +let isAsmParserOnly = 1, Predicates = [HasAVX] in def VMOVQxrxr: I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "vmovq\t{$src, $dst|$dst, $src}", []>, VEX, XS; def MOVQxrxr : I<0x7E, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), @@ -2970,19 +3241,14 @@ def LFENCE : I<0xAE, MRM_E8, (outs), (ins), "lfence", [(int_x86_sse2_lfence)]>, TB, Requires<[HasSSE2]>; def MFENCE : I<0xAE, MRM_F0, (outs), (ins), "mfence", [(int_x86_sse2_mfence)]>, TB, Requires<[HasSSE2]>; +def : Pat<(X86LFence), (LFENCE)>; +def : Pat<(X86MFence), (MFENCE)>; + // Pause. This "instruction" is encoded as "rep; nop", so even though it // was introduced with SSE2, it's backward compatible. def PAUSE : I<0x90, RawFrm, (outs), (ins), "pause", []>, REP; -//TODO: custom lower this so as to never even generate the noop -def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), - (i8 0)), (NOOP)>; -def : Pat<(membarrier (i8 0), (i8 0), (i8 0), (i8 1), (i8 1)), (SFENCE)>; -def : Pat<(membarrier (i8 1), (i8 0), (i8 0), (i8 0), (i8 1)), (LFENCE)>; -def : Pat<(membarrier (i8 imm), (i8 imm), (i8 imm), (i8 imm), - (i8 1)), (MFENCE)>; - // Alias instructions that map zero vector to pxor / xorp* for sse. // We set canFoldAsLoad because this can be converted to a constant-pool // load of an all-ones value if folding it would be beneficial. 
@@ -3027,13 +3293,13 @@ def CVTPD2DQrr : S3DI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), // Convert Packed DW Integers to Packed Double FP let isAsmParserOnly = 1, Predicates = [HasAVX] in { def VCVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), - "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX; + "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX; def VCVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), - "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX; + "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX; def VCVTDQ2PDYrm : S3SI<0xE6, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src), - "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX; + "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX; def VCVTDQ2PDYrr : S3SI<0xE6, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src), - "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX; + "vcvtdq2pd\t{$src, $dst|$dst, $src}", []>, VEX; } def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), @@ -3041,6 +3307,17 @@ def CVTDQ2PDrm : S3SI<0xE6, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), def CVTDQ2PDrr : S3SI<0xE6, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), "cvtdq2pd\t{$src, $dst|$dst, $src}", []>; +// AVX 256-bit register conversion intrinsics +def : Pat<(int_x86_avx_cvtdq2_pd_256 VR128:$src), + (VCVTDQ2PDYrr VR128:$src)>; +def : Pat<(int_x86_avx_cvtdq2_pd_256 (memopv4i32 addr:$src)), + (VCVTDQ2PDYrm addr:$src)>; + +def : Pat<(int_x86_avx_cvt_pd2dq_256 VR256:$src), + (VCVTPD2DQYrr VR256:$src)>; +def : Pat<(int_x86_avx_cvt_pd2dq_256 (memopv4f64 addr:$src)), + (VCVTPD2DQYrm addr:$src)>; + //===---------------------------------------------------------------------===// // SSE3 - Move Instructions //===---------------------------------------------------------------------===// @@ -3057,9 +3334,20 @@ def rm : S3SI<op, MRMSrcMem, (outs VR128:$dst), (ins f128mem:$src), (memopv4f32 addr:$src), (undef)))]>; } +multiclass sse3_replicate_sfp_y<bits<8> op, PatFrag rep_frag, + string OpcodeStr> { +def rr : S3SI<op, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>; +def rm : S3SI<op, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), []>; +} + let isAsmParserOnly = 1, Predicates = [HasAVX] in { -defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX; -defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX; + // FIXME: Merge above classes when we have patterns for the ymm version + defm VMOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "vmovshdup">, VEX; + defm VMOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "vmovsldup">, VEX; + defm VMOVSHDUPY : sse3_replicate_sfp_y<0x16, movshdup, "vmovshdup">, VEX; + defm VMOVSLDUPY : sse3_replicate_sfp_y<0x12, movsldup, "vmovsldup">, VEX; } defm MOVSHDUP : sse3_replicate_sfp<0x16, movshdup, "movshdup">; defm MOVSLDUP : sse3_replicate_sfp<0x12, movsldup, "movsldup">; @@ -3076,15 +3364,31 @@ def rm : S3DI<0x12, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src), (undef))))]>; } -let isAsmParserOnly = 1, Predicates = [HasAVX] in - defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX; +multiclass sse3_replicate_dfp_y<string OpcodeStr> { +def rr : S3DI<0x12, MRMSrcReg, (outs VR256:$dst), (ins VR256:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), + []>; +def rm : S3DI<0x12, MRMSrcMem, (outs VR256:$dst), (ins f256mem:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), + []>; +} + +let isAsmParserOnly = 1, Predicates = [HasAVX] 
in { + // FIXME: Merge above classes when we have patterns for the ymm version + defm VMOVDDUP : sse3_replicate_dfp<"vmovddup">, VEX; + defm VMOVDDUPY : sse3_replicate_dfp_y<"vmovddup">, VEX; +} defm MOVDDUP : sse3_replicate_dfp<"movddup">; // Move Unaligned Integer -let isAsmParserOnly = 1 in +let isAsmParserOnly = 1, Predicates = [HasAVX] in { def VLDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), - "vlddqu\t{$src, $dst|$dst, $src}", - [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX; + "vlddqu\t{$src, $dst|$dst, $src}", + [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>, VEX; + def VLDDQUYrm : S3DI<0xF0, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src), + "vlddqu\t{$src, $dst|$dst, $src}", + [(set VR256:$dst, (int_x86_avx_ldu_dq_256 addr:$src))]>, VEX; +} def LDDQUrm : S3DI<0xF0, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), "lddqu\t{$src, $dst|$dst, $src}", [(set VR128:$dst, (int_x86_sse3_ldu_dq addr:$src))]>; @@ -3125,35 +3429,39 @@ let AddedComplexity = 20 in // SSE3 - Arithmetic //===---------------------------------------------------------------------===// -multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, bit Is2Addr = 1> { +multiclass sse3_addsub<Intrinsic Int, string OpcodeStr, RegisterClass RC, + X86MemOperand x86memop, bit Is2Addr = 1> { def rr : I<0xD0, MRMSrcReg, - (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), + (outs RC:$dst), (ins RC:$src1, RC:$src2), !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set VR128:$dst, (Int VR128:$src1, - VR128:$src2))]>; + [(set RC:$dst, (Int RC:$src1, RC:$src2))]>; def rm : I<0xD0, MRMSrcMem, - (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), + (outs RC:$dst), (ins RC:$src1, x86memop:$src2), !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set VR128:$dst, (Int VR128:$src1, - (memop addr:$src2)))]>; - + [(set RC:$dst, (Int RC:$src1, (memop addr:$src2)))]>; } let isAsmParserOnly = 1, Predicates = [HasAVX], ExeDomain = SSEPackedDouble in { - defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", 0>, XD, - VEX_4V; - defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", 0>, OpSize, - VEX_4V; + defm VADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "vaddsubps", VR128, + f128mem, 0>, XD, VEX_4V; + defm VADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "vaddsubpd", VR128, + f128mem, 0>, OpSize, VEX_4V; + defm VADDSUBPSY : sse3_addsub<int_x86_avx_addsub_ps_256, "vaddsubps", VR256, + f256mem, 0>, XD, VEX_4V; + defm VADDSUBPDY : sse3_addsub<int_x86_avx_addsub_pd_256, "vaddsubpd", VR256, + f256mem, 0>, OpSize, VEX_4V; } let Constraints = "$src1 = $dst", Predicates = [HasSSE3], ExeDomain = SSEPackedDouble in { - defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps">, XD; - defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd">, TB, OpSize; + defm ADDSUBPS : sse3_addsub<int_x86_sse3_addsub_ps, "addsubps", VR128, + f128mem>, XD; + defm ADDSUBPD : sse3_addsub<int_x86_sse3_addsub_pd, "addsubpd", VR128, + f128mem>, TB, OpSize; } //===---------------------------------------------------------------------===// @@ -3161,61 +3469,72 @@ let Constraints = "$src1 = $dst", Predicates = [HasSSE3], //===---------------------------------------------------------------------===// // Horizontal ops -class S3D_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1> - : S3DI<o, MRMSrcReg, (outs 
VR128:$dst), (ins VR128:$src1, VR128:$src2), +multiclass S3D_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC, + X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> { + def rr : S3DI<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2), !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set VR128:$dst, (v4f32 (IntId VR128:$src1, VR128:$src2)))]>; -class S3D_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1> - : S3DI<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), + [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>; + + def rm : S3DI<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2), !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set VR128:$dst, (v4f32 (IntId VR128:$src1, (memop addr:$src2))))]>; -class S3_Intrr<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1> - : S3I<o, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), + [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>; +} +multiclass S3_Int<bits<8> o, string OpcodeStr, ValueType vt, RegisterClass RC, + X86MemOperand x86memop, Intrinsic IntId, bit Is2Addr = 1> { + def rr : S3I<o, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2), !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set VR128:$dst, (v2f64 (IntId VR128:$src1, VR128:$src2)))]>; -class S3_Intrm<bits<8> o, string OpcodeStr, Intrinsic IntId, bit Is2Addr = 1> - : S3I<o, MRMSrcMem, (outs VR128:$dst), (ins VR128:$src1, f128mem:$src2), + [(set RC:$dst, (vt (IntId RC:$src1, RC:$src2)))]>; + + def rm : S3I<o, MRMSrcMem, (outs RC:$dst), (ins RC:$src1, x86memop:$src2), !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set VR128:$dst, (v2f64 (IntId VR128:$src1, (memopv2f64 addr:$src2))))]>; + [(set RC:$dst, (vt (IntId RC:$src1, (memop addr:$src2))))]>; +} let isAsmParserOnly = 1, Predicates = [HasAVX] in { - def VHADDPSrr : S3D_Intrr<0x7C, "vhaddps", int_x86_sse3_hadd_ps, 0>, VEX_4V; - def VHADDPSrm : S3D_Intrm<0x7C, "vhaddps", int_x86_sse3_hadd_ps, 0>, VEX_4V; - def VHADDPDrr : S3_Intrr <0x7C, "vhaddpd", int_x86_sse3_hadd_pd, 0>, VEX_4V; - def VHADDPDrm : S3_Intrm <0x7C, "vhaddpd", int_x86_sse3_hadd_pd, 0>, VEX_4V; - def VHSUBPSrr : S3D_Intrr<0x7D, "vhsubps", int_x86_sse3_hsub_ps, 0>, VEX_4V; - def VHSUBPSrm : S3D_Intrm<0x7D, "vhsubps", int_x86_sse3_hsub_ps, 0>, VEX_4V; - def VHSUBPDrr : S3_Intrr <0x7D, "vhsubpd", int_x86_sse3_hsub_pd, 0>, VEX_4V; - def VHSUBPDrm : S3_Intrm <0x7D, "vhsubpd", int_x86_sse3_hsub_pd, 0>, VEX_4V; + defm VHADDPS : S3D_Int<0x7C, "vhaddps", v4f32, VR128, f128mem, + int_x86_sse3_hadd_ps, 0>, VEX_4V; + defm VHADDPD : S3_Int <0x7C, "vhaddpd", v2f64, VR128, f128mem, + int_x86_sse3_hadd_pd, 0>, VEX_4V; + defm VHSUBPS : S3D_Int<0x7D, "vhsubps", v4f32, VR128, f128mem, + int_x86_sse3_hsub_ps, 0>, VEX_4V; + defm VHSUBPD : S3_Int <0x7D, "vhsubpd", v2f64, VR128, f128mem, + int_x86_sse3_hsub_pd, 0>, VEX_4V; + defm VHADDPSY : S3D_Int<0x7C, "vhaddps", v8f32, VR256, f256mem, + int_x86_avx_hadd_ps_256, 0>, VEX_4V; + defm VHADDPDY : S3_Int <0x7C, "vhaddpd", v4f64, VR256, f256mem, + int_x86_avx_hadd_pd_256, 0>, VEX_4V; + defm VHSUBPSY : S3D_Int<0x7D, "vhsubps", v8f32, VR256, f256mem, + int_x86_avx_hsub_ps_256, 0>, 
VEX_4V; + defm VHSUBPDY : S3_Int <0x7D, "vhsubpd", v4f64, VR256, f256mem, + int_x86_avx_hsub_pd_256, 0>, VEX_4V; } let Constraints = "$src1 = $dst" in { - def HADDPSrr : S3D_Intrr<0x7C, "haddps", int_x86_sse3_hadd_ps>; - def HADDPSrm : S3D_Intrm<0x7C, "haddps", int_x86_sse3_hadd_ps>; - def HADDPDrr : S3_Intrr <0x7C, "haddpd", int_x86_sse3_hadd_pd>; - def HADDPDrm : S3_Intrm <0x7C, "haddpd", int_x86_sse3_hadd_pd>; - def HSUBPSrr : S3D_Intrr<0x7D, "hsubps", int_x86_sse3_hsub_ps>; - def HSUBPSrm : S3D_Intrm<0x7D, "hsubps", int_x86_sse3_hsub_ps>; - def HSUBPDrr : S3_Intrr <0x7D, "hsubpd", int_x86_sse3_hsub_pd>; - def HSUBPDrm : S3_Intrm <0x7D, "hsubpd", int_x86_sse3_hsub_pd>; + defm HADDPS : S3D_Int<0x7C, "haddps", v4f32, VR128, f128mem, + int_x86_sse3_hadd_ps>; + defm HADDPD : S3_Int<0x7C, "haddpd", v2f64, VR128, f128mem, + int_x86_sse3_hadd_pd>; + defm HSUBPS : S3D_Int<0x7D, "hsubps", v4f32, VR128, f128mem, + int_x86_sse3_hsub_ps>; + defm HSUBPD : S3_Int<0x7D, "hsubpd", v2f64, VR128, f128mem, + int_x86_sse3_hsub_pd>; } //===---------------------------------------------------------------------===// // SSSE3 - Packed Absolute Instructions //===---------------------------------------------------------------------===// -/// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}. -multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr, - PatFrag mem_frag64, PatFrag mem_frag128, - Intrinsic IntId64, Intrinsic IntId128> { +/// SS3I_unop_rm_int_mm - Simple SSSE3 unary whose type can be v*{i8,i16,i32}. +multiclass SS3I_unop_rm_int_mm<bits<8> opc, string OpcodeStr, + PatFrag mem_frag64, Intrinsic IntId64> { def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [(set VR64:$dst, (IntId64 VR64:$src))]>; @@ -3224,7 +3543,11 @@ multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr, !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), [(set VR64:$dst, (IntId64 (bitconvert (mem_frag64 addr:$src))))]>; +} +/// SS3I_unop_rm_int - Simple SSSE3 unary op whose type can be v*{i8,i16,i32}. 
+multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr, + PatFrag mem_frag128, Intrinsic IntId128> { def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src), !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), @@ -3240,26 +3563,28 @@ multiclass SS3I_unop_rm_int<bits<8> opc, string OpcodeStr, } let isAsmParserOnly = 1, Predicates = [HasAVX] in { - defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv8i8, memopv16i8, - int_x86_ssse3_pabs_b, + defm VPABSB : SS3I_unop_rm_int<0x1C, "vpabsb", memopv16i8, int_x86_ssse3_pabs_b_128>, VEX; - defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv4i16, memopv8i16, - int_x86_ssse3_pabs_w, + defm VPABSW : SS3I_unop_rm_int<0x1D, "vpabsw", memopv8i16, int_x86_ssse3_pabs_w_128>, VEX; - defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv2i32, memopv4i32, - int_x86_ssse3_pabs_d, + defm VPABSD : SS3I_unop_rm_int<0x1E, "vpabsd", memopv4i32, int_x86_ssse3_pabs_d_128>, VEX; } -defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv8i8, memopv16i8, - int_x86_ssse3_pabs_b, - int_x86_ssse3_pabs_b_128>; -defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv4i16, memopv8i16, - int_x86_ssse3_pabs_w, - int_x86_ssse3_pabs_w_128>; -defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv2i32, memopv4i32, - int_x86_ssse3_pabs_d, - int_x86_ssse3_pabs_d_128>; +defm PABSB : SS3I_unop_rm_int<0x1C, "pabsb", memopv16i8, + int_x86_ssse3_pabs_b_128>, + SS3I_unop_rm_int_mm<0x1C, "pabsb", memopv8i8, + int_x86_ssse3_pabs_b>; + +defm PABSW : SS3I_unop_rm_int<0x1D, "pabsw", memopv8i16, + int_x86_ssse3_pabs_w_128>, + SS3I_unop_rm_int_mm<0x1D, "pabsw", memopv4i16, + int_x86_ssse3_pabs_w>; + +defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv4i32, + int_x86_ssse3_pabs_d_128>, + SS3I_unop_rm_int_mm<0x1E, "pabsd", memopv2i32, + int_x86_ssse3_pabs_d>; //===---------------------------------------------------------------------===// // SSSE3 - Packed Binary Operator Instructions @@ -3267,26 +3592,9 @@ defm PABSD : SS3I_unop_rm_int<0x1E, "pabsd", memopv2i32, memopv4i32, /// SS3I_binop_rm_int - Simple SSSE3 bin op whose type can be v*{i8,i16,i32}. 
multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr, - PatFrag mem_frag64, PatFrag mem_frag128, - Intrinsic IntId64, Intrinsic IntId128, + PatFrag mem_frag128, Intrinsic IntId128, bit Is2Addr = 1> { let isCommutable = 1 in - def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), - (ins VR64:$src1, VR64:$src2), - !if(Is2Addr, - !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), - !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]>; - def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), - (ins VR64:$src1, i64mem:$src2), - !if(Is2Addr, - !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), - !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}")), - [(set VR64:$dst, - (IntId64 VR64:$src1, - (bitconvert (memopv8i8 addr:$src2))))]>; - - let isCommutable = 1 in def rr128 : SS38I<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), !if(Is2Addr, @@ -3303,88 +3611,102 @@ multiclass SS3I_binop_rm_int<bits<8> opc, string OpcodeStr, (IntId128 VR128:$src1, (bitconvert (memopv16i8 addr:$src2))))]>, OpSize; } +multiclass SS3I_binop_rm_int_mm<bits<8> opc, string OpcodeStr, + PatFrag mem_frag64, Intrinsic IntId64> { + let isCommutable = 1 in + def rr64 : SS38I<opc, MRMSrcReg, (outs VR64:$dst), + (ins VR64:$src1, VR64:$src2), + !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), + [(set VR64:$dst, (IntId64 VR64:$src1, VR64:$src2))]>; + def rm64 : SS38I<opc, MRMSrcMem, (outs VR64:$dst), + (ins VR64:$src1, i64mem:$src2), + !strconcat(OpcodeStr, "\t{$src2, $dst|$dst, $src2}"), + [(set VR64:$dst, + (IntId64 VR64:$src1, + (bitconvert (memopv8i8 addr:$src2))))]>; +} let isAsmParserOnly = 1, Predicates = [HasAVX] in { let isCommutable = 0 in { - defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv4i16, memopv8i16, - int_x86_ssse3_phadd_w, + defm VPHADDW : SS3I_binop_rm_int<0x01, "vphaddw", memopv8i16, int_x86_ssse3_phadd_w_128, 0>, VEX_4V; - defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv2i32, memopv4i32, - int_x86_ssse3_phadd_d, + defm VPHADDD : SS3I_binop_rm_int<0x02, "vphaddd", memopv4i32, int_x86_ssse3_phadd_d_128, 0>, VEX_4V; - defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv4i16, memopv8i16, - int_x86_ssse3_phadd_sw, + defm VPHADDSW : SS3I_binop_rm_int<0x03, "vphaddsw", memopv8i16, int_x86_ssse3_phadd_sw_128, 0>, VEX_4V; - defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv4i16, memopv8i16, - int_x86_ssse3_phsub_w, + defm VPHSUBW : SS3I_binop_rm_int<0x05, "vphsubw", memopv8i16, int_x86_ssse3_phsub_w_128, 0>, VEX_4V; - defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv2i32, memopv4i32, - int_x86_ssse3_phsub_d, + defm VPHSUBD : SS3I_binop_rm_int<0x06, "vphsubd", memopv4i32, int_x86_ssse3_phsub_d_128, 0>, VEX_4V; - defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv4i16, memopv8i16, - int_x86_ssse3_phsub_sw, + defm VPHSUBSW : SS3I_binop_rm_int<0x07, "vphsubsw", memopv8i16, int_x86_ssse3_phsub_sw_128, 0>, VEX_4V; - defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv8i8, memopv16i8, - int_x86_ssse3_pmadd_ub_sw, + defm VPMADDUBSW : SS3I_binop_rm_int<0x04, "vpmaddubsw", memopv16i8, int_x86_ssse3_pmadd_ub_sw_128, 0>, VEX_4V; - defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv8i8, memopv16i8, - int_x86_ssse3_pshuf_b, + defm VPSHUFB : SS3I_binop_rm_int<0x00, "vpshufb", memopv16i8, int_x86_ssse3_pshuf_b_128, 0>, VEX_4V; - defm VPSIGNB : SS3I_binop_rm_int<0x08, "vpsignb", memopv8i8, memopv16i8, - int_x86_ssse3_psign_b, + defm VPSIGNB : SS3I_binop_rm_int<0x08, 
"vpsignb", memopv16i8, int_x86_ssse3_psign_b_128, 0>, VEX_4V; - defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv4i16, memopv8i16, - int_x86_ssse3_psign_w, + defm VPSIGNW : SS3I_binop_rm_int<0x09, "vpsignw", memopv8i16, int_x86_ssse3_psign_w_128, 0>, VEX_4V; - defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv2i32, memopv4i32, - int_x86_ssse3_psign_d, + defm VPSIGND : SS3I_binop_rm_int<0x0A, "vpsignd", memopv4i32, int_x86_ssse3_psign_d_128, 0>, VEX_4V; } -defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv4i16, memopv8i16, - int_x86_ssse3_pmul_hr_sw, +defm VPMULHRSW : SS3I_binop_rm_int<0x0B, "vpmulhrsw", memopv8i16, int_x86_ssse3_pmul_hr_sw_128, 0>, VEX_4V; } // None of these have i8 immediate fields. let ImmT = NoImm, Constraints = "$src1 = $dst" in { let isCommutable = 0 in { - defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv4i16, memopv8i16, - int_x86_ssse3_phadd_w, - int_x86_ssse3_phadd_w_128>; - defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv2i32, memopv4i32, - int_x86_ssse3_phadd_d, - int_x86_ssse3_phadd_d_128>; - defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv4i16, memopv8i16, - int_x86_ssse3_phadd_sw, - int_x86_ssse3_phadd_sw_128>; - defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv4i16, memopv8i16, - int_x86_ssse3_phsub_w, - int_x86_ssse3_phsub_w_128>; - defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv2i32, memopv4i32, - int_x86_ssse3_phsub_d, - int_x86_ssse3_phsub_d_128>; - defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv4i16, memopv8i16, - int_x86_ssse3_phsub_sw, - int_x86_ssse3_phsub_sw_128>; - defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv8i8, memopv16i8, - int_x86_ssse3_pmadd_ub_sw, - int_x86_ssse3_pmadd_ub_sw_128>; - defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv8i8, memopv16i8, - int_x86_ssse3_pshuf_b, - int_x86_ssse3_pshuf_b_128>; - defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv8i8, memopv16i8, - int_x86_ssse3_psign_b, - int_x86_ssse3_psign_b_128>; - defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv4i16, memopv8i16, - int_x86_ssse3_psign_w, - int_x86_ssse3_psign_w_128>; - defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv2i32, memopv4i32, - int_x86_ssse3_psign_d, - int_x86_ssse3_psign_d_128>; -} -defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv4i16, memopv8i16, - int_x86_ssse3_pmul_hr_sw, - int_x86_ssse3_pmul_hr_sw_128>; + defm PHADDW : SS3I_binop_rm_int<0x01, "phaddw", memopv8i16, + int_x86_ssse3_phadd_w_128>, + SS3I_binop_rm_int_mm<0x01, "phaddw", memopv4i16, + int_x86_ssse3_phadd_w>; + defm PHADDD : SS3I_binop_rm_int<0x02, "phaddd", memopv4i32, + int_x86_ssse3_phadd_d_128>, + SS3I_binop_rm_int_mm<0x02, "phaddd", memopv2i32, + int_x86_ssse3_phadd_d>; + defm PHADDSW : SS3I_binop_rm_int<0x03, "phaddsw", memopv8i16, + int_x86_ssse3_phadd_sw_128>, + SS3I_binop_rm_int_mm<0x03, "phaddsw", memopv4i16, + int_x86_ssse3_phadd_sw>; + defm PHSUBW : SS3I_binop_rm_int<0x05, "phsubw", memopv8i16, + int_x86_ssse3_phsub_w_128>, + SS3I_binop_rm_int_mm<0x05, "phsubw", memopv4i16, + int_x86_ssse3_phsub_w>; + defm PHSUBD : SS3I_binop_rm_int<0x06, "phsubd", memopv4i32, + int_x86_ssse3_phsub_d_128>, + SS3I_binop_rm_int_mm<0x06, "phsubd", memopv2i32, + int_x86_ssse3_phsub_d>; + defm PHSUBSW : SS3I_binop_rm_int<0x07, "phsubsw", memopv8i16, + int_x86_ssse3_phsub_sw_128>, + SS3I_binop_rm_int_mm<0x07, "phsubsw", memopv4i16, + int_x86_ssse3_phsub_sw>; + defm PMADDUBSW : SS3I_binop_rm_int<0x04, "pmaddubsw", memopv16i8, + int_x86_ssse3_pmadd_ub_sw_128>, + SS3I_binop_rm_int_mm<0x04, 
"pmaddubsw", memopv8i8, + int_x86_ssse3_pmadd_ub_sw>; + defm PSHUFB : SS3I_binop_rm_int<0x00, "pshufb", memopv8i8, + int_x86_ssse3_pshuf_b_128>, + SS3I_binop_rm_int_mm<0x00, "pshufb", memopv8i8, + int_x86_ssse3_pshuf_b>; + defm PSIGNB : SS3I_binop_rm_int<0x08, "psignb", memopv16i8, + int_x86_ssse3_psign_b_128>, + SS3I_binop_rm_int_mm<0x08, "psignb", memopv8i8, + int_x86_ssse3_psign_b>; + defm PSIGNW : SS3I_binop_rm_int<0x09, "psignw", memopv8i16, + int_x86_ssse3_psign_w_128>, + SS3I_binop_rm_int_mm<0x09, "psignw", memopv4i16, + int_x86_ssse3_psign_w>; + defm PSIGND : SS3I_binop_rm_int<0x0A, "psignd", memopv4i32, + int_x86_ssse3_psign_d_128>, + SS3I_binop_rm_int_mm<0x0A, "psignd", memopv2i32, + int_x86_ssse3_psign_d>; +} +defm PMULHRSW : SS3I_binop_rm_int<0x0B, "pmulhrsw", memopv8i16, + int_x86_ssse3_pmul_hr_sw_128>, + SS3I_binop_rm_int_mm<0x0B, "pmulhrsw", memopv4i16, + int_x86_ssse3_pmul_hr_sw>; } def : Pat<(X86pshufb VR128:$src, VR128:$mask), @@ -3396,22 +3718,16 @@ def : Pat<(X86pshufb VR128:$src, (bc_v16i8 (memopv2i64 addr:$mask))), // SSSE3 - Packed Align Instruction Patterns //===---------------------------------------------------------------------===// -multiclass sse3_palign<string asm, bit Is2Addr = 1> { +multiclass ssse3_palign_mm<string asm> { def R64rr : SS3AI<0x0F, MRMSrcReg, (outs VR64:$dst), (ins VR64:$src1, VR64:$src2, i8imm:$src3), - !if(Is2Addr, - !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), - !strconcat(asm, - "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")), - []>; + !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>; def R64rm : SS3AI<0x0F, MRMSrcMem, (outs VR64:$dst), (ins VR64:$src1, i64mem:$src2, i8imm:$src3), - !if(Is2Addr, - !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), - !strconcat(asm, - "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")), - []>; + !strconcat(asm, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), []>; +} +multiclass ssse3_palign<string asm, bit Is2Addr = 1> { def R128rr : SS3AI<0x0F, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i8imm:$src3), !if(Is2Addr, @@ -3429,9 +3745,10 @@ multiclass sse3_palign<string asm, bit Is2Addr = 1> { } let isAsmParserOnly = 1, Predicates = [HasAVX] in - defm VPALIGN : sse3_palign<"vpalignr", 0>, VEX_4V; + defm VPALIGN : ssse3_palign<"vpalignr", 0>, VEX_4V; let Constraints = "$src1 = $dst" in - defm PALIGN : sse3_palign<"palignr">; + defm PALIGN : ssse3_palign<"palignr">, + ssse3_palign_mm<"palignr">; let AddedComplexity = 5 in { @@ -3732,31 +4049,62 @@ def : Pat<(v2i32 (fp_to_sint (v2f64 VR128:$src))), (Int_CVTTPD2PIrr VR128:$src)>, Requires<[HasSSE2]>; // Use movaps / movups for SSE integer load / store (one byte shorter). 
-def : Pat<(alignedloadv4i32 addr:$src), - (MOVAPSrm addr:$src)>; -def : Pat<(loadv4i32 addr:$src), - (MOVUPSrm addr:$src)>; -def : Pat<(alignedloadv2i64 addr:$src), - (MOVAPSrm addr:$src)>; -def : Pat<(loadv2i64 addr:$src), - (MOVUPSrm addr:$src)>; - -def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst), - (MOVAPSmr addr:$dst, VR128:$src)>; -def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst), - (MOVAPSmr addr:$dst, VR128:$src)>; -def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst), - (MOVAPSmr addr:$dst, VR128:$src)>; -def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst), - (MOVAPSmr addr:$dst, VR128:$src)>; -def : Pat<(store (v2i64 VR128:$src), addr:$dst), - (MOVUPSmr addr:$dst, VR128:$src)>; -def : Pat<(store (v4i32 VR128:$src), addr:$dst), - (MOVUPSmr addr:$dst, VR128:$src)>; -def : Pat<(store (v8i16 VR128:$src), addr:$dst), - (MOVUPSmr addr:$dst, VR128:$src)>; -def : Pat<(store (v16i8 VR128:$src), addr:$dst), - (MOVUPSmr addr:$dst, VR128:$src)>; +let Predicates = [HasSSE1] in { + def : Pat<(alignedloadv4i32 addr:$src), + (MOVAPSrm addr:$src)>; + def : Pat<(loadv4i32 addr:$src), + (MOVUPSrm addr:$src)>; + def : Pat<(alignedloadv2i64 addr:$src), + (MOVAPSrm addr:$src)>; + def : Pat<(loadv2i64 addr:$src), + (MOVUPSrm addr:$src)>; + + def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst), + (MOVAPSmr addr:$dst, VR128:$src)>; + def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst), + (MOVAPSmr addr:$dst, VR128:$src)>; + def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst), + (MOVAPSmr addr:$dst, VR128:$src)>; + def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst), + (MOVAPSmr addr:$dst, VR128:$src)>; + def : Pat<(store (v2i64 VR128:$src), addr:$dst), + (MOVUPSmr addr:$dst, VR128:$src)>; + def : Pat<(store (v4i32 VR128:$src), addr:$dst), + (MOVUPSmr addr:$dst, VR128:$src)>; + def : Pat<(store (v8i16 VR128:$src), addr:$dst), + (MOVUPSmr addr:$dst, VR128:$src)>; + def : Pat<(store (v16i8 VR128:$src), addr:$dst), + (MOVUPSmr addr:$dst, VR128:$src)>; +} + +// Use vmovaps/vmovups for AVX 128-bit integer load/store (one byte shorter). 
+let Predicates = [HasAVX] in { + def : Pat<(alignedloadv4i32 addr:$src), + (VMOVAPSrm addr:$src)>; + def : Pat<(loadv4i32 addr:$src), + (VMOVUPSrm addr:$src)>; + def : Pat<(alignedloadv2i64 addr:$src), + (VMOVAPSrm addr:$src)>; + def : Pat<(loadv2i64 addr:$src), + (VMOVUPSrm addr:$src)>; + + def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst), + (VMOVAPSmr addr:$dst, VR128:$src)>; + def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst), + (VMOVAPSmr addr:$dst, VR128:$src)>; + def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst), + (VMOVAPSmr addr:$dst, VR128:$src)>; + def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst), + (VMOVAPSmr addr:$dst, VR128:$src)>; + def : Pat<(store (v2i64 VR128:$src), addr:$dst), + (VMOVUPSmr addr:$dst, VR128:$src)>; + def : Pat<(store (v4i32 VR128:$src), addr:$dst), + (VMOVUPSmr addr:$dst, VR128:$src)>; + def : Pat<(store (v8i16 VR128:$src), addr:$dst), + (VMOVUPSmr addr:$dst, VR128:$src)>; + def : Pat<(store (v16i8 VR128:$src), addr:$dst), + (VMOVUPSmr addr:$dst, VR128:$src)>; +} //===----------------------------------------------------------------------===// // SSE4.1 - Packed Move with Sign/Zero Extend @@ -3923,8 +4271,12 @@ multiclass SS41I_extract8<bits<8> opc, string OpcodeStr> { // (store (i8 (trunc (X86pextrb (v16i8 VR128:$src1), imm:$src2))), addr:$dst) } -let isAsmParserOnly = 1, Predicates = [HasAVX] in +let isAsmParserOnly = 1, Predicates = [HasAVX] in { defm VPEXTRB : SS41I_extract8<0x14, "vpextrb">, VEX; + def VPEXTRBrr64 : SS4AIi8<0x14, MRMDestReg, (outs GR64:$dst), + (ins VR128:$src1, i32i8imm:$src2), + "vpextrb\t{$src2, $src1, $dst|$dst, $src1, $src2}", []>, OpSize, VEX; +} defm PEXTRB : SS41I_extract8<0x14, "pextrb">; @@ -4007,8 +4359,13 @@ multiclass SS41I_extractf32<bits<8> opc, string OpcodeStr> { addr:$dst)]>, OpSize; } -let isAsmParserOnly = 1, Predicates = [HasAVX] in +let isAsmParserOnly = 1, Predicates = [HasAVX] in { defm VEXTRACTPS : SS41I_extractf32<0x17, "vextractps">, VEX; + def VEXTRACTPSrr64 : SS4AIi8<0x17, MRMDestReg, (outs GR64:$dst), + (ins VR128:$src1, i32i8imm:$src2), + "vextractps \t{$src2, $src1, $dst|$dst, $src1, $src2}", + []>, OpSize, VEX; +} defm EXTRACTPS : SS41I_extractf32<0x17, "extractps">; // Also match an EXTRACTPS store when the store is done as f32 instead of i32. @@ -4131,80 +4488,84 @@ let isAsmParserOnly = 1, Predicates = [HasAVX] in defm VINSERTPS : SS41I_insertf32<0x21, "vinsertps", 0>, VEX_4V; def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3), - (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>; + (VINSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>, + Requires<[HasAVX]>; +def : Pat<(int_x86_sse41_insertps VR128:$src1, VR128:$src2, imm:$src3), + (INSERTPSrr VR128:$src1, VR128:$src2, imm:$src3)>, + Requires<[HasSSE41]>; //===----------------------------------------------------------------------===// // SSE4.1 - Round Instructions //===----------------------------------------------------------------------===// -multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, - string OpcodeStr, - Intrinsic V4F32Int, - Intrinsic V2F64Int> { +multiclass sse41_fp_unop_rm<bits<8> opcps, bits<8> opcpd, string OpcodeStr, + X86MemOperand x86memop, RegisterClass RC, + PatFrag mem_frag32, PatFrag mem_frag64, + Intrinsic V4F32Int, Intrinsic V2F64Int> { // Intrinsic operation, reg. 
// Vector intrinsic operation, reg def PSr_Int : SS4AIi8<opcps, MRMSrcReg, - (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2), + (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2), !strconcat(OpcodeStr, "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), - [(set VR128:$dst, (V4F32Int VR128:$src1, imm:$src2))]>, + [(set RC:$dst, (V4F32Int RC:$src1, imm:$src2))]>, OpSize; // Vector intrinsic operation, mem def PSm_Int : Ii8<opcps, MRMSrcMem, - (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2), + (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2), !strconcat(OpcodeStr, "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), - [(set VR128:$dst, - (V4F32Int (memopv4f32 addr:$src1),imm:$src2))]>, + [(set RC:$dst, + (V4F32Int (mem_frag32 addr:$src1),imm:$src2))]>, TA, OpSize, Requires<[HasSSE41]>; // Vector intrinsic operation, reg def PDr_Int : SS4AIi8<opcpd, MRMSrcReg, - (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2), + (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2), !strconcat(OpcodeStr, "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), - [(set VR128:$dst, (V2F64Int VR128:$src1, imm:$src2))]>, + [(set RC:$dst, (V2F64Int RC:$src1, imm:$src2))]>, OpSize; // Vector intrinsic operation, mem def PDm_Int : SS4AIi8<opcpd, MRMSrcMem, - (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2), + (outs RC:$dst), (ins f256mem:$src1, i32i8imm:$src2), !strconcat(OpcodeStr, "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), - [(set VR128:$dst, - (V2F64Int (memopv2f64 addr:$src1),imm:$src2))]>, + [(set RC:$dst, + (V2F64Int (mem_frag64 addr:$src1),imm:$src2))]>, OpSize; } -multiclass sse41_fp_unop_rm_avx<bits<8> opcps, bits<8> opcpd, - string OpcodeStr> { +multiclass sse41_fp_unop_rm_avx_p<bits<8> opcps, bits<8> opcpd, + RegisterClass RC, X86MemOperand x86memop, string OpcodeStr> { // Intrinsic operation, reg. // Vector intrinsic operation, reg def PSr : SS4AIi8<opcps, MRMSrcReg, - (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2), + (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2), !strconcat(OpcodeStr, "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, OpSize; // Vector intrinsic operation, mem def PSm : Ii8<opcps, MRMSrcMem, - (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2), + (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2), !strconcat(OpcodeStr, "ps\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, TA, OpSize, Requires<[HasSSE41]>; // Vector intrinsic operation, reg def PDr : SS4AIi8<opcpd, MRMSrcReg, - (outs VR128:$dst), (ins VR128:$src1, i32i8imm:$src2), + (outs RC:$dst), (ins RC:$src1, i32i8imm:$src2), !strconcat(OpcodeStr, "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, OpSize; // Vector intrinsic operation, mem def PDm : SS4AIi8<opcpd, MRMSrcMem, - (outs VR128:$dst), (ins f128mem:$src1, i32i8imm:$src2), + (outs RC:$dst), (ins x86memop:$src1, i32i8imm:$src2), !strconcat(OpcodeStr, "pd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>, OpSize; @@ -4261,8 +4622,8 @@ multiclass sse41_fp_binop_rm<bits<8> opcss, bits<8> opcsd, OpSize; } -multiclass sse41_fp_binop_rm_avx<bits<8> opcss, bits<8> opcsd, - string OpcodeStr> { +multiclass sse41_fp_binop_rm_avx_s<bits<8> opcss, bits<8> opcsd, + string OpcodeStr> { // Intrinsic operation, reg. 
def SSr : SS4AIi8<opcss, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3), @@ -4295,24 +4656,90 @@ multiclass sse41_fp_binop_rm_avx<bits<8> opcss, bits<8> opcsd, // FP round - roundss, roundps, roundsd, roundpd let isAsmParserOnly = 1, Predicates = [HasAVX] in { // Intrinsic form - defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", - int_x86_sse41_round_ps, int_x86_sse41_round_pd>, - VEX; + defm VROUND : sse41_fp_unop_rm<0x08, 0x09, "vround", f128mem, VR128, + memopv4f32, memopv2f64, + int_x86_sse41_round_ps, + int_x86_sse41_round_pd>, VEX; + defm VROUNDY : sse41_fp_unop_rm<0x08, 0x09, "vround", f256mem, VR256, + memopv8f32, memopv4f64, + int_x86_avx_round_ps_256, + int_x86_avx_round_pd_256>, VEX; defm VROUND : sse41_fp_binop_rm<0x0A, 0x0B, "vround", - int_x86_sse41_round_ss, int_x86_sse41_round_sd, - 0>, VEX_4V; + int_x86_sse41_round_ss, + int_x86_sse41_round_sd, 0>, VEX_4V; + // Instructions for the assembler - defm VROUND : sse41_fp_unop_rm_avx<0x08, 0x09, "vround">, VEX; - defm VROUND : sse41_fp_binop_rm_avx<0x0A, 0x0B, "vround">, VEX_4V; + defm VROUND : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR128, f128mem, "vround">, + VEX; + defm VROUNDY : sse41_fp_unop_rm_avx_p<0x08, 0x09, VR256, f256mem, "vround">, + VEX; + defm VROUND : sse41_fp_binop_rm_avx_s<0x0A, 0x0B, "vround">, VEX_4V; } -defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", +defm ROUND : sse41_fp_unop_rm<0x08, 0x09, "round", f128mem, VR128, + memopv4f32, memopv2f64, int_x86_sse41_round_ps, int_x86_sse41_round_pd>; let Constraints = "$src1 = $dst" in defm ROUND : sse41_fp_binop_rm<0x0A, 0x0B, "round", int_x86_sse41_round_ss, int_x86_sse41_round_sd>; //===----------------------------------------------------------------------===// +// SSE4.1 - Packed Bit Test +//===----------------------------------------------------------------------===// + +// ptest instruction we'll lower to this in X86ISelLowering primarily from +// the intel intrinsic that corresponds to this. 
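The X86ptest node that the definitions below attach to EFLAGS follows the usual ptest semantics: ZF is set when the AND of the two sources is all zero, and CF when the AND of the complemented first source with the second is all zero (vtestps/vtestpd apply the same test to the per-element sign bits only). A minimal scalar C++ sketch of that behaviour, not taken from the patch, modelling a 128-bit value as two 64-bit halves:

#include <cstdint>
#include <cassert>

struct V128 { uint64_t Lo, Hi; };
struct Flags { bool ZF, CF; };

// Scalar model of ptest: ZF <- ((src1 & src2) == 0), CF <- ((~src1 & src2) == 0).
Flags ptest(V128 Src1, V128 Src2) {
  Flags F;
  F.ZF = ((Src1.Lo & Src2.Lo) | (Src1.Hi & Src2.Hi)) == 0;
  F.CF = ((~Src1.Lo & Src2.Lo) | (~Src1.Hi & Src2.Hi)) == 0;
  return F;
}

int main() {
  // Disjoint bit sets: the AND is all zero (ZF set), the ANDN is not (CF clear).
  Flags F = ptest({0xFF00, 0}, {0x00FF, 0});
  assert(F.ZF && !F.CF);
  return 0;
}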
+let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in { +def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), + "vptest\t{$src2, $src1|$src1, $src2}", + [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>, + OpSize, VEX; +def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), + "vptest\t{$src2, $src1|$src1, $src2}", + [(set EFLAGS,(X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>, + OpSize, VEX; + +def VPTESTYrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR256:$src1, VR256:$src2), + "vptest\t{$src2, $src1|$src1, $src2}", + [(set EFLAGS, (X86ptest VR256:$src1, (v4i64 VR256:$src2)))]>, + OpSize, VEX; +def VPTESTYrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR256:$src1, i256mem:$src2), + "vptest\t{$src2, $src1|$src1, $src2}", + [(set EFLAGS,(X86ptest VR256:$src1, (memopv4i64 addr:$src2)))]>, + OpSize, VEX; +} + +let Defs = [EFLAGS] in { +def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), + "ptest \t{$src2, $src1|$src1, $src2}", + [(set EFLAGS, (X86ptest VR128:$src1, (v4f32 VR128:$src2)))]>, + OpSize; +def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, f128mem:$src2), + "ptest \t{$src2, $src1|$src1, $src2}", + [(set EFLAGS, (X86ptest VR128:$src1, (memopv4f32 addr:$src2)))]>, + OpSize; +} + +// The bit test instructions below are AVX only +multiclass avx_bittest<bits<8> opc, string OpcodeStr, RegisterClass RC, + X86MemOperand x86memop, PatFrag mem_frag, ValueType vt> { + def rr : SS48I<opc, MRMSrcReg, (outs), (ins RC:$src1, RC:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"), + [(set EFLAGS, (X86testp RC:$src1, (vt RC:$src2)))]>, OpSize, VEX; + def rm : SS48I<opc, MRMSrcMem, (outs), (ins RC:$src1, x86memop:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1|$src1, $src2}"), + [(set EFLAGS, (X86testp RC:$src1, (mem_frag addr:$src2)))]>, + OpSize, VEX; +} + +let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in { +defm VTESTPS : avx_bittest<0x0E, "vtestps", VR128, f128mem, memopv4f32, v4f32>; +defm VTESTPSY : avx_bittest<0x0E, "vtestps", VR256, f256mem, memopv8f32, v8f32>; +defm VTESTPD : avx_bittest<0x0F, "vtestpd", VR128, f128mem, memopv2f64, v2f64>; +defm VTESTPDY : avx_bittest<0x0F, "vtestpd", VR256, f256mem, memopv4f64, v4f64>; +} + +//===----------------------------------------------------------------------===// // SSE4.1 - Misc Instructions //===----------------------------------------------------------------------===// @@ -4431,79 +4858,104 @@ let Constraints = "$src1 = $dst" in /// SS41I_binop_rmi_int - SSE 4.1 binary operator with 8-bit immediate multiclass SS41I_binop_rmi_int<bits<8> opc, string OpcodeStr, - Intrinsic IntId128, bit Is2Addr = 1> { + Intrinsic IntId, RegisterClass RC, PatFrag memop_frag, + X86MemOperand x86memop, bit Is2Addr = 1> { let isCommutable = 1 in - def rri : SS4AIi8<opc, MRMSrcReg, (outs VR128:$dst), - (ins VR128:$src1, VR128:$src2, i32i8imm:$src3), + def rri : SS4AIi8<opc, MRMSrcReg, (outs RC:$dst), + (ins RC:$src1, RC:$src2, i32i8imm:$src3), !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), !strconcat(OpcodeStr, "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")), - [(set VR128:$dst, - (IntId128 VR128:$src1, VR128:$src2, imm:$src3))]>, + [(set RC:$dst, (IntId RC:$src1, RC:$src2, imm:$src3))]>, OpSize; - def rmi : SS4AIi8<opc, MRMSrcMem, (outs VR128:$dst), - (ins VR128:$src1, i128mem:$src2, i32i8imm:$src3), + def rmi : SS4AIi8<opc, MRMSrcMem, (outs RC:$dst), + (ins RC:$src1, x86memop:$src2, 
i32i8imm:$src3), !if(Is2Addr, !strconcat(OpcodeStr, "\t{$src3, $src2, $dst|$dst, $src2, $src3}"), !strconcat(OpcodeStr, "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}")), - [(set VR128:$dst, - (IntId128 VR128:$src1, - (bitconvert (memopv16i8 addr:$src2)), imm:$src3))]>, + [(set RC:$dst, + (IntId RC:$src1, + (bitconvert (memop_frag addr:$src2)), imm:$src3))]>, OpSize; } let isAsmParserOnly = 1, Predicates = [HasAVX] in { let isCommutable = 0 in { defm VBLENDPS : SS41I_binop_rmi_int<0x0C, "vblendps", int_x86_sse41_blendps, - 0>, VEX_4V; + VR128, memopv16i8, i128mem, 0>, VEX_4V; defm VBLENDPD : SS41I_binop_rmi_int<0x0D, "vblendpd", int_x86_sse41_blendpd, - 0>, VEX_4V; + VR128, memopv16i8, i128mem, 0>, VEX_4V; + defm VBLENDPSY : SS41I_binop_rmi_int<0x0C, "vblendps", + int_x86_avx_blend_ps_256, VR256, memopv32i8, i256mem, 0>, VEX_4V; + defm VBLENDPDY : SS41I_binop_rmi_int<0x0D, "vblendpd", + int_x86_avx_blend_pd_256, VR256, memopv32i8, i256mem, 0>, VEX_4V; defm VPBLENDW : SS41I_binop_rmi_int<0x0E, "vpblendw", int_x86_sse41_pblendw, - 0>, VEX_4V; + VR128, memopv16i8, i128mem, 0>, VEX_4V; defm VMPSADBW : SS41I_binop_rmi_int<0x42, "vmpsadbw", int_x86_sse41_mpsadbw, - 0>, VEX_4V; + VR128, memopv16i8, i128mem, 0>, VEX_4V; } defm VDPPS : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_sse41_dpps, - 0>, VEX_4V; + VR128, memopv16i8, i128mem, 0>, VEX_4V; defm VDPPD : SS41I_binop_rmi_int<0x41, "vdppd", int_x86_sse41_dppd, - 0>, VEX_4V; + VR128, memopv16i8, i128mem, 0>, VEX_4V; + defm VDPPSY : SS41I_binop_rmi_int<0x40, "vdpps", int_x86_avx_dp_ps_256, + VR256, memopv32i8, i256mem, 0>, VEX_4V; } let Constraints = "$src1 = $dst" in { let isCommutable = 0 in { - defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps>; - defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd>; - defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw>; - defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw>; + defm BLENDPS : SS41I_binop_rmi_int<0x0C, "blendps", int_x86_sse41_blendps, + VR128, memopv16i8, i128mem>; + defm BLENDPD : SS41I_binop_rmi_int<0x0D, "blendpd", int_x86_sse41_blendpd, + VR128, memopv16i8, i128mem>; + defm PBLENDW : SS41I_binop_rmi_int<0x0E, "pblendw", int_x86_sse41_pblendw, + VR128, memopv16i8, i128mem>; + defm MPSADBW : SS41I_binop_rmi_int<0x42, "mpsadbw", int_x86_sse41_mpsadbw, + VR128, memopv16i8, i128mem>; } - defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps>; - defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd>; + defm DPPS : SS41I_binop_rmi_int<0x40, "dpps", int_x86_sse41_dpps, + VR128, memopv16i8, i128mem>; + defm DPPD : SS41I_binop_rmi_int<0x41, "dppd", int_x86_sse41_dppd, + VR128, memopv16i8, i128mem>; } /// SS41I_quaternary_int_avx - AVX SSE 4.1 with 4 operators let isAsmParserOnly = 1, Predicates = [HasAVX] in { - multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr> { - def rr : I<opc, MRMSrcReg, (outs VR128:$dst), - (ins VR128:$src1, VR128:$src2, VR128:$src3), - !strconcat(OpcodeStr, - "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), - [], SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM; - - def rm : I<opc, MRMSrcMem, (outs VR128:$dst), - (ins VR128:$src1, i128mem:$src2, VR128:$src3), - !strconcat(OpcodeStr, - "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), - [], SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM; - } -} - -defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd">; -defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps">; -defm 
VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb">; +multiclass SS41I_quaternary_int_avx<bits<8> opc, string OpcodeStr, + RegisterClass RC, X86MemOperand x86memop, + PatFrag mem_frag, Intrinsic IntId> { + def rr : I<opc, MRMSrcReg, (outs RC:$dst), + (ins RC:$src1, RC:$src2, RC:$src3), + !strconcat(OpcodeStr, + "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), + [(set RC:$dst, (IntId RC:$src1, RC:$src2, RC:$src3))], + SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM; + + def rm : I<opc, MRMSrcMem, (outs RC:$dst), + (ins RC:$src1, x86memop:$src2, RC:$src3), + !strconcat(OpcodeStr, + "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"), + [(set RC:$dst, + (IntId RC:$src1, (bitconvert (mem_frag addr:$src2)), + RC:$src3))], + SSEPackedInt>, OpSize, TA, VEX_4V, VEX_I8IMM; +} +} + +defm VBLENDVPD : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR128, i128mem, + memopv16i8, int_x86_sse41_blendvpd>; +defm VBLENDVPS : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR128, i128mem, + memopv16i8, int_x86_sse41_blendvps>; +defm VPBLENDVB : SS41I_quaternary_int_avx<0x4C, "vpblendvb", VR128, i128mem, + memopv16i8, int_x86_sse41_pblendvb>; +defm VBLENDVPDY : SS41I_quaternary_int_avx<0x4B, "vblendvpd", VR256, i256mem, + memopv32i8, int_x86_avx_blendv_pd_256>; +defm VBLENDVPSY : SS41I_quaternary_int_avx<0x4A, "vblendvps", VR256, i256mem, + memopv32i8, int_x86_avx_blendv_ps_256>; /// SS41I_ternary_int - SSE 4.1 ternary operator let Uses = [XMM0], Constraints = "$src1 = $dst" in { @@ -4529,30 +4981,6 @@ defm BLENDVPD : SS41I_ternary_int<0x15, "blendvpd", int_x86_sse41_blendvpd>; defm BLENDVPS : SS41I_ternary_int<0x14, "blendvps", int_x86_sse41_blendvps>; defm PBLENDVB : SS41I_ternary_int<0x10, "pblendvb", int_x86_sse41_pblendvb>; -// ptest instruction we'll lower to this in X86ISelLowering primarily from -// the intel intrinsic that corresponds to this. 
-let Defs = [EFLAGS], isAsmParserOnly = 1, Predicates = [HasAVX] in { -def VPTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), - "vptest\t{$src2, $src1|$src1, $src2}", - [(set EFLAGS, (X86ptest VR128:$src1, VR128:$src2))]>, - OpSize, VEX; -def VPTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2), - "vptest\t{$src2, $src1|$src1, $src2}", - [(set EFLAGS, (X86ptest VR128:$src1, (load addr:$src2)))]>, - OpSize, VEX; -} - -let Defs = [EFLAGS] in { -def PTESTrr : SS48I<0x17, MRMSrcReg, (outs), (ins VR128:$src1, VR128:$src2), - "ptest \t{$src2, $src1|$src1, $src2}", - [(set EFLAGS, (X86ptest VR128:$src1, VR128:$src2))]>, - OpSize; -def PTESTrm : SS48I<0x17, MRMSrcMem, (outs), (ins VR128:$src1, i128mem:$src2), - "ptest \t{$src2, $src1|$src1, $src2}", - [(set EFLAGS, (X86ptest VR128:$src1, (load addr:$src2)))]>, - OpSize; -} - let isAsmParserOnly = 1, Predicates = [HasAVX] in def VMOVNTDQArm : SS48I<0x2A, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src), "vmovntdqa\t{$src, $dst|$dst, $src}", @@ -4603,17 +5031,20 @@ def : Pat<(v2i64 (X86pcmpgtq VR128:$src1, (memop addr:$src2))), //===----------------------------------------------------------------------===// // Packed Compare Implicit Length Strings, Return Mask -let Defs = [EFLAGS], usesCustomInserter = 1 in { - def PCMPISTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst), - (ins VR128:$src1, VR128:$src2, i8imm:$src3), - "#PCMPISTRM128rr PSEUDO!", +multiclass pseudo_pcmpistrm<string asm> { + def REG : Ii8<0, Pseudo, (outs VR128:$dst), + (ins VR128:$src1, VR128:$src2, i8imm:$src3), !strconcat(asm, "rr PSEUDO"), [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 VR128:$src1, VR128:$src2, - imm:$src3))]>, OpSize; - def PCMPISTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst), - (ins VR128:$src1, i128mem:$src2, i8imm:$src3), - "#PCMPISTRM128rm PSEUDO!", + imm:$src3))]>; + def MEM : Ii8<0, Pseudo, (outs VR128:$dst), + (ins VR128:$src1, i128mem:$src2, i8imm:$src3), !strconcat(asm, "rm PSEUDO"), [(set VR128:$dst, (int_x86_sse42_pcmpistrm128 - VR128:$src1, (load addr:$src2), imm:$src3))]>, OpSize; + VR128:$src1, (load addr:$src2), imm:$src3))]>; +} + +let Defs = [EFLAGS], usesCustomInserter = 1 in { + defm PCMPISTRM128 : pseudo_pcmpistrm<"#PCMPISTRM128">, Requires<[HasSSE42]>; + defm VPCMPISTRM128 : pseudo_pcmpistrm<"#VPCMPISTRM128">, Requires<[HasAVX]>; } let Defs = [XMM0, EFLAGS], isAsmParserOnly = 1, @@ -4636,20 +5067,20 @@ let Defs = [XMM0, EFLAGS] in { } // Packed Compare Explicit Length Strings, Return Mask -let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in { - def PCMPESTRM128REG : SS42AI<0, Pseudo, (outs VR128:$dst), - (ins VR128:$src1, VR128:$src3, i8imm:$src5), - "#PCMPESTRM128rr PSEUDO!", - [(set VR128:$dst, - (int_x86_sse42_pcmpestrm128 - VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>, OpSize; - - def PCMPESTRM128MEM : SS42AI<0, Pseudo, (outs VR128:$dst), - (ins VR128:$src1, i128mem:$src3, i8imm:$src5), - "#PCMPESTRM128rm PSEUDO!", +multiclass pseudo_pcmpestrm<string asm> { + def REG : Ii8<0, Pseudo, (outs VR128:$dst), + (ins VR128:$src1, VR128:$src3, i8imm:$src5), !strconcat(asm, "rr PSEUDO"), + [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 + VR128:$src1, EAX, VR128:$src3, EDX, imm:$src5))]>; + def MEM : Ii8<0, Pseudo, (outs VR128:$dst), + (ins VR128:$src1, i128mem:$src3, i8imm:$src5), !strconcat(asm, "rm PSEUDO"), [(set VR128:$dst, (int_x86_sse42_pcmpestrm128 - VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>, - OpSize; + VR128:$src1, EAX, (load addr:$src3), EDX, imm:$src5))]>; +} + 
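The explicit-length compares read their lengths from EAX and EDX, which is why the instantiation below keeps Uses = [EAX, EDX]. A hedged usage sketch with the C intrinsics from <nmmintrin.h> (intrinsic names and the SSE4.2 compile flag are assumptions; countBytesInSet is a hypothetical helper); the two int arguments are what the compiler materializes into EAX and EDX:

#include <nmmintrin.h>  // SSE4.2 intrinsics; build with e.g. -msse4.2

// Count how many of the first TextLen bytes of Text belong to the byte set
// given by Set/SetLen. Both buffers are assumed to have at least 16 readable
// bytes; both lengths must be <= 16. The length arguments become EAX/EDX.
int countBytesInSet(const char *Set, int SetLen, const char *Text, int TextLen) {
  __m128i VSet  = _mm_loadu_si128(reinterpret_cast<const __m128i *>(Set));
  __m128i VText = _mm_loadu_si128(reinterpret_cast<const __m128i *>(Text));
  // Bit j of the result mask is set when Text[j] (j < TextLen) matches any
  // of the first SetLen bytes of Set.
  __m128i Mask = _mm_cmpestrm(VSet, SetLen, VText, TextLen,
                              _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
                              _SIDD_BIT_MASK);
  return _mm_popcnt_u32(static_cast<unsigned>(_mm_cvtsi128_si32(Mask)));
}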
+let Defs = [EFLAGS], Uses = [EAX, EDX], usesCustomInserter = 1 in { + defm PCMPESTRM128 : pseudo_pcmpestrm<"#PCMPESTRM128">, Requires<[HasSSE42]>; + defm VPCMPESTRM128 : pseudo_pcmpestrm<"#VPCMPESTRM128">, Requires<[HasAVX]>; } let isAsmParserOnly = 1, Predicates = [HasAVX], @@ -4941,3 +5372,579 @@ def AESKEYGENASSIST128rm : AESAI<0xDF, MRMSrcMem, (outs VR128:$dst), (int_x86_aesni_aeskeygenassist (bitconvert (memopv2i64 addr:$src1)), imm:$src2))]>, OpSize; + +//===----------------------------------------------------------------------===// +// CLMUL Instructions +//===----------------------------------------------------------------------===// + +// Only the AVX version of CLMUL instructions are described here. + +// Carry-less Multiplication instructions +let isAsmParserOnly = 1 in { +def VPCLMULQDQrr : CLMULIi8<0x44, MRMSrcReg, (outs VR128:$dst), + (ins VR128:$src1, VR128:$src2, i8imm:$src3), + "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>; + +def VPCLMULQDQrm : CLMULIi8<0x44, MRMSrcMem, (outs VR128:$dst), + (ins VR128:$src1, i128mem:$src2, i8imm:$src3), + "vpclmulqdq\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>; + +// Assembler Only +multiclass avx_vpclmul<string asm> { + def rr : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, VR128:$src2), + !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + []>; + + def rm : I<0, Pseudo, (outs VR128:$dst), (ins VR128:$src1, i128mem:$src2), + !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + []>; +} +defm VPCLMULHQHQDQ : avx_vpclmul<"vpclmulhqhqdq">; +defm VPCLMULHQLQDQ : avx_vpclmul<"vpclmulhqlqdq">; +defm VPCLMULLQHQDQ : avx_vpclmul<"vpclmullqhqdq">; +defm VPCLMULLQLQDQ : avx_vpclmul<"vpclmullqlqdq">; + +} // isAsmParserOnly + +//===----------------------------------------------------------------------===// +// AVX Instructions +//===----------------------------------------------------------------------===// + +let isAsmParserOnly = 1 in { + +// Load from memory and broadcast to all elements of the destination operand +class avx_broadcast<bits<8> opc, string OpcodeStr, RegisterClass RC, + X86MemOperand x86memop, Intrinsic Int> : + AVX8I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src), + !strconcat(OpcodeStr, "\t{$src, $dst|$dst, $src}"), + [(set RC:$dst, (Int addr:$src))]>, VEX; + +def VBROADCASTSS : avx_broadcast<0x18, "vbroadcastss", VR128, f32mem, + int_x86_avx_vbroadcastss>; +def VBROADCASTSSY : avx_broadcast<0x18, "vbroadcastss", VR256, f32mem, + int_x86_avx_vbroadcastss_256>; +def VBROADCASTSD : avx_broadcast<0x19, "vbroadcastsd", VR256, f64mem, + int_x86_avx_vbroadcast_sd_256>; +def VBROADCASTF128 : avx_broadcast<0x1A, "vbroadcastf128", VR256, f128mem, + int_x86_avx_vbroadcastf128_pd_256>; + +// Insert packed floating-point values +def VINSERTF128rr : AVXAIi8<0x18, MRMSrcReg, (outs VR256:$dst), + (ins VR256:$src1, VR128:$src2, i8imm:$src3), + "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>, VEX_4V; +def VINSERTF128rm : AVXAIi8<0x18, MRMSrcMem, (outs VR256:$dst), + (ins VR256:$src1, f128mem:$src2, i8imm:$src3), + "vinsertf128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>, VEX_4V; + +// Extract packed floating-point values +def VEXTRACTF128rr : AVXAIi8<0x19, MRMDestReg, (outs VR128:$dst), + (ins VR256:$src1, i8imm:$src2), + "vextractf128\t{$src2, $src1, $dst|$dst, $src1, $src2}", + []>, VEX; +def VEXTRACTF128mr : AVXAIi8<0x19, MRMDestMem, (outs), + (ins f128mem:$dst, VR256:$src1, i8imm:$src2), + "vextractf128\t{$src2, 
$src1, $dst|$dst, $src1, $src2}", + []>, VEX; + +// Conditional SIMD Packed Loads and Stores +multiclass avx_movmask_rm<bits<8> opc_rm, bits<8> opc_mr, string OpcodeStr, + Intrinsic IntLd, Intrinsic IntLd256, + Intrinsic IntSt, Intrinsic IntSt256, + PatFrag pf128, PatFrag pf256> { + def rm : AVX8I<opc_rm, MRMSrcMem, (outs VR128:$dst), + (ins VR128:$src1, f128mem:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + [(set VR128:$dst, (IntLd addr:$src2, VR128:$src1))]>, + VEX_4V; + def Yrm : AVX8I<opc_rm, MRMSrcMem, (outs VR256:$dst), + (ins VR256:$src1, f256mem:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + [(set VR256:$dst, (IntLd256 addr:$src2, VR256:$src1))]>, + VEX_4V; + def mr : AVX8I<opc_mr, MRMDestMem, (outs), + (ins f128mem:$dst, VR128:$src1, VR128:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + [(IntSt addr:$dst, VR128:$src1, VR128:$src2)]>, VEX_4V; + def Ymr : AVX8I<opc_mr, MRMDestMem, (outs), + (ins f256mem:$dst, VR256:$src1, VR256:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + [(IntSt256 addr:$dst, VR256:$src1, VR256:$src2)]>, VEX_4V; +} + +defm VMASKMOVPS : avx_movmask_rm<0x2C, 0x2E, "vmaskmovps", + int_x86_avx_maskload_ps, + int_x86_avx_maskload_ps_256, + int_x86_avx_maskstore_ps, + int_x86_avx_maskstore_ps_256, + memopv4f32, memopv8f32>; +defm VMASKMOVPD : avx_movmask_rm<0x2D, 0x2F, "vmaskmovpd", + int_x86_avx_maskload_pd, + int_x86_avx_maskload_pd_256, + int_x86_avx_maskstore_pd, + int_x86_avx_maskstore_pd_256, + memopv2f64, memopv4f64>; + +// Permute Floating-Point Values +multiclass avx_permil<bits<8> opc_rm, bits<8> opc_rmi, string OpcodeStr, + RegisterClass RC, X86MemOperand x86memop_f, + X86MemOperand x86memop_i, PatFrag f_frag, PatFrag i_frag, + Intrinsic IntVar, Intrinsic IntImm> { + def rr : AVX8I<opc_rm, MRMSrcReg, (outs RC:$dst), + (ins RC:$src1, RC:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + [(set RC:$dst, (IntVar RC:$src1, RC:$src2))]>, VEX_4V; + def rm : AVX8I<opc_rm, MRMSrcMem, (outs RC:$dst), + (ins RC:$src1, x86memop_i:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + [(set RC:$dst, (IntVar RC:$src1, (i_frag addr:$src2)))]>, VEX_4V; + + def ri : AVXAIi8<opc_rmi, MRMSrcReg, (outs RC:$dst), + (ins RC:$src1, i8imm:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + [(set RC:$dst, (IntImm RC:$src1, imm:$src2))]>, VEX; + def mi : AVXAIi8<opc_rmi, MRMSrcMem, (outs RC:$dst), + (ins x86memop_f:$src1, i8imm:$src2), + !strconcat(OpcodeStr, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"), + [(set RC:$dst, (IntImm (f_frag addr:$src1), imm:$src2))]>, VEX; +} + +defm VPERMILPS : avx_permil<0x0C, 0x04, "vpermilps", VR128, f128mem, i128mem, + memopv4f32, memopv4i32, + int_x86_avx_vpermilvar_ps, + int_x86_avx_vpermil_ps>; +defm VPERMILPSY : avx_permil<0x0C, 0x04, "vpermilps", VR256, f256mem, i256mem, + memopv8f32, memopv8i32, + int_x86_avx_vpermilvar_ps_256, + int_x86_avx_vpermil_ps_256>; +defm VPERMILPD : avx_permil<0x0D, 0x05, "vpermilpd", VR128, f128mem, i128mem, + memopv2f64, memopv2i64, + int_x86_avx_vpermilvar_pd, + int_x86_avx_vpermil_pd>; +defm VPERMILPDY : avx_permil<0x0D, 0x05, "vpermilpd", VR256, f256mem, i256mem, + memopv4f64, memopv4i64, + int_x86_avx_vpermilvar_pd_256, + int_x86_avx_vpermil_pd_256>; + +def VPERM2F128rr : AVXAIi8<0x06, MRMSrcReg, (outs VR256:$dst), + (ins VR256:$src1, VR256:$src2, i8imm:$src3), + "vperm2f128\t{$src3, $src2, $src1, 
$dst|$dst, $src1, $src2, $src3}", + []>, VEX_4V; +def VPERM2F128rm : AVXAIi8<0x06, MRMSrcMem, (outs VR256:$dst), + (ins VR256:$src1, f256mem:$src2, i8imm:$src3), + "vperm2f128\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}", + []>, VEX_4V; + +// Zero All YMM registers +def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall", + [(int_x86_avx_vzeroall)]>, VEX, VEX_L, Requires<[HasAVX]>; + +// Zero Upper bits of YMM registers +def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper", + [(int_x86_avx_vzeroupper)]>, VEX, Requires<[HasAVX]>; + +} // isAsmParserOnly + +def : Pat<(int_x86_avx_vinsertf128_pd_256 VR256:$src1, VR128:$src2, imm:$src3), + (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>; +def : Pat<(int_x86_avx_vinsertf128_ps_256 VR256:$src1, VR128:$src2, imm:$src3), + (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>; +def : Pat<(int_x86_avx_vinsertf128_si_256 VR256:$src1, VR128:$src2, imm:$src3), + (VINSERTF128rr VR256:$src1, VR128:$src2, imm:$src3)>; + +def : Pat<(int_x86_avx_vextractf128_pd_256 VR256:$src1, imm:$src2), + (VEXTRACTF128rr VR256:$src1, imm:$src2)>; +def : Pat<(int_x86_avx_vextractf128_ps_256 VR256:$src1, imm:$src2), + (VEXTRACTF128rr VR256:$src1, imm:$src2)>; +def : Pat<(int_x86_avx_vextractf128_si_256 VR256:$src1, imm:$src2), + (VEXTRACTF128rr VR256:$src1, imm:$src2)>; + +def : Pat<(int_x86_avx_vbroadcastf128_ps_256 addr:$src), + (VBROADCASTF128 addr:$src)>; + +def : Pat<(int_x86_avx_vperm2f128_ps_256 VR256:$src1, VR256:$src2, imm:$src3), + (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>; +def : Pat<(int_x86_avx_vperm2f128_pd_256 VR256:$src1, VR256:$src2, imm:$src3), + (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>; +def : Pat<(int_x86_avx_vperm2f128_si_256 VR256:$src1, VR256:$src2, imm:$src3), + (VPERM2F128rr VR256:$src1, VR256:$src2, imm:$src3)>; + +def : Pat<(int_x86_avx_vperm2f128_ps_256 + VR256:$src1, (memopv8f32 addr:$src2), imm:$src3), + (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>; +def : Pat<(int_x86_avx_vperm2f128_pd_256 + VR256:$src1, (memopv4f64 addr:$src2), imm:$src3), + (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>; +def : Pat<(int_x86_avx_vperm2f128_si_256 + VR256:$src1, (memopv8i32 addr:$src2), imm:$src3), + (VPERM2F128rm VR256:$src1, addr:$src2, imm:$src3)>; + +//===----------------------------------------------------------------------===// +// SSE Shuffle pattern fragments +//===----------------------------------------------------------------------===// + +// This is part of a "work in progress" refactoring. The idea is that all +// vector shuffles are going to be translated into target specific nodes and +// directly matched by the patterns below (which can be changed along the way) +// The AVX version of some but not all of them are described here, and more +// should come in a near future. + +// Shuffle with PSHUFD instruction folding loads. The first two patterns match +// SSE2 loads, which are always promoted to v2i64. The last one should match +// the SSE1 case, where the only legal load is v4f32, but there is no PSHUFD +// in SSE2, how does it ever worked? Anyway, the pattern will remain here until +// we investigate further. 
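For reference while reading the PSHUFD patterns below, a scalar C++ sketch of the immediate encoding being matched: two bits of the immediate select the source dword for each destination lane. Illustrative only, not part of the patch.

#include <cstdint>
#include <cassert>

// Scalar model of pshufd dst, src, imm: destination dword i is source dword
// ((imm >> 2*i) & 3). The X86PShufd patterns fold a load into $src1.
void pshufd(uint32_t Dst[4], const uint32_t Src[4], uint8_t Imm) {
  for (int I = 0; I != 4; ++I)
    Dst[I] = Src[(Imm >> (2 * I)) & 3];
}

int main() {
  const uint32_t Src[4] = {10, 11, 12, 13};
  uint32_t Dst[4];
  pshufd(Dst, Src, 0x1B);   // 0b00'01'10'11: reverses the four dwords
  assert(Dst[0] == 13 && Dst[3] == 10);
  return 0;
}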
+def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)), + (i8 imm:$imm))), + (VPSHUFDmi addr:$src1, imm:$imm)>, Requires<[HasAVX]>; +def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv2i64 addr:$src1)), + (i8 imm:$imm))), + (PSHUFDmi addr:$src1, imm:$imm)>; +def : Pat<(v4i32 (X86PShufd (bc_v4i32 (memopv4f32 addr:$src1)), + (i8 imm:$imm))), + (PSHUFDmi addr:$src1, imm:$imm)>; // FIXME: has this ever worked? + +// Shuffle with PSHUFD instruction. +def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))), + (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>; +def : Pat<(v4f32 (X86PShufd VR128:$src1, (i8 imm:$imm))), + (PSHUFDri VR128:$src1, imm:$imm)>; + +def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))), + (VPSHUFDri VR128:$src1, imm:$imm)>, Requires<[HasAVX]>; +def : Pat<(v4i32 (X86PShufd VR128:$src1, (i8 imm:$imm))), + (PSHUFDri VR128:$src1, imm:$imm)>; + +// Shuffle with SHUFPD instruction. +def : Pat<(v2f64 (X86Shufps VR128:$src1, + (memopv2f64 addr:$src2), (i8 imm:$imm))), + (VSHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>; +def : Pat<(v2f64 (X86Shufps VR128:$src1, + (memopv2f64 addr:$src2), (i8 imm:$imm))), + (SHUFPDrmi VR128:$src1, addr:$src2, imm:$imm)>; + +def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))), + (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>; +def : Pat<(v2i64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))), + (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>; + +def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))), + (VSHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>; +def : Pat<(v2f64 (X86Shufpd VR128:$src1, VR128:$src2, (i8 imm:$imm))), + (SHUFPDrri VR128:$src1, VR128:$src2, imm:$imm)>; + +// Shuffle with SHUFPS instruction. 
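Likewise, a scalar sketch of the SHUFPS selection that the following patterns match: the immediate picks the two low result lanes from the first source and the two high result lanes from the second. Illustrative only, not part of the patch.

#include <cassert>

// Scalar model of shufps: result lanes 0..1 come from the first source,
// selected by imm[1:0] and imm[3:2]; lanes 2..3 come from the second source,
// selected by imm[5:4] and imm[7:6].
void shufps(float Dst[4], const float A[4], const float B[4], unsigned char Imm) {
  float R[4];
  R[0] = A[Imm & 3];
  R[1] = A[(Imm >> 2) & 3];
  R[2] = B[(Imm >> 4) & 3];
  R[3] = B[(Imm >> 6) & 3];
  for (int I = 0; I != 4; ++I) Dst[I] = R[I];
}

int main() {
  const float A[4] = {0, 1, 2, 3}, B[4] = {4, 5, 6, 7};
  float D[4];
  shufps(D, A, B, 0x44);   // 0b01'00'01'00: {A[0], A[1], B[0], B[1]}
  assert(D[0] == 0 && D[1] == 1 && D[2] == 4 && D[3] == 5);
  return 0;
}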
+def : Pat<(v4f32 (X86Shufps VR128:$src1, + (memopv4f32 addr:$src2), (i8 imm:$imm))), + (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>; +def : Pat<(v4f32 (X86Shufps VR128:$src1, + (memopv4f32 addr:$src2), (i8 imm:$imm))), + (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>; + +def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))), + (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>; +def : Pat<(v4f32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))), + (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>; + +def : Pat<(v4i32 (X86Shufps VR128:$src1, + (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))), + (VSHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>, Requires<[HasAVX]>; +def : Pat<(v4i32 (X86Shufps VR128:$src1, + (bc_v4i32 (memopv2i64 addr:$src2)), (i8 imm:$imm))), + (SHUFPSrmi VR128:$src1, addr:$src2, imm:$imm)>; + +def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))), + (VSHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>, Requires<[HasAVX]>; +def : Pat<(v4i32 (X86Shufps VR128:$src1, VR128:$src2, (i8 imm:$imm))), + (SHUFPSrri VR128:$src1, VR128:$src2, imm:$imm)>; + +// Shuffle with MOVHLPS instruction +def : Pat<(v4f32 (X86Movhlps VR128:$src1, VR128:$src2)), + (MOVHLPSrr VR128:$src1, VR128:$src2)>; +def : Pat<(v4i32 (X86Movhlps VR128:$src1, VR128:$src2)), + (MOVHLPSrr VR128:$src1, VR128:$src2)>; + +// Shuffle with MOVDDUP instruction +def : Pat<(X86Movddup (memopv2f64 addr:$src)), + (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>; +def : Pat<(X86Movddup (memopv2f64 addr:$src)), + (MOVDDUPrm addr:$src)>; + +def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))), + (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>; +def : Pat<(X86Movddup (bc_v4f32 (memopv2f64 addr:$src))), + (MOVDDUPrm addr:$src)>; + +def : Pat<(X86Movddup (memopv2i64 addr:$src)), + (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>; +def : Pat<(X86Movddup (memopv2i64 addr:$src)), + (MOVDDUPrm addr:$src)>; + +def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))), + (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>; +def : Pat<(X86Movddup (bc_v4i32 (memopv2i64 addr:$src))), + (MOVDDUPrm addr:$src)>; + +def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))), + (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>; +def : Pat<(X86Movddup (v2f64 (scalar_to_vector (loadf64 addr:$src)))), + (MOVDDUPrm addr:$src)>; + +def : Pat<(X86Movddup (bc_v2f64 + (v2i64 (scalar_to_vector (loadi64 addr:$src))))), + (VMOVDDUPrm addr:$src)>, Requires<[HasAVX]>; +def : Pat<(X86Movddup (bc_v2f64 + (v2i64 (scalar_to_vector (loadi64 addr:$src))))), + (MOVDDUPrm addr:$src)>; + +// Shuffle with UNPCKLPS +def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))), + (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>; +def : Pat<(v4f32 (X86Unpcklps VR128:$src1, (memopv4f32 addr:$src2))), + (UNPCKLPSrm VR128:$src1, addr:$src2)>; + +def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)), + (VUNPCKLPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>; +def : Pat<(v4f32 (X86Unpcklps VR128:$src1, VR128:$src2)), + (UNPCKLPSrr VR128:$src1, VR128:$src2)>; + +// Shuffle with UNPCKHPS +def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))), + (VUNPCKHPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>; +def : Pat<(v4f32 (X86Unpckhps VR128:$src1, (memopv4f32 addr:$src2))), + (UNPCKHPSrm VR128:$src1, addr:$src2)>; + +def : Pat<(v4f32 (X86Unpckhps VR128:$src1, VR128:$src2)), + (VUNPCKHPSrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>; +def : Pat<(v4f32 (X86Unpckhps VR128:$src1, 
VR128:$src2)), + (UNPCKHPSrr VR128:$src1, VR128:$src2)>; + +// Shuffle with UNPCKLPD +def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))), + (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>; +def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, (memopv2f64 addr:$src2))), + (UNPCKLPSrm VR128:$src1, addr:$src2)>; + +def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)), + (VUNPCKLPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>; +def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, VR128:$src2)), + (UNPCKLPDrr VR128:$src1, VR128:$src2)>; + +// Shuffle with UNPCKHPD +def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))), + (VUNPCKLPSrm VR128:$src1, addr:$src2)>, Requires<[HasAVX]>; +def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, (memopv2f64 addr:$src2))), + (UNPCKLPSrm VR128:$src1, addr:$src2)>; + +def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)), + (VUNPCKHPDrr VR128:$src1, VR128:$src2)>, Requires<[HasAVX]>; +def : Pat<(v2f64 (X86Unpckhpd VR128:$src1, VR128:$src2)), + (UNPCKHPDrr VR128:$src1, VR128:$src2)>; + +// Shuffle with PUNPCKLBW +def : Pat<(v16i8 (X86Punpcklbw VR128:$src1, + (bc_v16i8 (memopv2i64 addr:$src2)))), + (PUNPCKLBWrm VR128:$src1, addr:$src2)>; +def : Pat<(v16i8 (X86Punpcklbw VR128:$src1, VR128:$src2)), + (PUNPCKLBWrr VR128:$src1, VR128:$src2)>; + +// Shuffle with PUNPCKLWD +def : Pat<(v8i16 (X86Punpcklwd VR128:$src1, + (bc_v8i16 (memopv2i64 addr:$src2)))), + (PUNPCKLWDrm VR128:$src1, addr:$src2)>; +def : Pat<(v8i16 (X86Punpcklwd VR128:$src1, VR128:$src2)), + (PUNPCKLWDrr VR128:$src1, VR128:$src2)>; + +// Shuffle with PUNPCKLDQ +def : Pat<(v4i32 (X86Punpckldq VR128:$src1, + (bc_v4i32 (memopv2i64 addr:$src2)))), + (PUNPCKLDQrm VR128:$src1, addr:$src2)>; +def : Pat<(v4i32 (X86Punpckldq VR128:$src1, VR128:$src2)), + (PUNPCKLDQrr VR128:$src1, VR128:$src2)>; + +// Shuffle with PUNPCKLQDQ +def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, (memopv2i64 addr:$src2))), + (PUNPCKLQDQrm VR128:$src1, addr:$src2)>; +def : Pat<(v2i64 (X86Punpcklqdq VR128:$src1, VR128:$src2)), + (PUNPCKLQDQrr VR128:$src1, VR128:$src2)>; + +// Shuffle with PUNPCKHBW +def : Pat<(v16i8 (X86Punpckhbw VR128:$src1, + (bc_v16i8 (memopv2i64 addr:$src2)))), + (PUNPCKHBWrm VR128:$src1, addr:$src2)>; +def : Pat<(v16i8 (X86Punpckhbw VR128:$src1, VR128:$src2)), + (PUNPCKHBWrr VR128:$src1, VR128:$src2)>; + +// Shuffle with PUNPCKHWD +def : Pat<(v8i16 (X86Punpckhwd VR128:$src1, + (bc_v8i16 (memopv2i64 addr:$src2)))), + (PUNPCKHWDrm VR128:$src1, addr:$src2)>; +def : Pat<(v8i16 (X86Punpckhwd VR128:$src1, VR128:$src2)), + (PUNPCKHWDrr VR128:$src1, VR128:$src2)>; + +// Shuffle with PUNPCKHDQ +def : Pat<(v4i32 (X86Punpckhdq VR128:$src1, + (bc_v4i32 (memopv2i64 addr:$src2)))), + (PUNPCKHDQrm VR128:$src1, addr:$src2)>; +def : Pat<(v4i32 (X86Punpckhdq VR128:$src1, VR128:$src2)), + (PUNPCKHDQrr VR128:$src1, VR128:$src2)>; + +// Shuffle with PUNPCKHQDQ +def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, (memopv2i64 addr:$src2))), + (PUNPCKHQDQrm VR128:$src1, addr:$src2)>; +def : Pat<(v2i64 (X86Punpckhqdq VR128:$src1, VR128:$src2)), + (PUNPCKHQDQrr VR128:$src1, VR128:$src2)>; + +// Shuffle with MOVLHPS +def : Pat<(X86Movlhps VR128:$src1, + (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))), + (MOVHPSrm VR128:$src1, addr:$src2)>; +def : Pat<(X86Movlhps VR128:$src1, + (bc_v4i32 (v2i64 (X86vzload addr:$src2)))), + (MOVHPSrm VR128:$src1, addr:$src2)>; +def : Pat<(v4f32 (X86Movlhps VR128:$src1, VR128:$src2)), + (MOVLHPSrr VR128:$src1, VR128:$src2)>; +def : Pat<(v4i32 (X86Movlhps VR128:$src1, VR128:$src2)), + 
(MOVLHPSrr VR128:$src1, VR128:$src2)>; +def : Pat<(v2i64 (X86Movlhps VR128:$src1, VR128:$src2)), + (MOVLHPSrr (v2i64 VR128:$src1), VR128:$src2)>; + +// Shuffle with MOVLHPD +def : Pat<(v2f64 (X86Movlhpd VR128:$src1, + (scalar_to_vector (loadf64 addr:$src2)))), + (MOVHPDrm VR128:$src1, addr:$src2)>; +// FIXME: Instead of X86Unpcklpd, there should be a X86Movlhpd here, the problem +// is during lowering, where it's not possible to recognize the load fold cause +// it has two uses through a bitcast. One use disappears at isel time and the +// fold opportunity reappears. +def : Pat<(v2f64 (X86Unpcklpd VR128:$src1, + (scalar_to_vector (loadf64 addr:$src2)))), + (MOVHPDrm VR128:$src1, addr:$src2)>; + +// Shuffle with MOVSS +def : Pat<(v4f32 (X86Movss VR128:$src1, (scalar_to_vector FR32:$src2))), + (MOVSSrr VR128:$src1, FR32:$src2)>; +def : Pat<(v4i32 (X86Movss VR128:$src1, VR128:$src2)), + (MOVSSrr (v4i32 VR128:$src1), + (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>; +def : Pat<(v4f32 (X86Movss VR128:$src1, VR128:$src2)), + (MOVSSrr (v4f32 VR128:$src1), + (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>; +// FIXME: Instead of a X86Movss there should be a X86Movlps here, the problem +// is during lowering, where it's not possible to recognize the load fold cause +// it has two uses through a bitcast. One use disappears at isel time and the +// fold opportunity reappears. +def : Pat<(X86Movss VR128:$src1, + (bc_v4i32 (v2i64 (load addr:$src2)))), + (MOVLPSrm VR128:$src1, addr:$src2)>; + +// Shuffle with MOVSD +def : Pat<(v2f64 (X86Movsd VR128:$src1, (scalar_to_vector FR64:$src2))), + (MOVSDrr VR128:$src1, FR64:$src2)>; +def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)), + (MOVSDrr (v2i64 VR128:$src1), + (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>; +def : Pat<(v2f64 (X86Movsd VR128:$src1, VR128:$src2)), + (MOVSDrr (v2f64 VR128:$src1), + (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>; +def : Pat<(v4f32 (X86Movsd VR128:$src1, VR128:$src2)), + (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_sd))>; +def : Pat<(v4i32 (X86Movsd VR128:$src1, VR128:$src2)), + (MOVSDrr VR128:$src1, (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_sd))>; + +// Shuffle with MOVSHDUP +def : Pat<(v4i32 (X86Movshdup VR128:$src)), + (MOVSHDUPrr VR128:$src)>; +def : Pat<(X86Movshdup (bc_v4i32 (memopv2i64 addr:$src))), + (MOVSHDUPrm addr:$src)>; + +def : Pat<(v4f32 (X86Movshdup VR128:$src)), + (MOVSHDUPrr VR128:$src)>; +def : Pat<(X86Movshdup (memopv4f32 addr:$src)), + (MOVSHDUPrm addr:$src)>; + +// Shuffle with MOVSLDUP +def : Pat<(v4i32 (X86Movsldup VR128:$src)), + (MOVSLDUPrr VR128:$src)>; +def : Pat<(X86Movsldup (bc_v4i32 (memopv2i64 addr:$src))), + (MOVSLDUPrm addr:$src)>; + +def : Pat<(v4f32 (X86Movsldup VR128:$src)), + (MOVSLDUPrr VR128:$src)>; +def : Pat<(X86Movsldup (memopv4f32 addr:$src)), + (MOVSLDUPrm addr:$src)>; + +// Shuffle with PSHUFHW +def : Pat<(v8i16 (X86PShufhwLd addr:$src, (i8 imm:$imm))), + (PSHUFHWmi addr:$src, imm:$imm)>; +def : Pat<(v8i16 (X86PShufhw VR128:$src, (i8 imm:$imm))), + (PSHUFHWri VR128:$src, imm:$imm)>; +def : Pat<(v8i16 (X86PShufhw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))), + (PSHUFHWmi addr:$src, imm:$imm)>; + +// Shuffle with PSHUFLW +def : Pat<(v8i16 (X86PShuflwLd addr:$src, (i8 imm:$imm))), + (PSHUFLWmi addr:$src, imm:$imm)>; +def : Pat<(v8i16 (X86PShuflw VR128:$src, (i8 imm:$imm))), + (PSHUFLWri VR128:$src, imm:$imm)>; +def : Pat<(v8i16 (X86PShuflw (bc_v8i16 (memopv2i64 addr:$src)), (i8 imm:$imm))), + (PSHUFLWmi addr:$src, imm:$imm)>; + +// Shuffle with PALIGN +def : 
Pat<(v1i64 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))), + (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>; +def : Pat<(v2i32 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))), + (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>; +def : Pat<(v4i16 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))), + (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>; +def : Pat<(v8i8 (X86PAlign VR64:$src1, VR64:$src2, (i8 imm:$imm))), + (PALIGNR64rr VR64:$src2, VR64:$src1, imm:$imm)>; + +def : Pat<(v4i32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))), + (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>; +def : Pat<(v4f32 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))), + (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>; +def : Pat<(v8i16 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))), + (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>; +def : Pat<(v16i8 (X86PAlign VR128:$src1, VR128:$src2, (i8 imm:$imm))), + (PALIGNR128rr VR128:$src2, VR128:$src1, imm:$imm)>; + +// Shuffle with MOVLPS +def : Pat<(v4f32 (X86Movlps VR128:$src1, (load addr:$src2))), + (MOVLPSrm VR128:$src1, addr:$src2)>; +def : Pat<(v4i32 (X86Movlps VR128:$src1, (load addr:$src2))), + (MOVLPSrm VR128:$src1, addr:$src2)>; +def : Pat<(X86Movlps VR128:$src1, + (bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))), + (MOVLPSrm VR128:$src1, addr:$src2)>; + +// Shuffle with MOVLPD +def : Pat<(v2f64 (X86Movlpd VR128:$src1, (load addr:$src2))), + (MOVLPDrm VR128:$src1, addr:$src2)>; +def : Pat<(v2i64 (X86Movlpd VR128:$src1, (load addr:$src2))), + (MOVLPDrm VR128:$src1, addr:$src2)>; +def : Pat<(v2f64 (X86Movlpd VR128:$src1, + (scalar_to_vector (loadf64 addr:$src2)))), + (MOVLPDrm VR128:$src1, addr:$src2)>; + +// Extra patterns to match stores with MOVHPS/PD and MOVLPS/PD +def : Pat<(store (f64 (vector_extract + (v2f64 (X86Unpckhps VR128:$src, (undef))), (iPTR 0))),addr:$dst), + (MOVHPSmr addr:$dst, VR128:$src)>; +def : Pat<(store (f64 (vector_extract + (v2f64 (X86Unpckhpd VR128:$src, (undef))), (iPTR 0))),addr:$dst), + (MOVHPDmr addr:$dst, VR128:$src)>; + +def : Pat<(store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)),addr:$src1), + (MOVLPSmr addr:$src1, VR128:$src2)>; +def : Pat<(store (v4i32 (X86Movlps + (bc_v4i32 (loadv2i64 addr:$src1)), VR128:$src2)), addr:$src1), + (MOVLPSmr addr:$src1, VR128:$src2)>; + +def : Pat<(store (v2f64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1), + (MOVLPDmr addr:$src1, VR128:$src2)>; +def : Pat<(store (v2i64 (X86Movlpd (load addr:$src1), VR128:$src2)),addr:$src1), + (MOVLPDmr addr:$src1, VR128:$src2)>; diff --git a/lib/Target/X86/X86MCAsmInfo.cpp b/lib/Target/X86/X86MCAsmInfo.cpp index 2b8720bac343..36badb403e81 100644 --- a/lib/Target/X86/X86MCAsmInfo.cpp +++ b/lib/Target/X86/X86MCAsmInfo.cpp @@ -103,6 +103,9 @@ getNonexecutableStackSection(MCContext &Ctx) const { } X86MCAsmInfoCOFF::X86MCAsmInfoCOFF(const Triple &Triple) { + if (Triple.getArch() == Triple::x86_64) + GlobalPrefix = ""; + AsmTransCBE = x86_asm_table; AssemblerDialect = AsmWriterFlavor; diff --git a/lib/Target/X86/X86MCCodeEmitter.cpp b/lib/Target/X86/X86MCCodeEmitter.cpp index 23b0666f5f30..9564fe0b92d4 100644 --- a/lib/Target/X86/X86MCCodeEmitter.cpp +++ b/lib/Target/X86/X86MCCodeEmitter.cpp @@ -365,7 +365,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, const TargetInstrDesc &Desc, raw_ostream &OS) const { bool HasVEX_4V = false; - if (TSFlags & X86II::VEX_4V) + if ((TSFlags >> 32) & X86II::VEX_4V) HasVEX_4V = true; // VEX_R: opcode externsion equivalent to REX.R in 
@@ -429,10 +429,10 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, if (TSFlags & X86II::OpSize) VEX_PP = 0x01; - if (TSFlags & X86II::VEX_W) + if ((TSFlags >> 32) & X86II::VEX_W) VEX_W = 1; - if (TSFlags & X86II::VEX_L) + if ((TSFlags >> 32) & X86II::VEX_L) VEX_L = 1; switch (TSFlags & X86II::Op0Mask) { @@ -469,33 +469,39 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, unsigned NumOps = MI.getNumOperands(); unsigned CurOp = 0; + bool IsDestMem = false; switch (TSFlags & X86II::FormMask) { case X86II::MRMInitReg: assert(0 && "FIXME: Remove this!"); + case X86II::MRMDestMem: + IsDestMem = true; + // The important info for the VEX prefix is never beyond the address + // registers. Don't check beyond that. + NumOps = CurOp = X86::AddrNumOperands; case X86II::MRM0m: case X86II::MRM1m: case X86II::MRM2m: case X86II::MRM3m: case X86II::MRM4m: case X86II::MRM5m: case X86II::MRM6m: case X86II::MRM7m: - case X86II::MRMDestMem: - NumOps = CurOp = X86::AddrNumOperands; case X86II::MRMSrcMem: case X86II::MRMSrcReg: if (MI.getNumOperands() > CurOp && MI.getOperand(CurOp).isReg() && X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_R = 0x0; - - // CurOp and NumOps are equal when VEX_R represents a register used - // to index a memory destination (which is the last operand) - CurOp = (CurOp == NumOps) ? 0 : CurOp+1; + CurOp++; if (HasVEX_4V) { - VEX_4V = getVEXRegisterEncoding(MI, CurOp); + VEX_4V = getVEXRegisterEncoding(MI, IsDestMem ? CurOp-1 : CurOp); CurOp++; } + // To only check operands before the memory address ones, start + // the search from the begining + if (IsDestMem) + CurOp = 0; + // If the last register should be encoded in the immediate field // do not use any bit from VEX prefix to this register, ignore it - if (TSFlags & X86II::VEX_I8IMM) + if ((TSFlags >> 32) & X86II::VEX_I8IMM) NumOps--; for (; CurOp != NumOps; ++CurOp) { @@ -508,7 +514,10 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, VEX_X = 0x0; } break; - default: // MRMDestReg, MRM0r-MRM7r + default: // MRMDestReg, MRM0r-MRM7r, RawFrm + if (!MI.getNumOperands()) + break; + if (MI.getOperand(CurOp).isReg() && X86InstrInfo::isX86_64ExtendedReg(MI.getOperand(CurOp).getReg())) VEX_B = 0; @@ -524,7 +533,6 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, VEX_R = 0x0; } break; - assert(0 && "Not implemented!"); } // Emit segment override opcode prefix as needed. @@ -793,9 +801,9 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, // It uses the VEX.VVVV field? bool HasVEX_4V = false; - if (TSFlags & X86II::VEX) + if ((TSFlags >> 32) & X86II::VEX) HasVEXPrefix = true; - if (TSFlags & X86II::VEX_4V) + if ((TSFlags >> 32) & X86II::VEX_4V) HasVEX_4V = true; // Determine where the memory operand starts, if present. 
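The recurring (TSFlags >> 32) & X86II::VEX_* checks in this hunk exist because the VEX attribute bits live in the upper half of the 64-bit TSFlags word; masking without the shift would test unrelated low bits. A self-contained sketch of that layout, using hypothetical flag values rather than the real X86II enumerators:

#include <cstdint>
#include <cassert>

// Hypothetical stand-ins: the low 32 bits hold the legacy format/prefix bits,
// while the VEX attribute bits are defined as positions within the high 32 bits.
enum LowBits : uint32_t { OpSizeBit = 1u << 6 };
enum VEXBits : uint32_t { VEX = 1u << 0, VEX_4V = 1u << 2, VEX_L = 1u << 4 };

inline bool hasVEXFlag(uint64_t TSFlags, uint32_t Flag) {
  return (TSFlags >> 32) & Flag;   // shift down the high half first, then mask
}

int main() {
  // An instruction with the OpSize prefix bit plus the VEX and VEX_4V attributes.
  uint64_t TSFlags = uint64_t(OpSizeBit) | ((uint64_t(VEX) | VEX_4V) << 32);
  assert(hasVEXFlag(TSFlags, VEX_4V) && !hasVEXFlag(TSFlags, VEX_L));
  // A plain (TSFlags & VEX_4V) would consult low bit 2 of the word instead of
  // the intended high bit, which is why the patch adds the >> 32 everywhere.
  return 0;
}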
@@ -819,6 +827,14 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, case X86II::RawFrm: EmitByte(BaseOpcode, CurByte, OS); break; + + case X86II::RawFrmImm16: + EmitByte(BaseOpcode, CurByte, OS); + EmitImmediate(MI.getOperand(CurOp++), + X86II::getSizeOfImm(TSFlags), getImmFixupKind(TSFlags), + CurByte, OS, Fixups); + EmitImmediate(MI.getOperand(CurOp++), 2, FK_Data_2, CurByte, OS, Fixups); + break; case X86II::AddRegFrm: EmitByte(BaseOpcode + GetX86RegNum(MI.getOperand(CurOp++)), CurByte, OS); @@ -833,10 +849,15 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, case X86II::MRMDestMem: EmitByte(BaseOpcode, CurByte, OS); + SrcRegNum = CurOp + X86::AddrNumOperands; + + if (HasVEX_4V) // Skip 1st src (which is encoded in VEX_VVVV) + SrcRegNum++; + EmitMemModRMByte(MI, CurOp, - GetX86RegNum(MI.getOperand(CurOp + X86::AddrNumOperands)), + GetX86RegNum(MI.getOperand(SrcRegNum)), TSFlags, CurByte, OS, Fixups); - CurOp += X86::AddrNumOperands + 1; + CurOp = SrcRegNum + 1; break; case X86II::MRMSrcReg: @@ -934,7 +955,7 @@ EncodeInstruction(const MCInst &MI, raw_ostream &OS, if (CurOp != NumOps) { // The last source register of a 4 operand instruction in AVX is encoded // in bits[7:4] of a immediate byte, and bits[3:0] are ignored. - if (TSFlags & X86II::VEX_I8IMM) { + if ((TSFlags >> 32) & X86II::VEX_I8IMM) { const MCOperand &MO = MI.getOperand(CurOp++); bool IsExtReg = X86InstrInfo::isX86_64ExtendedReg(MO.getReg()); diff --git a/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp b/lib/Target/X86/X86MCInstLower.cpp index e67fc06a6cd7..8c4620f92177 100644 --- a/lib/Target/X86/AsmPrinter/X86MCInstLower.cpp +++ b/lib/Target/X86/X86MCInstLower.cpp @@ -16,7 +16,6 @@ #include "X86AsmPrinter.h" #include "X86COFFMachineModuleInfo.h" #include "X86MCAsmInfo.h" -#include "llvm/Analysis/DebugInfo.h" #include "llvm/CodeGen/MachineModuleInfoImpls.h" #include "llvm/MC/MCContext.h" #include "llvm/MC/MCExpr.h" @@ -29,21 +28,19 @@ #include "llvm/Type.h" using namespace llvm; - -const X86Subtarget &X86MCInstLower::getSubtarget() const { - return AsmPrinter.getSubtarget(); -} +X86MCInstLower::X86MCInstLower(Mangler *mang, const MachineFunction &mf, + X86AsmPrinter &asmprinter) +: Ctx(mf.getContext()), Mang(mang), MF(mf), TM(mf.getTarget()), + MAI(*TM.getMCAsmInfo()), AsmPrinter(asmprinter) {} MachineModuleInfoMachO &X86MCInstLower::getMachOMMI() const { - assert(getSubtarget().isTargetDarwin() &&"Can only get MachO info on darwin"); - return AsmPrinter.MMI->getObjFileInfo<MachineModuleInfoMachO>(); + return MF.getMMI().getObjFileInfo<MachineModuleInfoMachO>(); } MCSymbol *X86MCInstLower::GetPICBaseSymbol() const { - const TargetLowering *TLI = AsmPrinter.TM.getTargetLowering(); - return static_cast<const X86TargetLowering*>(TLI)-> - getPICBaseSymbol(AsmPrinter.MF, Ctx); + return static_cast<const X86TargetLowering*>(TM.getTargetLowering())-> + getPICBaseSymbol(&MF, Ctx); } /// GetSymbolFromOperand - Lower an MO_GlobalAddress or MO_ExternalSymbol @@ -56,7 +53,7 @@ GetSymbolFromOperand(const MachineOperand &MO) const { if (!MO.isGlobal()) { assert(MO.isSymbol()); - Name += AsmPrinter.MAI->getGlobalPrefix(); + Name += MAI.getGlobalPrefix(); Name += MO.getSymbolName(); } else { const GlobalValue *GV = MO.getGlobal(); @@ -91,7 +88,7 @@ GetSymbolFromOperand(const MachineOperand &MO) const { assert(MO.isGlobal() && "Extern symbol not handled yet"); StubSym = MachineModuleInfoImpl:: - StubValueTy(AsmPrinter.Mang->getSymbol(MO.getGlobal()), + StubValueTy(Mang->getSymbol(MO.getGlobal()), 
!MO.getGlobal()->hasInternalLinkage()); } return Sym; @@ -105,7 +102,7 @@ GetSymbolFromOperand(const MachineOperand &MO) const { assert(MO.isGlobal() && "Extern symbol not handled yet"); StubSym = MachineModuleInfoImpl:: - StubValueTy(AsmPrinter.Mang->getSymbol(MO.getGlobal()), + StubValueTy(Mang->getSymbol(MO.getGlobal()), !MO.getGlobal()->hasInternalLinkage()); } return Sym; @@ -121,7 +118,7 @@ GetSymbolFromOperand(const MachineOperand &MO) const { if (MO.isGlobal()) { StubSym = MachineModuleInfoImpl:: - StubValueTy(AsmPrinter.Mang->getSymbol(MO.getGlobal()), + StubValueTy(Mang->getSymbol(MO.getGlobal()), !MO.getGlobal()->hasInternalLinkage()); } else { Name.erase(Name.end()-5, Name.end()); @@ -178,7 +175,7 @@ MCOperand X86MCInstLower::LowerSymbolOperand(const MachineOperand &MO, Expr = MCBinaryExpr::CreateSub(Expr, MCSymbolRefExpr::Create(GetPICBaseSymbol(), Ctx), Ctx); - if (MO.isJTI() && AsmPrinter.MAI->hasSetDirective()) { + if (MO.isJTI() && MAI.hasSetDirective()) { // If .set directive is supported, use it to reduce the number of // relocations the assembler will generate for differences between // local labels. This is only safe when the symbols are in the same @@ -255,7 +252,13 @@ static void SimplifyShortImmForm(MCInst &Inst, unsigned Opcode) { } /// \brief Simplify things like MOV32rm to MOV32o32a. -static void SimplifyShortMoveForm(MCInst &Inst, unsigned Opcode) { +static void SimplifyShortMoveForm(X86AsmPrinter &Printer, MCInst &Inst, + unsigned Opcode) { + // Don't make these simplifications in 64-bit mode; other assemblers don't + // perform them because they make the code larger. + if (Printer.getSubtarget().is64Bit()) + return; + bool IsStore = Inst.getOperand(0).isReg() && Inst.getOperand(1).isReg(); unsigned AddrBase = IsStore; unsigned RegOp = IsStore ? 
0 : 5; @@ -336,7 +339,7 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { break; case MachineOperand::MO_BlockAddress: MCOp = LowerSymbolOperand(MO, - AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress())); + AsmPrinter.GetBlockAddressSymbol(MO.getBlockAddress())); break; } @@ -377,12 +380,17 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { case X86::MMX_V_SET0: LowerUnaryToTwoAddr(OutMI, X86::MMX_PXORrr); break; case X86::MMX_V_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::MMX_PCMPEQDrr); break; - case X86::FsFLD0SS: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break; - case X86::FsFLD0SD: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break; - case X86::V_SET0PS: LowerUnaryToTwoAddr(OutMI, X86::XORPSrr); break; - case X86::V_SET0PD: LowerUnaryToTwoAddr(OutMI, X86::XORPDrr); break; - case X86::V_SET0PI: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break; - case X86::V_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::PCMPEQDrr); break; + case X86::FsFLD0SS: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break; + case X86::FsFLD0SD: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break; + case X86::V_SET0PS: LowerUnaryToTwoAddr(OutMI, X86::XORPSrr); break; + case X86::V_SET0PD: LowerUnaryToTwoAddr(OutMI, X86::XORPDrr); break; + case X86::V_SET0PI: LowerUnaryToTwoAddr(OutMI, X86::PXORrr); break; + case X86::V_SETALLONES: LowerUnaryToTwoAddr(OutMI, X86::PCMPEQDrr); break; + case X86::AVX_SET0PS: LowerUnaryToTwoAddr(OutMI, X86::VXORPSrr); break; + case X86::AVX_SET0PSY: LowerUnaryToTwoAddr(OutMI, X86::VXORPSYrr); break; + case X86::AVX_SET0PD: LowerUnaryToTwoAddr(OutMI, X86::VXORPDrr); break; + case X86::AVX_SET0PDY: LowerUnaryToTwoAddr(OutMI, X86::VXORPDYrr); break; + case X86::AVX_SET0PI: LowerUnaryToTwoAddr(OutMI, X86::VPXORrr); break; case X86::MOV16r0: LowerSubReg32_Op0(OutMI, X86::MOV32r0); // MOV16r0 -> MOV32r0 @@ -393,12 +401,14 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { LowerUnaryToTwoAddr(OutMI, X86::XOR32rr); // MOV32r0 -> XOR32rr break; - // TAILJMPr64, CALL64r, CALL64pcrel32 - These instructions have + // TAILJMPr64, [WIN]CALL64r, [WIN]CALL64pcrel32 - These instructions have // register inputs modeled as normal uses instead of implicit uses. As such, // truncate off all but the first operand (the callee). FIXME: Change isel. 
case X86::TAILJMPr64: case X86::CALL64r: - case X86::CALL64pcrel32: { + case X86::CALL64pcrel32: + case X86::WINCALL64r: + case X86::WINCALL64pcrel32: { unsigned Opcode = OutMI.getOpcode(); MCOperand Saved = OutMI.getOperand(0); OutMI = MCInst(); @@ -456,15 +466,13 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { // MOV64ao8, MOV64o8a // XCHG16ar, XCHG32ar, XCHG64ar case X86::MOV8mr_NOREX: - case X86::MOV8mr: SimplifyShortMoveForm(OutMI, X86::MOV8ao8); break; + case X86::MOV8mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8ao8); break; case X86::MOV8rm_NOREX: - case X86::MOV8rm: SimplifyShortMoveForm(OutMI, X86::MOV8o8a); break; - case X86::MOV16mr: SimplifyShortMoveForm(OutMI, X86::MOV16ao16); break; - case X86::MOV16rm: SimplifyShortMoveForm(OutMI, X86::MOV16o16a); break; - case X86::MOV32mr: SimplifyShortMoveForm(OutMI, X86::MOV32ao32); break; - case X86::MOV32rm: SimplifyShortMoveForm(OutMI, X86::MOV32o32a); break; - case X86::MOV64mr: SimplifyShortMoveForm(OutMI, X86::MOV64ao64); break; - case X86::MOV64rm: SimplifyShortMoveForm(OutMI, X86::MOV64o64a); break; + case X86::MOV8rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV8o8a); break; + case X86::MOV16mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16ao16); break; + case X86::MOV16rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV16o16a); break; + case X86::MOV32mr: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32ao32); break; + case X86::MOV32rm: SimplifyShortMoveForm(AsmPrinter, OutMI, X86::MOV32o32a); break; case X86::ADC8ri: SimplifyShortImmForm(OutMI, X86::ADC8i8); break; case X86::ADC16ri: SimplifyShortImmForm(OutMI, X86::ADC16i16); break; @@ -505,46 +513,9 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const { } } -void X86AsmPrinter::PrintDebugValueComment(const MachineInstr *MI, - raw_ostream &O) { - // Only the target-dependent form of DBG_VALUE should get here. - // Referencing the offset and metadata as NOps-2 and NOps-1 is - // probably portable to other targets; frame pointer location is not. - unsigned NOps = MI->getNumOperands(); - assert(NOps==7); - O << '\t' << MAI->getCommentString() << "DEBUG_VALUE: "; - // cast away const; DIetc do not take const operands for some reason. - DIVariable V(const_cast<MDNode *>(MI->getOperand(NOps-1).getMetadata())); - if (V.getContext().isSubprogram()) - O << DISubprogram(V.getContext()).getDisplayName() << ":"; - O << V.getName(); - O << " <- "; - // Frame address. Currently handles register +- offset only. - O << '['; - if (MI->getOperand(0).isReg() && MI->getOperand(0).getReg()) - printOperand(MI, 0, O); - else - O << "undef"; - O << '+'; printOperand(MI, 3, O); - O << ']'; - O << "+"; - printOperand(MI, NOps-2, O); -} - -MachineLocation -X86AsmPrinter::getDebugValueLocation(const MachineInstr *MI) const { - MachineLocation Location; - assert (MI->getNumOperands() == 7 && "Invalid no. of machine operands!"); - // Frame address. Currently handles register +- offset only. 
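Why the short accumulator moves are now skipped in 64-bit mode (and the MOV64 entries dropped): the moffs forms (A0-A3) carry a full 64-bit absolute address there, so the "short" encoding ends up longer than the ModRM form it replaces. A rough size comparison, assuming a 32-bit absolute displacement for the ModRM form; byte counts are illustrative, not taken from the encoder:

    #include <cstdio>

    // Encoded size of "mov <abs-addr>, %eax" in the two candidate forms.
    static unsigned movEaxFromAbs(bool Is64Bit, bool UseShortForm) {
      if (UseShortForm)                    // A1: MOV EAX, moffs32/moffs64
        return 1u + (Is64Bit ? 8u : 4u);   // opcode + absolute address
      if (Is64Bit)                         // 8B /r: opcode + ModRM + SIB + disp32
        return 1u + 1u + 1u + 4u;
      return 1u + 1u + 4u;                 // 8B /r: opcode + ModRM + disp32
    }

    int main() {
      std::printf("32-bit: short=%u modrm=%u\n",
                  movEaxFromAbs(false, true), movEaxFromAbs(false, false)); // 5 vs 6
      std::printf("64-bit: short=%u modrm=%u\n",
                  movEaxFromAbs(true, true), movEaxFromAbs(true, false));   // 9 vs 7
      return 0;
    }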
- - if (MI->getOperand(0).isReg() && MI->getOperand(3).isImm()) - Location.set(MI->getOperand(0).getReg(), MI->getOperand(3).getImm()); - return Location; -} - void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) { - X86MCInstLower MCInstLowering(OutContext, Mang, *this); + X86MCInstLower MCInstLowering(Mang, *MF, *this); switch (MI->getOpcode()) { case TargetOpcode::DBG_VALUE: if (isVerbose() && OutStreamer.hasRawTextSupport()) { @@ -555,6 +526,12 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) { } return; + // Emit nothing here but a comment if we can. + case X86::Int_MemBarrier: + if (OutStreamer.hasRawTextSupport()) + OutStreamer.EmitRawText(StringRef("\t#MEMBARRIER")); + return; + case X86::TAILJMPr: case X86::TAILJMPd: case X86::TAILJMPd64: diff --git a/lib/Target/X86/AsmPrinter/X86MCInstLower.h b/lib/Target/X86/X86MCInstLower.h index 9e5474fc81b3..539b09be6fd7 100644 --- a/lib/Target/X86/AsmPrinter/X86MCInstLower.h +++ b/lib/Target/X86/X86MCInstLower.h @@ -13,27 +13,30 @@ #include "llvm/Support/Compiler.h" namespace llvm { + class MCAsmInfo; class MCContext; class MCInst; class MCOperand; class MCSymbol; class MachineInstr; + class MachineFunction; class MachineModuleInfoMachO; class MachineOperand; class Mangler; + class TargetMachine; class X86AsmPrinter; - class X86Subtarget; /// X86MCInstLower - This class is used to lower an MachineInstr into an MCInst. class LLVM_LIBRARY_VISIBILITY X86MCInstLower { MCContext &Ctx; Mangler *Mang; + const MachineFunction &MF; + const TargetMachine &TM; + const MCAsmInfo &MAI; X86AsmPrinter &AsmPrinter; - - const X86Subtarget &getSubtarget() const; public: - X86MCInstLower(MCContext &ctx, Mangler *mang, X86AsmPrinter &asmprinter) - : Ctx(ctx), Mang(mang), AsmPrinter(asmprinter) {} + X86MCInstLower(Mangler *mang, const MachineFunction &MF, + X86AsmPrinter &asmprinter); void Lower(const MachineInstr *MI, MCInst &OutMI) const; diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp index 5f31e00ebabd..fedd49ebb540 100644 --- a/lib/Target/X86/X86RegisterInfo.cpp +++ b/lib/Target/X86/X86RegisterInfo.cpp @@ -38,8 +38,15 @@ #include "llvm/ADT/BitVector.h" #include "llvm/ADT/STLExtras.h" #include "llvm/Support/ErrorHandling.h" +#include "llvm/Support/CommandLine.h" using namespace llvm; +static cl::opt<bool> +ForceStackAlign("force-align-stack", + cl::desc("Force align the stack to the minimum alignment" + " needed for the function."), + cl::init(false), cl::Hidden); + X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm, const TargetInstrInfo &tii) : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ? 
@@ -193,6 +200,12 @@ unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) { case X86::DR7: return 7; + // Pseudo index registers are equivalent to a "none" + // scaled index (See Intel Manual 2A, table 2-3) + case X86::EIZ: + case X86::RIZ: + return 4; + default: assert(isVirtualRegister(RegNo) && "Unknown physical register!"); llvm_unreachable("Register allocator hasn't allocated reg correctly yet!"); @@ -456,26 +469,29 @@ bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const { bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const { const MachineFrameInfo *MFI = MF.getFrameInfo(); const Function *F = MF.getFunction(); - bool requiresRealignment = - RealignStack && ((MFI->getMaxAlignment() > StackAlign) || - F->hasFnAttr(Attribute::StackAlignment)); + bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) || + F->hasFnAttr(Attribute::StackAlignment)); // FIXME: Currently we don't support stack realignment for functions with // variable-sized allocas. - // FIXME: Temporary disable the error - it seems to be too conservative. + // FIXME: It's more complicated than this... if (0 && requiresRealignment && MFI->hasVarSizedObjects()) report_fatal_error( "Stack realignment in presense of dynamic allocas is not supported"); - - return (requiresRealignment && !MFI->hasVarSizedObjects()); + + // If we've requested that we force align the stack do so now. + if (ForceStackAlign) + return canRealignStack(MF); + + return requiresRealignment && canRealignStack(MF); } -bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const { +bool X86RegisterInfo::hasReservedCallFrame(const MachineFunction &MF) const { return !MF.getFrameInfo()->hasVarSizedObjects(); } -bool X86RegisterInfo::hasReservedSpillSlot(MachineFunction &MF, unsigned Reg, - int &FrameIdx) const { +bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF, + unsigned Reg, int &FrameIdx) const { if (Reg == FramePtr && hasFP(MF)) { FrameIdx = MF.getFrameInfo()->getObjectIndexBegin(); return true; @@ -610,10 +626,9 @@ eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MBB.erase(I); } -unsigned +void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, - int SPAdj, FrameIndexValue *Value, - RegScavenger *RS) const{ + int SPAdj, RegScavenger *RS) const{ assert(SPAdj == 0 && "Unexpected"); unsigned i = 0; @@ -660,7 +675,6 @@ X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II, uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset(); MI.getOperand(i+3).setOffset(Offset); } - return 0; } void @@ -750,7 +764,7 @@ void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, } } -/// mergeSPUpdatesUp - Merge two stack-manipulating instructions lower iterator. +/// mergeSPUpdatesDown - Merge two stack-manipulating instructions lower iterator. static void mergeSPUpdatesDown(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, @@ -901,6 +915,17 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const { bool HasFP = hasFP(MF); DebugLoc DL; + // If we're forcing a stack realignment we can't rely on just the frame + // info, we need to know the ABI stack alignment as well in case we + // have a call out. Otherwise just make sure we have some alignment - we'll + // go with the minimum SlotSize. + if (ForceStackAlign) { + if (MFI->hasCalls()) + MaxAlign = (StackAlign > MaxAlign) ? 
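The getX86RegNum value of 4 for %eiz/%riz is the SIB index field's "no index" encoding, so an address written with the pseudo index assembles exactly like one without it. A tiny check of that bit layout; stand-alone, not the backend's encoder:

    #include <cassert>
    #include <cstdint>

    // SIB byte: bits [7:6] log2(scale), [5:3] index, [2:0] base.
    static uint8_t sibByte(unsigned Log2Scale, unsigned Index, unsigned Base) {
      return uint8_t((Log2Scale << 6) | ((Index & 7) << 3) | (Base & 7));
    }

    int main() {
      // "(%eax,%eiz,1)": base = EAX (0), index = %eiz -> 4, scale = 1.
      uint8_t SIB = sibByte(0, /*EIZ*/ 4, /*EAX*/ 0);
      assert(SIB == 0x20);   // 00 100 000: scale 1, no index, base EAX
      return 0;
    }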
StackAlign : MaxAlign; + else if (MaxAlign < SlotSize) + MaxAlign = SlotSize; + } + // Add RETADDR move area to callee saved frame size. int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta(); if (TailCallReturnAddrDelta < 0) @@ -979,7 +1004,7 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const { if (needsFrameMoves) { // Mark the place where EBP/RBP was saved. MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol(); - BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(FrameLabel); + BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel); // Define the current CFA rule to use the provided offset. if (StackSize) { @@ -1007,7 +1032,7 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const { if (needsFrameMoves) { // Mark effective beginning of when frame pointer becomes valid. MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol(); - BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(FrameLabel); + BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(FrameLabel); // Define the current CFA to use the EBP/RBP register. MachineLocation FPDst(FramePtr); @@ -1047,7 +1072,7 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const { if (!HasFP && needsFrameMoves) { // Mark callee-saved push instruction. MCSymbol *Label = MMI.getContext().CreateTempSymbol(); - BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(Label); + BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label); // Define the current CFA rule to use the provided offset. unsigned Ptr = StackSize ? @@ -1062,7 +1087,17 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const { DL = MBB.findDebugLoc(MBBI); // Adjust stack pointer: ESP -= numbytes. - if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) { + + // Windows and cygwin/mingw require a prologue helper routine when allocating + // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw + // uses __alloca. __alloca and the 32-bit version of __chkstk will probe + // the stack and adjust the stack pointer in one go. The 64-bit version + // of __chkstk is only responsible for probing the stack. The 64-bit + // prologue is responsible for adjusting the stack pointer. Touching the + // stack at 4K increments is necessary to ensure that the guard pages used + // by the OS virtual memory manager are allocated in correct sequence. + if (NumBytes >= 4096 && + (Subtarget->isTargetCygMing() || Subtarget->isTargetWin32())) { // Check, whether EAX is livein for this function. bool isEAXAlive = false; for (MachineRegisterInfo::livein_iterator @@ -1073,16 +1108,16 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const { Reg == X86::AH || Reg == X86::AL); } - // Function prologue calls _alloca to probe the stack when allocating more - // than 4k bytes in one go. Touching the stack at 4K increments is necessary - // to ensure that the guard pages used by the OS virtual memory manager are - // allocated in correct sequence. + + const char *StackProbeSymbol = + Subtarget->isTargetWindows() ? 
"_chkstk" : "_alloca"; if (!isEAXAlive) { BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX) .addImm(NumBytes); BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32)) - .addExternalSymbol("_alloca") - .addReg(StackPtr, RegState::Define | RegState::Implicit); + .addExternalSymbol(StackProbeSymbol) + .addReg(StackPtr, RegState::Define | RegState::Implicit) + .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); } else { // Save EAX BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r)) @@ -1093,8 +1128,9 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const { BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX) .addImm(NumBytes - 4); BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32)) - .addExternalSymbol("_alloca") - .addReg(StackPtr, RegState::Define | RegState::Implicit); + .addExternalSymbol(StackProbeSymbol) + .addReg(StackPtr, RegState::Define | RegState::Implicit) + .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit); // Restore EAX MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm), @@ -1119,7 +1155,7 @@ void X86RegisterInfo::emitPrologue(MachineFunction &MF) const { if ((NumBytes || PushedRegs) && needsFrameMoves) { // Mark end of stack pointer adjustment. MCSymbol *Label = MMI.getContext().CreateTempSymbol(); - BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addSym(Label); + BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label); if (!HasFP && NumBytes) { // Define the current CFA rule to use the provided offset. @@ -1172,6 +1208,17 @@ void X86RegisterInfo::emitEpilogue(MachineFunction &MF, unsigned CSSize = X86FI->getCalleeSavedFrameSize(); uint64_t NumBytes = 0; + // If we're forcing a stack realignment we can't rely on just the frame + // info, we need to know the ABI stack alignment as well in case we + // have a call out. Otherwise just make sure we have some alignment - we'll + // go with the minimum. + if (ForceStackAlign) { + if (MFI->hasCalls()) + MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign; + else + MaxAlign = MaxAlign ? MaxAlign : 4; + } + if (hasFP(MF)) { // Calculate required stack adjustment. 
uint64_t FrameSize = StackSize - SlotSize; @@ -1519,7 +1566,7 @@ unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) { namespace { struct MSAH : public MachineFunctionPass { static char ID; - MSAH() : MachineFunctionPass(&ID) {} + MSAH() : MachineFunctionPass(ID) {} virtual bool runOnMachineFunction(MachineFunction &MF) { const X86TargetMachine *TM = diff --git a/lib/Target/X86/X86RegisterInfo.h b/lib/Target/X86/X86RegisterInfo.h index d852bcd2011c..527df05c58fc 100644 --- a/lib/Target/X86/X86RegisterInfo.h +++ b/lib/Target/X86/X86RegisterInfo.h @@ -117,18 +117,17 @@ public: bool needsStackRealignment(const MachineFunction &MF) const; - bool hasReservedCallFrame(MachineFunction &MF) const; + bool hasReservedCallFrame(const MachineFunction &MF) const; - bool hasReservedSpillSlot(MachineFunction &MF, unsigned Reg, + bool hasReservedSpillSlot(const MachineFunction &MF, unsigned Reg, int &FrameIdx) const; void eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const; - unsigned eliminateFrameIndex(MachineBasicBlock::iterator MI, - int SPAdj, FrameIndexValue *Value = NULL, - RegScavenger *RS = NULL) const; + void eliminateFrameIndex(MachineBasicBlock::iterator MI, + int SPAdj, RegScavenger *RS = NULL) const; void processFunctionBeforeCalleeSavedScan(MachineFunction &MF, RegScavenger *RS = NULL) const; diff --git a/lib/Target/X86/X86RegisterInfo.td b/lib/Target/X86/X86RegisterInfo.td index 9f0382e3fae9..95269b15760e 100644 --- a/lib/Target/X86/X86RegisterInfo.td +++ b/lib/Target/X86/X86RegisterInfo.td @@ -241,6 +241,10 @@ let Namespace = "X86" in { def CR6 : Register<"cr6">; def CR7 : Register<"cr7">; def CR8 : Register<"cr8">; + + // Pseudo index registers + def EIZ : Register<"eiz">; + def RIZ : Register<"riz">; } @@ -804,7 +808,7 @@ def VR128 : RegisterClass<"X86", [v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],128, }]; } -def VR256 : RegisterClass<"X86", [v8i32, v4i64, v8f32, v4f64], 256, +def VR256 : RegisterClass<"X86", [v32i8, v8i32, v4i64, v8f32, v4f64], 256, [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7, YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15]> { @@ -829,4 +833,15 @@ def VR256 : RegisterClass<"X86", [v8i32, v4i64, v8f32, v4f64], 256, // Status flags registers. def CCR : RegisterClass<"X86", [i32], 32, [EFLAGS]> { let CopyCost = -1; // Don't allow copying of status registers. + + // EFLAGS is not allocatable. + let MethodProtos = [{ + iterator allocation_order_end(const MachineFunction &MF) const; + }]; + let MethodBodies = [{ + CCRClass::iterator + CCRClass::allocation_order_end(const MachineFunction &MF) const { + return allocation_order_begin(MF); + } + }]; } diff --git a/lib/Target/X86/X86ShuffleDecode.h b/lib/Target/X86/X86ShuffleDecode.h new file mode 100644 index 000000000000..df040520bc8f --- /dev/null +++ b/lib/Target/X86/X86ShuffleDecode.h @@ -0,0 +1,155 @@ +//===-- X86ShuffleDecode.h - X86 shuffle decode logic ---------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Define several functions to decode x86 specific shuffle semantics into a +// generic vector mask. 
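The CCR (EFLAGS) change above uses a simple trick to make a register class unallocatable: when allocation_order_end() returns allocation_order_begin(), the allocation order is empty and the register allocator can never hand the register out. A stand-alone analogue of that idea; the real code lives in the generated CCRClass, and the register number here is made up:

    #include <vector>

    struct UnallocatableClassSketch {
      std::vector<unsigned> Regs = {42};     // pretend 42 is EFLAGS
      const unsigned *allocation_order_begin() const { return Regs.data(); }
      const unsigned *allocation_order_end() const {
        return allocation_order_begin();     // empty range: nothing allocatable
      }
    };

    // The allocator walks [begin, end); here that range is empty.
    bool hasAllocatableRegs(const UnallocatableClassSketch &RC) {
      return RC.allocation_order_begin() != RC.allocation_order_end(); // false
    }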
+// +//===----------------------------------------------------------------------===// + +#ifndef X86_SHUFFLE_DECODE_H +#define X86_SHUFFLE_DECODE_H + +#include "llvm/ADT/SmallVector.h" +using namespace llvm; + +//===----------------------------------------------------------------------===// +// Vector Mask Decoding +//===----------------------------------------------------------------------===// + +enum { + SM_SentinelZero = ~0U +}; + +static inline +void DecodeINSERTPSMask(unsigned Imm, SmallVectorImpl<unsigned> &ShuffleMask) { + // Defaults the copying the dest value. + ShuffleMask.push_back(0); + ShuffleMask.push_back(1); + ShuffleMask.push_back(2); + ShuffleMask.push_back(3); + + // Decode the immediate. + unsigned ZMask = Imm & 15; + unsigned CountD = (Imm >> 4) & 3; + unsigned CountS = (Imm >> 6) & 3; + + // CountS selects which input element to use. + unsigned InVal = 4+CountS; + // CountD specifies which element of destination to update. + ShuffleMask[CountD] = InVal; + // ZMask zaps values, potentially overriding the CountD elt. + if (ZMask & 1) ShuffleMask[0] = SM_SentinelZero; + if (ZMask & 2) ShuffleMask[1] = SM_SentinelZero; + if (ZMask & 4) ShuffleMask[2] = SM_SentinelZero; + if (ZMask & 8) ShuffleMask[3] = SM_SentinelZero; +} + +// <3,1> or <6,7,2,3> +static void DecodeMOVHLPSMask(unsigned NElts, + SmallVectorImpl<unsigned> &ShuffleMask) { + for (unsigned i = NElts/2; i != NElts; ++i) + ShuffleMask.push_back(NElts+i); + + for (unsigned i = NElts/2; i != NElts; ++i) + ShuffleMask.push_back(i); +} + +// <0,2> or <0,1,4,5> +static void DecodeMOVLHPSMask(unsigned NElts, + SmallVectorImpl<unsigned> &ShuffleMask) { + for (unsigned i = 0; i != NElts/2; ++i) + ShuffleMask.push_back(i); + + for (unsigned i = 0; i != NElts/2; ++i) + ShuffleMask.push_back(NElts+i); +} + +static void DecodePSHUFMask(unsigned NElts, unsigned Imm, + SmallVectorImpl<unsigned> &ShuffleMask) { + for (unsigned i = 0; i != NElts; ++i) { + ShuffleMask.push_back(Imm % NElts); + Imm /= NElts; + } +} + +static void DecodePSHUFHWMask(unsigned Imm, + SmallVectorImpl<unsigned> &ShuffleMask) { + ShuffleMask.push_back(0); + ShuffleMask.push_back(1); + ShuffleMask.push_back(2); + ShuffleMask.push_back(3); + for (unsigned i = 0; i != 4; ++i) { + ShuffleMask.push_back(4+(Imm & 3)); + Imm >>= 2; + } +} + +static void DecodePSHUFLWMask(unsigned Imm, + SmallVectorImpl<unsigned> &ShuffleMask) { + for (unsigned i = 0; i != 4; ++i) { + ShuffleMask.push_back((Imm & 3)); + Imm >>= 2; + } + ShuffleMask.push_back(4); + ShuffleMask.push_back(5); + ShuffleMask.push_back(6); + ShuffleMask.push_back(7); +} + +static void DecodePUNPCKLMask(unsigned NElts, + SmallVectorImpl<unsigned> &ShuffleMask) { + for (unsigned i = 0; i != NElts/2; ++i) { + ShuffleMask.push_back(i); + ShuffleMask.push_back(i+NElts); + } +} + +static void DecodePUNPCKHMask(unsigned NElts, + SmallVectorImpl<unsigned> &ShuffleMask) { + for (unsigned i = 0; i != NElts/2; ++i) { + ShuffleMask.push_back(i+NElts/2); + ShuffleMask.push_back(i+NElts+NElts/2); + } +} + +static void DecodeSHUFPSMask(unsigned NElts, unsigned Imm, + SmallVectorImpl<unsigned> &ShuffleMask) { + // Part that reads from dest. + for (unsigned i = 0; i != NElts/2; ++i) { + ShuffleMask.push_back(Imm % NElts); + Imm /= NElts; + } + // Part that reads from src. 
+ for (unsigned i = 0; i != NElts/2; ++i) { + ShuffleMask.push_back(Imm % NElts + NElts); + Imm /= NElts; + } +} + +static void DecodeUNPCKHPMask(unsigned NElts, + SmallVectorImpl<unsigned> &ShuffleMask) { + for (unsigned i = 0; i != NElts/2; ++i) { + ShuffleMask.push_back(i+NElts/2); // Reads from dest + ShuffleMask.push_back(i+NElts+NElts/2); // Reads from src + } +} + + +/// DecodeUNPCKLPMask - This decodes the shuffle masks for unpcklps/unpcklpd +/// etc. NElts indicates the number of elements in the vector allowing it to +/// handle different datatypes and vector widths. +static void DecodeUNPCKLPMask(unsigned NElts, + SmallVectorImpl<unsigned> &ShuffleMask) { + for (unsigned i = 0; i != NElts/2; ++i) { + ShuffleMask.push_back(i); // Reads from dest + ShuffleMask.push_back(i+NElts); // Reads from src + } +} + +#endif diff --git a/lib/Target/X86/X86Subtarget.cpp b/lib/Target/X86/X86Subtarget.cpp index 4a10be518f03..0d02e5ee472b 100644 --- a/lib/Target/X86/X86Subtarget.cpp +++ b/lib/Target/X86/X86Subtarget.cpp @@ -73,7 +73,7 @@ ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const { if (GV->hasDefaultVisibility() && (isDecl || GV->isWeakForLinker())) return X86II::MO_GOTPCREL; - } else { + } else if (!isTargetWin64()) { assert(isTargetELF() && "Unknown rip-relative target"); // Extra load is needed for all externally visible. @@ -260,9 +260,10 @@ void X86Subtarget::AutoDetectSubtargetFeatures() { bool IsIntel = memcmp(text.c, "GenuineIntel", 12) == 0; bool IsAMD = !IsIntel && memcmp(text.c, "AuthenticAMD", 12) == 0; - HasFMA3 = IsIntel && ((ECX >> 12) & 0x1); - HasAVX = ((ECX >> 28) & 0x1); - HasAES = IsIntel && ((ECX >> 25) & 0x1); + HasCLMUL = IsIntel && ((ECX >> 1) & 0x1); + HasFMA3 = IsIntel && ((ECX >> 12) & 0x1); + HasAVX = ((ECX >> 28) & 0x1); + HasAES = IsIntel && ((ECX >> 25) & 0x1); if (IsIntel || IsAMD) { // Determine if bit test memory instructions are slow. 
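The new X86ShuffleDecode.h helpers can be sanity-checked directly; for example, pshufd with immediate 0x1B reverses the four 32-bit lanes, so DecodePSHUFMask should yield the generic mask <3,2,1,0>. A small driver, assuming the header is on the include path:

    #include "X86ShuffleDecode.h"
    #include <cstdio>

    int main() {
      SmallVector<unsigned, 8> Mask;
      DecodePSHUFMask(/*NElts=*/4, /*Imm=*/0x1B, Mask);
      for (unsigned i = 0, e = Mask.size(); i != e; ++i)
        std::printf("%u ", Mask[i]);   // prints: 3 2 1 0
      std::printf("\n");
      return 0;
    }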
@@ -291,6 +292,7 @@ X86Subtarget::X86Subtarget(const std::string &TT, const std::string &FS, , HasSSE4A(false) , HasAVX(false) , HasAES(false) + , HasCLMUL(false) , HasFMA3(false) , HasFMA4(false) , IsBTMemSlow(false) diff --git a/lib/Target/X86/X86Subtarget.h b/lib/Target/X86/X86Subtarget.h index 486dbc4e2e90..0ee91abe21f4 100644 --- a/lib/Target/X86/X86Subtarget.h +++ b/lib/Target/X86/X86Subtarget.h @@ -74,6 +74,9 @@ protected: /// HasAES - Target has AES instructions bool HasAES; + /// HasCLMUL - Target has carry-less multiplication + bool HasCLMUL; + /// HasFMA3 - Target has 3-operand fused multiply-add bool HasFMA3; @@ -149,6 +152,7 @@ public: bool has3DNowA() const { return X863DNowLevel >= ThreeDNowA; } bool hasAVX() const { return HasAVX; } bool hasAES() const { return HasAES; } + bool hasCLMUL() const { return HasCLMUL; } bool hasFMA3() const { return HasFMA3; } bool hasFMA4() const { return HasFMA4; } bool isBTMemSlow() const { return IsBTMemSlow; } @@ -182,6 +186,10 @@ public: return Is64Bit && (isTargetMingw() || isTargetWindows()); } + bool isTargetWin32() const { + return !Is64Bit && (isTargetMingw() || isTargetWindows()); + } + std::string getDataLayout() const { const char *p; if (is64Bit()) diff --git a/lib/Target/X86/X86TargetMachine.cpp b/lib/Target/X86/X86TargetMachine.cpp index df00d3ffcc79..ce8636eb72b5 100644 --- a/lib/Target/X86/X86TargetMachine.cpp +++ b/lib/Target/X86/X86TargetMachine.cpp @@ -46,8 +46,15 @@ static MCStreamer *createMCStreamer(const Target &T, const std::string &TT, bool RelaxAll) { Triple TheTriple(TT); switch (TheTriple.getOS()) { - default: + case Triple::Darwin: return createMachOStreamer(Ctx, TAB, _OS, _Emitter, RelaxAll); + case Triple::MinGW32: + case Triple::MinGW64: + case Triple::Cygwin: + case Triple::Win32: + return createWinCOFFStreamer(Ctx, TAB, *_Emitter, _OS, RelaxAll); + default: + return createELFStreamer(Ctx, TAB, _OS, _Emitter, RelaxAll); } } @@ -105,15 +112,21 @@ X86TargetMachine::X86TargetMachine(const Target &T, const std::string &TT, InstrInfo(*this), JITInfo(*this), TLInfo(*this), TSInfo(*this), ELFWriterInfo(*this) { DefRelocModel = getRelocationModel(); - + // If no relocation model was picked, default as appropriate for the target. if (getRelocationModel() == Reloc::Default) { - if (!Subtarget.isTargetDarwin()) - setRelocationModel(Reloc::Static); - else if (Subtarget.is64Bit()) + // Darwin defaults to PIC in 64 bit mode and dynamic-no-pic in 32 bit mode. + // Win64 requires rip-rel addressing, thus we force it to PIC. Otherwise we + // use static relocation model by default. + if (Subtarget.isTargetDarwin()) { + if (Subtarget.is64Bit()) + setRelocationModel(Reloc::PIC_); + else + setRelocationModel(Reloc::DynamicNoPIC); + } else if (Subtarget.isTargetWin64()) setRelocationModel(Reloc::PIC_); else - setRelocationModel(Reloc::DynamicNoPIC); + setRelocationModel(Reloc::Static); } assert(getRelocationModel() != Reloc::Default && @@ -136,29 +149,27 @@ X86TargetMachine::X86TargetMachine(const Target &T, const std::string &TT, Subtarget.isTargetDarwin() && is64Bit) setRelocationModel(Reloc::PIC_); - + // Determine the PICStyle based on the target selected. if (getRelocationModel() == Reloc::Static) { // Unless we're in PIC or DynamicNoPIC mode, set the PIC style to None. Subtarget.setPICStyle(PICStyles::None); + } else if (Subtarget.is64Bit()) { + // PIC in 64 bit mode is always rip-rel. 
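The feature sniffing above reads CPUID leaf 1 and tests ECX bits: bit 1 for carry-less multiplication (the new HasCLMUL), bit 12 for FMA3, bit 25 for AES, and bit 28 for AVX. A user-space equivalent using the GCC/Clang builtin wrapper, on x86 hosts only:

    #include <cpuid.h>
    #include <cstdio>

    int main() {
      unsigned EAX, EBX, ECX, EDX;
      if (!__get_cpuid(1, &EAX, &EBX, &ECX, &EDX))
        return 1;                                   // CPUID leaf 1 unsupported
      std::printf("CLMUL: %u\n", (ECX >> 1) & 1);   // carry-less multiplication
      std::printf("FMA3:  %u\n", (ECX >> 12) & 1);
      std::printf("AES:   %u\n", (ECX >> 25) & 1);
      std::printf("AVX:   %u\n", (ECX >> 28) & 1);
      return 0;
    }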
+ Subtarget.setPICStyle(PICStyles::RIPRel); } else if (Subtarget.isTargetCygMing()) { Subtarget.setPICStyle(PICStyles::None); } else if (Subtarget.isTargetDarwin()) { - if (Subtarget.is64Bit()) - Subtarget.setPICStyle(PICStyles::RIPRel); - else if (getRelocationModel() == Reloc::PIC_) + if (getRelocationModel() == Reloc::PIC_) Subtarget.setPICStyle(PICStyles::StubPIC); else { assert(getRelocationModel() == Reloc::DynamicNoPIC); Subtarget.setPICStyle(PICStyles::StubDynamicNoPIC); } } else if (Subtarget.isTargetELF()) { - if (Subtarget.is64Bit()) - Subtarget.setPICStyle(PICStyles::RIPRel); - else - Subtarget.setPICStyle(PICStyles::GOT); + Subtarget.setPICStyle(PICStyles::GOT); } - + // Finally, if we have "none" as our PIC style, force to static mode. if (Subtarget.getPICStyle() == PICStyles::None) setRelocationModel(Reloc::Static); @@ -182,9 +193,6 @@ bool X86TargetMachine::addInstSelector(PassManagerBase &PM, bool X86TargetMachine::addPreRegAlloc(PassManagerBase &PM, CodeGenOpt::Level OptLevel) { - // Install a pass to insert x87 FP_REG_KILL instructions, as needed. - PM.add(createX87FPRegKillInserterPass()); - PM.add(createX86MaxStackAlignmentHeuristicPass()); return false; // -print-machineinstr shouldn't print after this. } |
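Condensed, the PIC-style selection above reduces to a small decision function: static code gets no PIC style, 64-bit PIC is always rip-relative, 32-bit Darwin uses stubs, and 32-bit ELF uses the GOT. A restatement as a pure function, with illustrative enums standing in for the LLVM ones:

    enum class RelocModel { Static, PIC, DynamicNoPIC };
    enum class PICStyle { None, RIPRel, StubPIC, StubDynamicNoPIC, GOT };

    static PICStyle pickPICStyle(RelocModel RM, bool Is64Bit, bool IsCygMing,
                                 bool IsDarwin, bool IsELF) {
      if (RM == RelocModel::Static) return PICStyle::None;
      if (Is64Bit)                  return PICStyle::RIPRel; // always rip-relative
      if (IsCygMing)                return PICStyle::None;
      if (IsDarwin)
        return RM == RelocModel::PIC ? PICStyle::StubPIC
                                     : PICStyle::StubDynamicNoPIC;
      if (IsELF)                    return PICStyle::GOT;
      return PICStyle::None;
    }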