diff options
Diffstat (limited to 'lib/Target/Mips')
64 files changed, 1499 insertions, 518 deletions
diff --git a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp index ce7db657f5e9..d2fed6861477 100644 --- a/lib/Target/Mips/AsmParser/MipsAsmParser.cpp +++ b/lib/Target/Mips/AsmParser/MipsAsmParser.cpp @@ -39,6 +39,7 @@ #include "llvm/MC/MCValue.h" #include "llvm/MC/SubtargetFeature.h" #include "llvm/Support/Casting.h" +#include "llvm/Support/CommandLine.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/Debug.h" #include "llvm/Support/ErrorHandling.h" @@ -64,6 +65,11 @@ class MCInstrInfo; } // end namespace llvm +static cl::opt<bool> +EmitJalrReloc("mips-jalr-reloc", cl::Hidden, + cl::desc("MIPS: Emit R_{MICRO}MIPS_JALR relocation with jalr"), + cl::init(true)); + namespace { class MipsAssemblerOptions { @@ -195,7 +201,6 @@ class MipsAsmParser : public MCTargetAsmParser { OperandMatchResultTy parseImm(OperandVector &Operands); OperandMatchResultTy parseJumpTarget(OperandVector &Operands); OperandMatchResultTy parseInvNum(OperandVector &Operands); - OperandMatchResultTy parseMovePRegPair(OperandVector &Operands); OperandMatchResultTy parseRegisterList(OperandVector &Operands); bool searchSymbolAlias(OperandVector &Operands); @@ -760,7 +765,6 @@ private: k_RegisterIndex, /// A register index in one or more RegKind. 
k_Token, /// A simple token k_RegList, /// A physical register list - k_RegPair /// A pair of physical register } Kind; public: @@ -769,16 +773,15 @@ public: ~MipsOperand() override { switch (Kind) { - case k_Immediate: - break; case k_Memory: delete Mem.Base; break; case k_RegList: delete RegList.List; + break; + case k_Immediate: case k_RegisterIndex: case k_Token: - case k_RegPair: break; } } @@ -1038,6 +1041,17 @@ public: Inst.addOperand(MCOperand::createReg(getGPRMM16Reg())); } + void addGPRMM16AsmRegMovePPairFirstOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + Inst.addOperand(MCOperand::createReg(getGPRMM16Reg())); + } + + void addGPRMM16AsmRegMovePPairSecondOperands(MCInst &Inst, + unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + Inst.addOperand(MCOperand::createReg(getGPRMM16Reg())); + } + /// Render the operand to an MCInst as a GPR64 /// Asserts if the wrong number of operands are requested, or the operand /// is not a k_RegisterIndex compatible with RegKind_GPR @@ -1217,29 +1231,6 @@ public: Inst.addOperand(MCOperand::createReg(RegNo)); } - void addRegPairOperands(MCInst &Inst, unsigned N) const { - assert(N == 2 && "Invalid number of operands!"); - assert((RegIdx.Kind & RegKind_GPR) && "Invalid access!"); - unsigned RegNo = getRegPair(); - AsmParser.warnIfRegIndexIsAT(RegNo, StartLoc); - Inst.addOperand(MCOperand::createReg( - RegIdx.RegInfo->getRegClass( - AsmParser.getABI().AreGprs64bit() - ? Mips::GPR64RegClassID - : Mips::GPR32RegClassID).getRegister(RegNo++))); - Inst.addOperand(MCOperand::createReg( - RegIdx.RegInfo->getRegClass( - AsmParser.getABI().AreGprs64bit() - ? 
Mips::GPR64RegClassID - : Mips::GPR32RegClassID).getRegister(RegNo))); - } - - void addMovePRegPairOperands(MCInst &Inst, unsigned N) const { - assert(N == 2 && "Invalid number of operands!"); - for (auto RegNo : getRegList()) - Inst.addOperand(MCOperand::createReg(RegNo)); - } - bool isReg() const override { // As a special case until we sort out the definition of div/divu, accept // $0/$zero here so that MCK_ZERO works correctly. @@ -1406,34 +1397,6 @@ public: bool isRegList() const { return Kind == k_RegList; } - bool isMovePRegPair() const { - if (Kind != k_RegList || RegList.List->size() != 2) - return false; - - unsigned R0 = RegList.List->front(); - unsigned R1 = RegList.List->back(); - - if ((R0 == Mips::A1 && R1 == Mips::A2) || - (R0 == Mips::A1 && R1 == Mips::A3) || - (R0 == Mips::A2 && R1 == Mips::A3) || - (R0 == Mips::A0 && R1 == Mips::S5) || - (R0 == Mips::A0 && R1 == Mips::S6) || - (R0 == Mips::A0 && R1 == Mips::A1) || - (R0 == Mips::A0 && R1 == Mips::A2) || - (R0 == Mips::A0 && R1 == Mips::A3) || - (R0 == Mips::A1_64 && R1 == Mips::A2_64) || - (R0 == Mips::A1_64 && R1 == Mips::A3_64) || - (R0 == Mips::A2_64 && R1 == Mips::A3_64) || - (R0 == Mips::A0_64 && R1 == Mips::S5_64) || - (R0 == Mips::A0_64 && R1 == Mips::S6_64) || - (R0 == Mips::A0_64 && R1 == Mips::A1_64) || - (R0 == Mips::A0_64 && R1 == Mips::A2_64) || - (R0 == Mips::A0_64 && R1 == Mips::A3_64)) - return true; - - return false; - } - StringRef getToken() const { assert(Kind == k_Token && "Invalid access!"); return StringRef(Tok.Data, Tok.Length); @@ -1481,11 +1444,6 @@ public: return *(RegList.List); } - unsigned getRegPair() const { - assert((Kind == k_RegPair) && "Invalid access!"); - return RegIdx.Index; - } - static std::unique_ptr<MipsOperand> CreateToken(StringRef Str, SMLoc S, MipsAsmParser &Parser) { auto Op = llvm::make_unique<MipsOperand>(k_Token, Parser); @@ -1593,18 +1551,6 @@ public: return Op; } - static std::unique_ptr<MipsOperand> CreateRegPair(const MipsOperand &MOP, - 
SMLoc S, SMLoc E, - MipsAsmParser &Parser) { - auto Op = llvm::make_unique<MipsOperand>(k_RegPair, Parser); - Op->RegIdx.Index = MOP.RegIdx.Index; - Op->RegIdx.RegInfo = MOP.RegIdx.RegInfo; - Op->RegIdx.Kind = MOP.RegIdx.Kind; - Op->StartLoc = S; - Op->EndLoc = E; - return Op; - } - bool isGPRZeroAsmReg() const { return isRegIdx() && RegIdx.Kind & RegKind_GPR && RegIdx.Index == 0; } @@ -1640,6 +1586,19 @@ public: (RegIdx.Index >= 16 && RegIdx.Index <= 20)); } + bool isMM16AsmRegMovePPairFirst() const { + if (!(isRegIdx() && RegIdx.Kind)) + return false; + return RegIdx.Index >= 4 && RegIdx.Index <= 6; + } + + bool isMM16AsmRegMovePPairSecond() const { + if (!(isRegIdx() && RegIdx.Kind)) + return false; + return (RegIdx.Index == 21 || RegIdx.Index == 22 || + (RegIdx.Index >= 5 && RegIdx.Index <= 7)); + } + bool isFGRAsmReg() const { // AFGR64 is $0-$15 but we handle this in getAFGR64() return isRegIdx() && RegIdx.Kind & RegKind_FGR && RegIdx.Index <= 31; @@ -1720,9 +1679,6 @@ public: OS << Reg << " "; OS << ">"; break; - case k_RegPair: - OS << "RegPair<" << RegIdx.Index << "," << RegIdx.Index + 1 << ">"; - break; } } @@ -1755,14 +1711,23 @@ static const MCInstrDesc &getInstDesc(unsigned Opcode) { return MipsInsts[Opcode]; } -static bool hasShortDelaySlot(unsigned Opcode) { - switch (Opcode) { +static bool hasShortDelaySlot(MCInst &Inst) { + switch (Inst.getOpcode()) { + case Mips::BEQ_MM: + case Mips::BNE_MM: + case Mips::BLTZ_MM: + case Mips::BGEZ_MM: + case Mips::BLEZ_MM: + case Mips::BGTZ_MM: + case Mips::JRC16_MM: case Mips::JALS_MM: case Mips::JALRS_MM: case Mips::JALRS16_MM: case Mips::BGEZALS_MM: case Mips::BLTZALS_MM: return true; + case Mips::J_MM: + return !Inst.getOperand(0).isReg(); default: return false; } @@ -2115,9 +2080,21 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, JalrInst.addOperand(MCOperand::createReg(Mips::RA)); JalrInst.addOperand(MCOperand::createReg(Mips::T9)); - // FIXME: Add an R_(MICRO)MIPS_JALR relocation after 
the JALR. - // This relocation is supposed to be an optimization hint for the linker - // and is not necessary for correctness. + if (EmitJalrReloc) { + // As an optimization hint for the linker, before the JALR we add: + // .reloc tmplabel, R_{MICRO}MIPS_JALR, symbol + // tmplabel: + MCSymbol *TmpLabel = getContext().createTempSymbol(); + const MCExpr *TmpExpr = MCSymbolRefExpr::create(TmpLabel, getContext()); + const MCExpr *RelocJalrExpr = + MCSymbolRefExpr::create(JalSym, MCSymbolRefExpr::VK_None, + getContext(), IDLoc); + + TOut.getStreamer().EmitRelocDirective(*TmpExpr, + inMicroMipsMode() ? "R_MICROMIPS_JALR" : "R_MIPS_JALR", + RelocJalrExpr, IDLoc, *STI); + TOut.getStreamer().EmitLabel(TmpLabel); + } Inst = JalrInst; ExpandedJalSym = true; @@ -2288,6 +2265,22 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, if (Inst.getOperand(0).getReg() == Mips::RA) return Error(IDLoc, "invalid operand for instruction"); break; + case Mips::MOVEP_MM: + case Mips::MOVEP_MMR6: { + unsigned R0 = Inst.getOperand(0).getReg(); + unsigned R1 = Inst.getOperand(1).getReg(); + bool RegPair = ((R0 == Mips::A1 && R1 == Mips::A2) || + (R0 == Mips::A1 && R1 == Mips::A3) || + (R0 == Mips::A2 && R1 == Mips::A3) || + (R0 == Mips::A0 && R1 == Mips::S5) || + (R0 == Mips::A0 && R1 == Mips::S6) || + (R0 == Mips::A0 && R1 == Mips::A1) || + (R0 == Mips::A0 && R1 == Mips::A2) || + (R0 == Mips::A0 && R1 == Mips::A3)); + if (!RegPair) + return Error(IDLoc, "invalid operand for instruction"); + break; + } } } @@ -2318,7 +2311,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, // If this instruction has a delay slot and .set reorder is active, // emit a NOP after it. 
if (FillDelaySlot) { - TOut.emitEmptyDelaySlot(hasShortDelaySlot(Inst.getOpcode()), IDLoc, STI); + TOut.emitEmptyDelaySlot(hasShortDelaySlot(Inst), IDLoc, STI); TOut.emitDirectiveSetReorder(); } @@ -2330,7 +2323,7 @@ bool MipsAsmParser::processInstruction(MCInst &Inst, SMLoc IDLoc, // If .set reorder has been used, we've already emitted a NOP. // If .set noreorder has been used, we need to emit a NOP at this point. if (!AssemblerOptions.back()->isReorder()) - TOut.emitEmptyDelaySlot(hasShortDelaySlot(Inst.getOpcode()), IDLoc, + TOut.emitEmptyDelaySlot(hasShortDelaySlot(Inst), IDLoc, STI); // Load the $gp from the stack. @@ -2617,7 +2610,7 @@ bool MipsAsmParser::expandJalWithRegs(MCInst &Inst, SMLoc IDLoc, // emit a NOP after it. const MCInstrDesc &MCID = getInstDesc(JalrInst.getOpcode()); if (MCID.hasDelaySlot() && AssemblerOptions.back()->isReorder()) - TOut.emitEmptyDelaySlot(hasShortDelaySlot(JalrInst.getOpcode()), IDLoc, + TOut.emitEmptyDelaySlot(hasShortDelaySlot(JalrInst), IDLoc, STI); return false; @@ -6278,45 +6271,6 @@ MipsAsmParser::parseRegisterList(OperandVector &Operands) { return MatchOperand_Success; } -OperandMatchResultTy -MipsAsmParser::parseMovePRegPair(OperandVector &Operands) { - MCAsmParser &Parser = getParser(); - SmallVector<std::unique_ptr<MCParsedAsmOperand>, 8> TmpOperands; - SmallVector<unsigned, 10> Regs; - - if (Parser.getTok().isNot(AsmToken::Dollar)) - return MatchOperand_ParseFail; - - SMLoc S = Parser.getTok().getLoc(); - - if (parseAnyRegister(TmpOperands) != MatchOperand_Success) - return MatchOperand_ParseFail; - - MipsOperand *Reg = &static_cast<MipsOperand &>(*TmpOperands.back()); - unsigned RegNo = isGP64bit() ? Reg->getGPR64Reg() : Reg->getGPR32Reg(); - Regs.push_back(RegNo); - - SMLoc E = Parser.getTok().getLoc(); - if (Parser.getTok().isNot(AsmToken::Comma)) { - Error(E, "',' expected"); - return MatchOperand_ParseFail; - } - - // Remove comma. 
- Parser.Lex(); - - if (parseAnyRegister(TmpOperands) != MatchOperand_Success) - return MatchOperand_ParseFail; - - Reg = &static_cast<MipsOperand &>(*TmpOperands.back()); - RegNo = isGP64bit() ? Reg->getGPR64Reg() : Reg->getGPR32Reg(); - Regs.push_back(RegNo); - - Operands.push_back(MipsOperand::CreateRegList(Regs, S, E, *this)); - - return MatchOperand_Success; -} - /// Sometimes (i.e. load/stores) the operand may be followed immediately by /// either this. /// ::= '(', register, ')' @@ -6371,6 +6325,9 @@ bool MipsAsmParser::parseBracketSuffix(StringRef Name, return false; } +static std::string MipsMnemonicSpellCheck(StringRef S, uint64_t FBS, + unsigned VariantID = 0); + bool MipsAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, SMLoc NameLoc, OperandVector &Operands) { MCAsmParser &Parser = getParser(); @@ -6381,7 +6338,9 @@ bool MipsAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name, // Check if we have valid mnemonic if (!mnemonicIsValid(Name, 0)) { - return Error(NameLoc, "unknown instruction"); + uint64_t FBS = ComputeAvailableFeatures(getSTI().getFeatureBits()); + std::string Suggestion = MipsMnemonicSpellCheck(Name, FBS); + return Error(NameLoc, "unknown instruction" + Suggestion); } // First operand in MCInst is instruction mnemonic. 
Operands.push_back(MipsOperand::CreateToken(Name, NameLoc, *this)); @@ -8257,6 +8216,7 @@ extern "C" void LLVMInitializeMipsAsmParser() { #define GET_REGISTER_MATCHER #define GET_MATCHER_IMPLEMENTATION +#define GET_MNEMONIC_SPELL_CHECKER #include "MipsGenAsmMatcher.inc" bool MipsAsmParser::mnemonicIsValid(StringRef Mnemonic, unsigned VariantID) { diff --git a/lib/Target/Mips/CMakeLists.txt b/lib/Target/Mips/CMakeLists.txt index 2cacc0a0870c..b67fb46cf66a 100644 --- a/lib/Target/Mips/CMakeLists.txt +++ b/lib/Target/Mips/CMakeLists.txt @@ -44,6 +44,7 @@ add_llvm_target(MipsCodeGen MipsModuleISelDAGToDAG.cpp MipsOptimizePICCall.cpp MipsOs16.cpp + MipsPreLegalizerCombiner.cpp MipsRegisterBankInfo.cpp MipsRegisterInfo.cpp MipsSEFrameLowering.cpp diff --git a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp index b94afb9520e3..27b27ff1e1e2 100644 --- a/lib/Target/Mips/Disassembler/MipsDisassembler.cpp +++ b/lib/Target/Mips/Disassembler/MipsDisassembler.cpp @@ -538,6 +538,9 @@ static DecodeStatus DecodeMovePRegPair(MCInst &Inst, unsigned RegPair, uint64_t Address, const void *Decoder); +static DecodeStatus DecodeMovePOperands(MCInst &Inst, unsigned Insn, + uint64_t Address, const void *Decoder); + namespace llvm { Target &getTheMipselTarget(); @@ -2450,6 +2453,32 @@ static DecodeStatus DecodeRegListOperand16(MCInst &Inst, unsigned Insn, return MCDisassembler::Success; } +static DecodeStatus DecodeMovePOperands(MCInst &Inst, unsigned Insn, + uint64_t Address, + const void *Decoder) { + unsigned RegPair = fieldFromInstruction(Insn, 7, 3); + if (DecodeMovePRegPair(Inst, RegPair, Address, Decoder) == + MCDisassembler::Fail) + return MCDisassembler::Fail; + + unsigned RegRs; + if (static_cast<const MipsDisassembler*>(Decoder)->hasMips32r6()) + RegRs = fieldFromInstruction(Insn, 0, 2) | + (fieldFromInstruction(Insn, 3, 1) << 2); + else + RegRs = fieldFromInstruction(Insn, 1, 3); + if (DecodeGPRMM16MovePRegisterClass(Inst, 
RegRs, Address, Decoder) == + MCDisassembler::Fail) + return MCDisassembler::Fail; + + unsigned RegRt = fieldFromInstruction(Insn, 4, 3); + if (DecodeGPRMM16MovePRegisterClass(Inst, RegRt, Address, Decoder) == + MCDisassembler::Fail) + return MCDisassembler::Fail; + + return MCDisassembler::Success; +} + static DecodeStatus DecodeMovePRegPair(MCInst &Inst, unsigned RegPair, uint64_t Address, const void *Decoder) { switch (RegPair) { diff --git a/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp index bf1390880281..18d7dd99be34 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsABIInfo.cpp @@ -55,6 +55,8 @@ MipsABIInfo MipsABIInfo::computeTargetABI(const Triple &TT, StringRef CPU, return MipsABIInfo::N32(); if (Options.getABIName().startswith("n64")) return MipsABIInfo::N64(); + if (TT.getEnvironment() == llvm::Triple::GNUABIN32) + return MipsABIInfo::N32(); assert(Options.getABIName().empty() && "Unknown ABI option for MIPS"); if (TT.isMIPS64()) diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp index 4397c971d080..265d1141cb0b 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp @@ -13,6 +13,7 @@ // #include "MCTargetDesc/MipsAsmBackend.h" +#include "MCTargetDesc/MipsABIInfo.h" #include "MCTargetDesc/MipsFixupKinds.h" #include "MCTargetDesc/MipsMCExpr.h" #include "MCTargetDesc/MipsMCTargetDesc.h" @@ -338,6 +339,8 @@ Optional<MCFixupKind> MipsAsmBackend::getFixupKind(StringRef Name) const { (MCFixupKind)Mips::fixup_MICROMIPS_TLS_TPREL_HI16) .Case("R_MICROMIPS_TLS_TPREL_LO16", (MCFixupKind)Mips::fixup_MICROMIPS_TLS_TPREL_LO16) + .Case("R_MIPS_JALR", (MCFixupKind)Mips::fixup_Mips_JALR) + .Case("R_MICROMIPS_JALR", (MCFixupKind)Mips::fixup_MICROMIPS_JALR) .Default(MCAsmBackend::getFixupKind(Name)); } @@ -416,7 +419,9 @@ getFixupKindInfo(MCFixupKind Kind) const { 
{ "fixup_MICROMIPS_TLS_TPREL_HI16", 0, 16, 0 }, { "fixup_MICROMIPS_TLS_TPREL_LO16", 0, 16, 0 }, { "fixup_Mips_SUB", 0, 64, 0 }, - { "fixup_MICROMIPS_SUB", 0, 64, 0 } + { "fixup_MICROMIPS_SUB", 0, 64, 0 }, + { "fixup_Mips_JALR", 0, 32, 0 }, + { "fixup_MICROMIPS_JALR", 0, 32, 0 } }; static_assert(array_lengthof(LittleEndianInfos) == Mips::NumTargetFixupKinds, "Not all MIPS little endian fixup kinds added!"); @@ -494,7 +499,9 @@ getFixupKindInfo(MCFixupKind Kind) const { { "fixup_MICROMIPS_TLS_TPREL_HI16", 16, 16, 0 }, { "fixup_MICROMIPS_TLS_TPREL_LO16", 16, 16, 0 }, { "fixup_Mips_SUB", 0, 64, 0 }, - { "fixup_MICROMIPS_SUB", 0, 64, 0 } + { "fixup_MICROMIPS_SUB", 0, 64, 0 }, + { "fixup_Mips_JALR", 0, 32, 0 }, + { "fixup_MICROMIPS_JALR", 0, 32, 0 } }; static_assert(array_lengthof(BigEndianInfos) == Mips::NumTargetFixupKinds, "Not all MIPS big endian fixup kinds added!"); @@ -552,6 +559,7 @@ bool MipsAsmBackend::shouldForceRelocation(const MCAssembler &Asm, case Mips::fixup_Mips_TLSLDM: case Mips::fixup_Mips_TPREL_HI: case Mips::fixup_Mips_TPREL_LO: + case Mips::fixup_Mips_JALR: case Mips::fixup_MICROMIPS_CALL16: case Mips::fixup_MICROMIPS_GOT_DISP: case Mips::fixup_MICROMIPS_GOT_PAGE: @@ -564,14 +572,23 @@ bool MipsAsmBackend::shouldForceRelocation(const MCAssembler &Asm, case Mips::fixup_MICROMIPS_TLS_LDM: case Mips::fixup_MICROMIPS_TLS_TPREL_HI16: case Mips::fixup_MICROMIPS_TLS_TPREL_LO16: + case Mips::fixup_MICROMIPS_JALR: return true; } } +bool MipsAsmBackend::isMicroMips(const MCSymbol *Sym) const { + if (const auto *ElfSym = dyn_cast<const MCSymbolELF>(Sym)) { + if (ElfSym->getOther() & ELF::STO_MIPS_MICROMIPS) + return true; + } + return false; +} + MCAsmBackend *llvm::createMipsAsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options) { - return new MipsAsmBackend(T, MRI, STI.getTargetTriple(), STI.getCPU(), - Options.ABIName == "n32"); + MipsABIInfo ABI = 
MipsABIInfo::computeTargetABI(STI.getTargetTriple(), STI.getCPU(), Options); + return new MipsAsmBackend(T, MRI, STI.getTargetTriple(), STI.getCPU(), ABI.IsN32()); } diff --git a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h index 3d5e16fcf9b4..30359132e92b 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h +++ b/lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h @@ -25,6 +25,7 @@ class MCAssembler; struct MCFixupKindInfo; class MCObjectWriter; class MCRegisterInfo; +class MCSymbolELF; class Target; class MipsAsmBackend : public MCAsmBackend { @@ -90,6 +91,7 @@ public: bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup, const MCValue &Target) override; + bool isMicroMips(const MCSymbol *Sym) const override; }; // class MipsAsmBackend } // namespace diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp index 3dc753772e5f..8ace2895d681 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp @@ -11,6 +11,7 @@ #include "MCTargetDesc/MipsMCTargetDesc.h" #include "llvm/ADT/STLExtras.h" #include "llvm/BinaryFormat/ELF.h" +#include "llvm/MC/MCContext.h" #include "llvm/MC/MCELFObjectWriter.h" #include "llvm/MC/MCFixup.h" #include "llvm/MC/MCObjectWriter.h" @@ -225,7 +226,9 @@ unsigned MipsELFObjectWriter::getRelocType(MCContext &Ctx, case Mips::fixup_Mips_NONE: return ELF::R_MIPS_NONE; case FK_Data_1: - report_fatal_error("MIPS does not support one byte relocations"); + Ctx.reportError(Fixup.getLoc(), + "MIPS does not support one byte relocations"); + return ELF::R_MIPS_NONE; case Mips::fixup_Mips_16: case FK_Data_2: return IsPCRel ? 
ELF::R_MIPS_PC16 : ELF::R_MIPS_16; @@ -236,6 +239,10 @@ unsigned MipsELFObjectWriter::getRelocType(MCContext &Ctx, if (IsPCRel) { switch (Kind) { + case FK_Data_8: + Ctx.reportError(Fixup.getLoc(), + "MIPS does not support 64-bit PC-relative relocations"); + return ELF::R_MIPS_NONE; case Mips::fixup_Mips_Branch_PCRel: case Mips::fixup_Mips_PC16: return ELF::R_MIPS_PC16; @@ -401,6 +408,10 @@ unsigned MipsELFObjectWriter::getRelocType(MCContext &Ctx, return ELF::R_MICROMIPS_HIGHER; case Mips::fixup_MICROMIPS_HIGHEST: return ELF::R_MICROMIPS_HIGHEST; + case Mips::fixup_Mips_JALR: + return ELF::R_MIPS_JALR; + case Mips::fixup_MICROMIPS_JALR: + return ELF::R_MICROMIPS_JALR; } llvm_unreachable("invalid fixup kind!"); @@ -453,7 +464,7 @@ void MipsELFObjectWriter::sortRelocs(const MCAssembler &Asm, return; // Sort relocations by the address they are applied to. - llvm::sort(Relocs.begin(), Relocs.end(), + llvm::sort(Relocs, [](const ELFRelocationEntry &A, const ELFRelocationEntry &B) { return A.Offset < B.Offset; }); diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp index 7b9a02503ce2..21b01e850967 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.cpp @@ -15,6 +15,7 @@ #include "llvm/MC/MCAssembler.h" #include "llvm/MC/MCCodeEmitter.h" #include "llvm/MC/MCContext.h" +#include "llvm/MC/MCDwarf.h" #include "llvm/MC/MCInst.h" #include "llvm/MC/MCObjectWriter.h" #include "llvm/MC/MCSymbolELF.h" @@ -53,6 +54,22 @@ void MipsELFStreamer::EmitInstruction(const MCInst &Inst, createPendingLabelRelocs(); } +void MipsELFStreamer::EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) { + Frame.Begin = getContext().createTempSymbol(); + MCELFStreamer::EmitLabel(Frame.Begin); +} + +MCSymbol *MipsELFStreamer::EmitCFILabel() { + MCSymbol *Label = getContext().createTempSymbol("cfi", true); + MCELFStreamer::EmitLabel(Label); + return Label; +} + +void 
MipsELFStreamer::EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) { + Frame.End = getContext().createTempSymbol(); + MCELFStreamer::EmitLabel(Frame.End); +} + void MipsELFStreamer::createPendingLabelRelocs() { MipsTargetELFStreamer *ELFTargetStreamer = static_cast<MipsTargetELFStreamer *>(getTargetStreamer()); diff --git a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h b/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h index d141f5d77c61..56a0ff96c7bd 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h +++ b/lib/Target/Mips/MCTargetDesc/MipsELFStreamer.h @@ -26,6 +26,7 @@ class MCAsmBackend; class MCCodeEmitter; class MCContext; class MCSubtargetInfo; +struct MCDwarfFrameInfo; class MipsELFStreamer : public MCELFStreamer { SmallVector<std::unique_ptr<MipsOptionRecord>, 8> MipsOptionRecords; @@ -60,6 +61,12 @@ public: void EmitValueImpl(const MCExpr *Value, unsigned Size, SMLoc Loc) override; void EmitIntValue(uint64_t Value, unsigned Size) override; + // Overriding these functions allows us to avoid recording of these labels + // in EmitLabel and later marking them as microMIPS. + void EmitCFIStartProcImpl(MCDwarfFrameInfo &Frame) override; + void EmitCFIEndProcImpl(MCDwarfFrameInfo &Frame) override; + MCSymbol *EmitCFILabel() override; + /// Emits all the option records stored up until the point it's called. 
void EmitMipsOptionRecords(); diff --git a/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h b/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h index d7f6cf91db73..eedad16dddc3 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h +++ b/lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h @@ -222,6 +222,10 @@ namespace Mips { fixup_Mips_SUB, fixup_MICROMIPS_SUB, + // resulting in - R_MIPS_JALR/R_MICROMIPS_JALR + fixup_Mips_JALR, + fixup_MICROMIPS_JALR, + // Marker LastTargetFixupKind, NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp index f498d830c8f0..1506b4a83649 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsMCAsmInfo.cpp @@ -21,9 +21,8 @@ void MipsMCAsmInfo::anchor() { } MipsMCAsmInfo::MipsMCAsmInfo(const Triple &TheTriple) { IsLittleEndian = TheTriple.isLittleEndian(); - if (TheTriple.isMIPS64()) { + if (TheTriple.isMIPS64() && TheTriple.getEnvironment() != Triple::GNUABIN32) CodePointerSize = CalleeSaveStackSlotSize = 8; - } // FIXME: This condition isn't quite right but it's the best we can do until // this object can identify the ABI. It will misbehave when using O32 @@ -50,21 +49,5 @@ MipsMCAsmInfo::MipsMCAsmInfo(const Triple &TheTriple) { ExceptionsType = ExceptionHandling::DwarfCFI; DwarfRegNumForCFI = true; HasMipsExpressions = true; - - // Enable IAS by default for O32. - if (TheTriple.isMIPS32()) - UseIntegratedAssembler = true; - - // Enable IAS by default for Debian mips64/mips64el. - if (TheTriple.getEnvironment() == Triple::GNUABI64) - UseIntegratedAssembler = true; - - // Enable IAS by default for Android mips64el that uses N64 ABI. - if (TheTriple.getArch() == Triple::mips64el && TheTriple.isAndroid()) - UseIntegratedAssembler = true; - - // Enable IAS by default for FreeBSD / OpenBSD mips64/mips64el. 
- if (TheTriple.isOSFreeBSD() || - TheTriple.isOSOpenBSD()) - UseIntegratedAssembler = true; + UseIntegratedAssembler = true; } diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp index cd34b0ab70b4..f43a4d980f92 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp @@ -213,6 +213,12 @@ encodeInstruction(const MCInst &MI, raw_ostream &OS, TmpInst.setOpcode (NewOpcode); Binary = getBinaryCodeForInstr(TmpInst, Fixups, STI); } + + if (((MI.getOpcode() == Mips::MOVEP_MM) || + (MI.getOpcode() == Mips::MOVEP_MMR6))) { + unsigned RegPair = getMovePRegPairOpValue(MI, 0, Fixups, STI); + Binary = (Binary & 0xFFFFFC7F) | (RegPair << 7); + } } const MCInstrDesc &Desc = MCII.get(TmpInst.getOpcode()); @@ -607,6 +613,9 @@ getExprOpValue(const MCExpr *Expr, SmallVectorImpl<MCFixup> &Fixups, case MipsMCExpr::MEK_Special: llvm_unreachable("Unhandled fixup kind!"); break; + case MipsMCExpr::MEK_DTPREL: + llvm_unreachable("MEK_DTPREL is used for TLS DIEExpr only"); + break; case MipsMCExpr::MEK_CALL_HI16: FixupKind = Mips::fixup_Mips_CALL_HI16; break; diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp index 0bddba781453..99857e083c6c 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsMCExpr.cpp @@ -43,6 +43,9 @@ void MipsMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const { case MEK_Special: llvm_unreachable("MEK_None and MEK_Special are invalid"); break; + case MEK_DTPREL: + llvm_unreachable("MEK_DTPREL is used for TLS DIEExpr only"); + break; case MEK_CALL_HI16: OS << "%call_hi"; break; @@ -157,6 +160,8 @@ MipsMCExpr::evaluateAsRelocatableImpl(MCValue &Res, case MEK_None: case MEK_Special: llvm_unreachable("MEK_None and MEK_Special are invalid"); + case MEK_DTPREL: + llvm_unreachable("MEK_DTPREL is used for TLS DIEExpr only"); case MEK_DTPREL_HI: 
case MEK_DTPREL_LO: case MEK_GOT: @@ -244,6 +249,9 @@ void MipsMCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const { case MEK_Special: llvm_unreachable("MEK_None and MEK_Special are invalid"); break; + case MEK_DTPREL: + llvm_unreachable("MEK_DTPREL is used for TLS DIEExpr only"); + break; case MEK_CALL_HI16: case MEK_CALL_LO16: case MEK_GOT: diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h b/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h index 495d525ccff4..bf3274ab5d17 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h +++ b/lib/Target/Mips/MCTargetDesc/MipsMCExpr.h @@ -22,6 +22,7 @@ public: MEK_None, MEK_CALL_HI16, MEK_CALL_LO16, + MEK_DTPREL, MEK_DTPREL_HI, MEK_DTPREL_LO, MEK_GOT, diff --git a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp index ce208b7f98bc..a8cd7b0d9b03 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsMCTargetDesc.cpp @@ -47,10 +47,17 @@ using namespace llvm; /// FIXME: Merge with the copy in MipsSubtarget.cpp StringRef MIPS_MC::selectMipsCPU(const Triple &TT, StringRef CPU) { if (CPU.empty() || CPU == "generic") { - if (TT.isMIPS32()) - CPU = "mips32"; - else - CPU = "mips64"; + if (TT.getSubArch() == llvm::Triple::MipsSubArch_r6) { + if (TT.isMIPS32()) + CPU = "mips32r6"; + else + CPU = "mips64r6"; + } else { + if (TT.isMIPS32()) + CPU = "mips32"; + else + CPU = "mips64"; + } } return CPU; } diff --git a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp index 1eb21b6cc826..58f9717e1cc6 100644 --- a/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp +++ b/lib/Target/Mips/MCTargetDesc/MipsTargetStreamer.cpp @@ -248,7 +248,11 @@ void MipsTargetStreamer::emitEmptyDelaySlot(bool hasShortDelaySlot, SMLoc IDLoc, } void MipsTargetStreamer::emitNop(SMLoc IDLoc, const MCSubtargetInfo *STI) { - emitRRI(Mips::SLL, Mips::ZERO, Mips::ZERO, 0, IDLoc, STI); + const FeatureBitset 
&Features = STI->getFeatureBits(); + if (Features[Mips::FeatureMicroMips]) + emitRR(Mips::MOVE16_MM, Mips::ZERO, Mips::ZERO, IDLoc, STI); + else + emitRRI(Mips::SLL, Mips::ZERO, Mips::ZERO, 0, IDLoc, STI); } /// Emit the $gp restore operation for .cprestore. diff --git a/lib/Target/Mips/MicroMips32r6InstrInfo.td b/lib/Target/Mips/MicroMips32r6InstrInfo.td index f795112ae2b7..814918d25e70 100644 --- a/lib/Target/Mips/MicroMips32r6InstrInfo.td +++ b/lib/Target/Mips/MicroMips32r6InstrInfo.td @@ -159,6 +159,7 @@ class SYNC_MMR6_ENC : POOL32A_SYNC_FM_MMR6; class SYNCI_MMR6_ENC : POOL32I_SYNCI_FM_MMR6, MMR6Arch<"synci">; class RDPGPR_MMR6_ENC : POOL32A_RDPGPR_FM_MMR6<0b1110000101>; class SDBBP_MMR6_ENC : SDBBP_FM_MM, MMR6Arch<"sdbbp">; +class SIGRIE_MMR6_ENC : SIGRIE_FM_MM, MMR6Arch<"sigrie">; class XOR_MMR6_ENC : ARITH_FM_MMR6<"xor", 0x310>; class XORI_MMR6_ENC : ADDI_FM_MMR6<"xori", 0x1c>; class ABS_S_MMR6_ENC : POOL32F_ABS_FM_MMR6<"abs.s", 0, 0b0001101>; @@ -1101,7 +1102,9 @@ class BREAK16_MMR6_DESC : BrkSdbbp16MM<"break16", II_BREAK>, MMR6Arch<"break16"> class LI16_MMR6_DESC : LoadImmMM16<"li16", li16_imm, GPRMM16Opnd>, MMR6Arch<"li16">, IsAsCheapAsAMove; class MOVE16_MMR6_DESC : MoveMM16<"move16", GPR32Opnd>, MMR6Arch<"move16">; -class MOVEP_MMR6_DESC : MovePMM16<"movep", GPRMM16OpndMoveP>, MMR6Arch<"movep">; +class MOVEP_MMR6_DESC : MovePMM16<"movep", GPRMM16OpndMovePPairFirst, + GPRMM16OpndMovePPairSecond, GPRMM16OpndMoveP>, + MMR6Arch<"movep">; class SDBBP16_MMR6_DESC : BrkSdbbp16MM<"sdbbp16", II_SDBBP>, MMR6Arch<"sdbbp16">; class SUBU16_MMR6_DESC : ArithRMM16<"subu16", GPRMM16Opnd, 0, II_SUBU, sub>, MMR6Arch<"subu16"> { @@ -1160,6 +1163,14 @@ class SDBBP_MMR6_DESC : MipsR6Inst { InstrItinClass Itinerary = II_SDBBP; } +class SIGRIE_MMR6_DESC : MipsR6Inst { + dag OutOperandList = (outs); + dag InOperandList = (ins uimm16:$code_); + string AsmString = !strconcat("sigrie", "\t$code_"); + list<dag> Pattern = []; + InstrItinClass Itinerary = II_SIGRIE; +} + class 
LWM16_MMR6_DESC : MicroMipsInst16<(outs reglist16:$rt), (ins mem_mm_4sp:$addr), !strconcat("lwm16", "\t$rt, $addr"), [], @@ -1425,6 +1436,7 @@ def SYNCI_MMR6 : StdMMR6Rel, SYNCI_MMR6_DESC, SYNCI_MMR6_ENC, ISA_MICROMIPS32R6; def RDPGPR_MMR6 : R6MMR6Rel, RDPGPR_MMR6_DESC, RDPGPR_MMR6_ENC, ISA_MICROMIPS32R6; def SDBBP_MMR6 : R6MMR6Rel, SDBBP_MMR6_DESC, SDBBP_MMR6_ENC, ISA_MICROMIPS32R6; +def SIGRIE_MMR6 : R6MMR6Rel, SIGRIE_MMR6_DESC, SIGRIE_MMR6_ENC, ISA_MICROMIPS32R6; def XOR_MMR6 : StdMMR6Rel, XOR_MMR6_DESC, XOR_MMR6_ENC, ISA_MICROMIPS32R6; def XORI_MMR6 : StdMMR6Rel, XORI_MMR6_DESC, XORI_MMR6_ENC, ISA_MICROMIPS32R6; let DecoderMethod = "DecodeMemMMImm16" in { @@ -1633,6 +1645,7 @@ def B_MMR6_Pseudo : MipsAsmPseudoInst<(outs), (ins brtarget_mm:$offset), } def : MipsInstAlias<"sync", (SYNC_MMR6 0), 1>, ISA_MICROMIPS32R6; def : MipsInstAlias<"sdbbp", (SDBBP_MMR6 0), 1>, ISA_MICROMIPS32R6; +def : MipsInstAlias<"sigrie", (SIGRIE_MMR6 0), 1>, ISA_MICROMIPS32R6; def : MipsInstAlias<"rdhwr $rt, $rs", (RDHWR_MMR6 GPR32Opnd:$rt, HWRegsOpnd:$rs, 0), 1>, ISA_MICROMIPS32R6; @@ -1733,7 +1746,7 @@ defm S_MMR6 : Cmp_Pats<f32, NOR_MMR6, ZERO>, ISA_MICROMIPS32R6; defm D_MMR6 : Cmp_Pats<f64, NOR_MMR6, ZERO>, ISA_MICROMIPS32R6; def : MipsPat<(f32 fpimm0), (MTC1_MMR6 ZERO)>, ISA_MICROMIPS32R6; -def : MipsPat<(f32 fpimm0neg), (FNEG_S_MMR6 (MTC1 ZERO))>, ISA_MICROMIPS32R6; +def : MipsPat<(f32 fpimm0neg), (FNEG_S_MMR6 (MTC1_MMR6 ZERO))>, ISA_MICROMIPS32R6; def : MipsPat<(MipsTruncIntFP FGR64Opnd:$src), (TRUNC_W_D_MMR6 FGR64Opnd:$src)>, ISA_MICROMIPS32R6; diff --git a/lib/Target/Mips/MicroMipsInstrFPU.td b/lib/Target/Mips/MicroMipsInstrFPU.td index 84ae0eddf980..1731afc1961f 100644 --- a/lib/Target/Mips/MicroMipsInstrFPU.td +++ b/lib/Target/Mips/MicroMipsInstrFPU.td @@ -243,6 +243,8 @@ let DecoderNamespace = "MicroMipsFP64" in { MFC1_FM_MM<0xe0>, ISA_MICROMIPS, FGR_64; def MFHC1_D64_MM : MFC1_FT<"mfhc1", GPR32Opnd, FGR64Opnd, II_MFHC1>, MFC1_FM_MM<0xc0>, ISA_MICROMIPS, FGR_64; + def 
MTC1_D64_MM : MTC1_FT<"mtc1", FGR64Opnd, GPR32Opnd, II_MTC1>, + MFC1_FM_MM<0xa0>, ISA_MICROMIPS, FGR_64; } let DecoderNamespace = "MicroMips" in { @@ -405,6 +407,9 @@ let AddedComplexity = 40 in { def : StoreRegImmPat<SWC1_MM, f32>, ISA_MICROMIPS; } +def : MipsPat<(MipsMTC1_D64 GPR32Opnd:$src), + (MTC1_D64_MM GPR32Opnd:$src)>, ISA_MICROMIPS, FGR_64; + def : MipsPat<(f32 fpimm0), (MTC1_MM ZERO)>, ISA_MICROMIPS32_NOT_MIPS32R6; def : MipsPat<(f32 fpimm0neg), (FNEG_S_MM (MTC1_MM ZERO))>, ISA_MICROMIPS32_NOT_MIPS32R6; diff --git a/lib/Target/Mips/MicroMipsInstrFormats.td b/lib/Target/Mips/MicroMipsInstrFormats.td index a9c53e08b810..2a4cc279ef0d 100644 --- a/lib/Target/Mips/MicroMipsInstrFormats.td +++ b/lib/Target/Mips/MicroMipsInstrFormats.td @@ -933,6 +933,17 @@ class SDBBP_FM_MM : MMArch { let Inst{5-0} = 0x3c; } +class SIGRIE_FM_MM : MMArch { + bits<16> code_; + + bits<32> Inst; + + let Inst{31-26} = 0x0; + let Inst{25-22} = 0x0; + let Inst{21-6} = code_; + let Inst{5-0} = 0b111111; +} + class RDHWR_FM_MM : MMArch { bits<5> rt; bits<5> rd; diff --git a/lib/Target/Mips/MicroMipsInstrInfo.td b/lib/Target/Mips/MicroMipsInstrInfo.td index ebadb59a0432..af380a0ec71e 100644 --- a/lib/Target/Mips/MicroMipsInstrInfo.td +++ b/lib/Target/Mips/MicroMipsInstrInfo.td @@ -231,27 +231,14 @@ class StoreLeftRightMM<string opstr, SDNode OpNode, RegisterOperand RO, bit mayStore = 1; } -/// A register pair used by movep instruction. 
-def MovePRegPairAsmOperand : AsmOperandClass { - let Name = "MovePRegPair"; - let ParserMethod = "parseMovePRegPair"; - let PredicateMethod = "isMovePRegPair"; -} - -def movep_regpair : Operand<i32> { - let EncoderMethod = "getMovePRegPairOpValue"; - let ParserMatchClass = MovePRegPairAsmOperand; - let PrintMethod = "printRegisterList"; - let DecoderMethod = "DecodeMovePRegPair"; - let MIOperandInfo = (ops ptr_rc, ptr_rc); -} - -class MovePMM16<string opstr, RegisterOperand RO> : -MicroMipsInst16<(outs movep_regpair:$dst_regs), (ins RO:$rs, RO:$rt), - !strconcat(opstr, "\t$dst_regs, $rs, $rt"), [], +class MovePMM16<string opstr, RegisterOperand RO1, RegisterOperand RO2, + RegisterOperand RO3> : +MicroMipsInst16<(outs RO1:$rd1, RO2:$rd2), (ins RO3:$rs, RO3:$rt), + !strconcat(opstr, "\t$rd1, $rd2, $rs, $rt"), [], NoItinerary, FrmR> { let isReMaterializable = 1; let isMoveReg = 1; + let DecoderMethod = "DecodeMovePOperands"; } class StorePairMM<string opstr, ComplexPattern Addr = addr> @@ -682,8 +669,9 @@ def MFLO16_MM : MoveFromHILOMM<"mflo16", GPR32Opnd, AC0>, MFHILO_FM_MM16<0x12>, ISA_MICROMIPS32_NOT_MIPS32R6; def MOVE16_MM : MoveMM16<"move", GPR32Opnd>, MOVE_FM_MM16<0x03>, ISA_MICROMIPS32_NOT_MIPS32R6; -def MOVEP_MM : MovePMM16<"movep", GPRMM16OpndMoveP>, MOVEP_FM_MM16, - ISA_MICROMIPS32_NOT_MIPS32R6; +def MOVEP_MM : MovePMM16<"movep", GPRMM16OpndMovePPairFirst, + GPRMM16OpndMovePPairSecond, GPRMM16OpndMoveP>, + MOVEP_FM_MM16, ISA_MICROMIPS32_NOT_MIPS32R6; def LI16_MM : LoadImmMM16<"li16", li16_imm, GPRMM16Opnd>, LI_FM_MM16, IsAsCheapAsAMove, ISA_MICROMIPS32_NOT_MIPS32R6; def JALR16_MM : JumpLinkRegMM16<"jalr", GPR32Opnd>, JALR_FM_MM16<0x0e>, @@ -1116,6 +1104,27 @@ let DecoderNamespace = "MicroMips" in { ISA_MICROMIPS32_NOT_MIPS32R6; } +let AdditionalPredicates = [NotDSP] in { + def PseudoMULT_MM : MultDivPseudo<MULT, ACC64, GPR32Opnd, MipsMult, II_MULT>, + ISA_MICROMIPS32_NOT_MIPS32R6; + def PseudoMULTu_MM : MultDivPseudo<MULTu, ACC64, GPR32Opnd, MipsMultu, 
II_MULTU>, + ISA_MICROMIPS32_NOT_MIPS32R6; + def PseudoMFHI_MM : PseudoMFLOHI<GPR32, ACC64, MipsMFHI>, + ISA_MICROMIPS32_NOT_MIPS32R6; + def PseudoMFLO_MM : PseudoMFLOHI<GPR32, ACC64, MipsMFLO>, + ISA_MICROMIPS32_NOT_MIPS32R6; + def PseudoMTLOHI_MM : PseudoMTLOHI<ACC64, GPR32>, + ISA_MICROMIPS32_NOT_MIPS32R6; + def PseudoMADD_MM : MAddSubPseudo<MADD, MipsMAdd, II_MADD>, + ISA_MICROMIPS32_NOT_MIPS32R6; + def PseudoMADDU_MM : MAddSubPseudo<MADDU, MipsMAddu, II_MADDU>, + ISA_MICROMIPS32_NOT_MIPS32R6; + def PseudoMSUB_MM : MAddSubPseudo<MSUB, MipsMSub, II_MSUB>, + ISA_MICROMIPS32_NOT_MIPS32R6; + def PseudoMSUBU_MM : MAddSubPseudo<MSUBU, MipsMSubu, II_MSUBU>, + ISA_MICROMIPS32_NOT_MIPS32R6; +} + def TAILCALL_MM : TailCall<J_MM, jmptarget_mm>, ISA_MIPS1_NOT_32R6_64R6; def TAILCALLREG_MM : TailCallReg<JRC16_MM, GPR32Opnd>, @@ -1262,6 +1271,8 @@ let AddedComplexity = 40 in def : MipsPat<(bswap GPR32:$rt), (ROTR_MM (WSBH_MM GPR32:$rt), 16)>, ISA_MICROMIPS; +def : MipsPat<(MipsJmpLink (i32 texternalsym:$dst)), + (JAL_MM texternalsym:$dst)>, ISA_MICROMIPS32_NOT_MIPS32R6; def : MipsPat<(MipsTailCall (iPTR tglobaladdr:$dst)), (TAILCALL_MM tglobaladdr:$dst)>, ISA_MICROMIPS32_NOT_MIPS32R6; def : MipsPat<(MipsTailCall (iPTR texternalsym:$dst)), diff --git a/lib/Target/Mips/MicroMipsSizeReduction.cpp b/lib/Target/Mips/MicroMipsSizeReduction.cpp index 568cdfb5b110..f9062cc23da2 100644 --- a/lib/Target/Mips/MicroMipsSizeReduction.cpp +++ b/lib/Target/Mips/MicroMipsSizeReduction.cpp @@ -31,13 +31,14 @@ namespace { /// Order of operands to transfer // TODO: Will be extended when additional optimizations are added enum OperandTransfer { - OT_NA, ///< Not applicable - OT_OperandsAll, ///< Transfer all operands - OT_Operands02, ///< Transfer operands 0 and 2 - OT_Operand2, ///< Transfer just operand 2 - OT_OperandsXOR, ///< Transfer operands for XOR16 - OT_OperandsLwp, ///< Transfer operands for LWP - OT_OperandsSwp, ///< Transfer operands for SWP + OT_NA, ///< Not applicable + 
OT_OperandsAll, ///< Transfer all operands + OT_Operands02, ///< Transfer operands 0 and 2 + OT_Operand2, ///< Transfer just operand 2 + OT_OperandsXOR, ///< Transfer operands for XOR16 + OT_OperandsLwp, ///< Transfer operands for LWP + OT_OperandsSwp, ///< Transfer operands for SWP + OT_OperandsMovep, ///< Transfer operands for MOVEP }; /// Reduction type @@ -170,6 +171,10 @@ private: // returns true on success. static bool ReduceSXtoSX16(ReduceEntryFunArgs *Arguments); + // Attempts to reduce two MOVE instructions into MOVEP instruction, + // returns true on success. + static bool ReduceMoveToMovep(ReduceEntryFunArgs *Arguments); + // Attempts to reduce arithmetic instructions, returns true on success. static bool ReduceArithmeticInstructions(ReduceEntryFunArgs *Arguments); @@ -243,6 +248,8 @@ ReduceEntryVector MicroMipsSizeReduce::ReduceTable = { OpInfo(OT_OperandsLwp), ImmField(0, -2048, 2048, 2)}, {RT_OneInstr, OpCodes(Mips::LW_MM, Mips::LWSP_MM), ReduceXWtoXWSP, OpInfo(OT_OperandsAll), ImmField(2, 0, 32, 2)}, + {RT_TwoInstr, OpCodes(Mips::MOVE16_MM, Mips::MOVEP_MM), ReduceMoveToMovep, + OpInfo(OT_OperandsMovep), ImmField(0, 0, 0, -1)}, {RT_OneInstr, OpCodes(Mips::SB, Mips::SB16_MM), ReduceSXtoSX16, OpInfo(OT_OperandsAll), ImmField(0, 0, 16, 2)}, {RT_OneInstr, OpCodes(Mips::SB_MM, Mips::SB16_MM), ReduceSXtoSX16, @@ -562,6 +569,89 @@ bool MicroMipsSizeReduce::ReduceSXtoSX16(ReduceEntryFunArgs *Arguments) { return ReplaceInstruction(MI, Entry); } +// Returns true if Reg can be a source register +// of MOVEP instruction +static bool IsMovepSrcRegister(unsigned Reg) { + + if (Reg == Mips::ZERO || Reg == Mips::V0 || Reg == Mips::V1 || + Reg == Mips::S0 || Reg == Mips::S1 || Reg == Mips::S2 || + Reg == Mips::S3 || Reg == Mips::S4) + return true; + + return false; +} + +// Returns true if Reg can be a destination register +// of MOVEP instruction +static bool IsMovepDestinationReg(unsigned Reg) { + + if (Reg == Mips::A0 || Reg == Mips::A1 || Reg == Mips::A2 || + Reg 
== Mips::A3 || Reg == Mips::S5 || Reg == Mips::S6) + return true; + + return false; +} + +// Returns true if the registers can be a pair of destination +// registers in MOVEP instruction +static bool IsMovepDestinationRegPair(unsigned R0, unsigned R1) { + + if ((R0 == Mips::A0 && R1 == Mips::S5) || + (R0 == Mips::A0 && R1 == Mips::S6) || + (R0 == Mips::A0 && R1 == Mips::A1) || + (R0 == Mips::A0 && R1 == Mips::A2) || + (R0 == Mips::A0 && R1 == Mips::A3) || + (R0 == Mips::A1 && R1 == Mips::A2) || + (R0 == Mips::A1 && R1 == Mips::A3) || + (R0 == Mips::A2 && R1 == Mips::A3)) + return true; + + return false; +} + +bool MicroMipsSizeReduce::ReduceMoveToMovep(ReduceEntryFunArgs *Arguments) { + + const ReduceEntry &Entry = Arguments->Entry; + MachineBasicBlock::instr_iterator &NextMII = Arguments->NextMII; + const MachineBasicBlock::instr_iterator &E = + Arguments->MI->getParent()->instr_end(); + + if (NextMII == E) + return false; + + MachineInstr *MI1 = Arguments->MI; + MachineInstr *MI2 = &*NextMII; + + unsigned RegDstMI1 = MI1->getOperand(0).getReg(); + unsigned RegSrcMI1 = MI1->getOperand(1).getReg(); + + if (!IsMovepSrcRegister(RegSrcMI1)) + return false; + + if (!IsMovepDestinationReg(RegDstMI1)) + return false; + + if (MI2->getOpcode() != Entry.WideOpc()) + return false; + + unsigned RegDstMI2 = MI2->getOperand(0).getReg(); + unsigned RegSrcMI2 = MI2->getOperand(1).getReg(); + + if (!IsMovepSrcRegister(RegSrcMI2)) + return false; + + bool ConsecutiveForward; + if (IsMovepDestinationRegPair(RegDstMI1, RegDstMI2)) { + ConsecutiveForward = true; + } else if (IsMovepDestinationRegPair(RegDstMI2, RegDstMI1)) { + ConsecutiveForward = false; + } else + return false; + + NextMII = std::next(NextMII); + return ReplaceInstruction(MI1, Entry, MI2, ConsecutiveForward); +} + bool MicroMipsSizeReduce::ReduceXORtoXOR16(ReduceEntryFunArgs *Arguments) { MachineInstr *MI = Arguments->MI; @@ -641,18 +731,25 @@ bool MicroMipsSizeReduce::ReplaceInstruction(MachineInstr *MI, } break; } 
+ case OT_OperandsMovep: case OT_OperandsLwp: case OT_OperandsSwp: { if (ConsecutiveForward) { MIB.add(MI->getOperand(0)); MIB.add(MI2->getOperand(0)); MIB.add(MI->getOperand(1)); - MIB.add(MI->getOperand(2)); + if (OpTransfer == OT_OperandsMovep) + MIB.add(MI2->getOperand(1)); + else + MIB.add(MI->getOperand(2)); } else { // consecutive backward MIB.add(MI2->getOperand(0)); MIB.add(MI->getOperand(0)); MIB.add(MI2->getOperand(1)); - MIB.add(MI2->getOperand(2)); + if (OpTransfer == OT_OperandsMovep) + MIB.add(MI->getOperand(1)); + else + MIB.add(MI2->getOperand(2)); } LLVM_DEBUG(dbgs() << "and converting 32-bit: " << *MI2 diff --git a/lib/Target/Mips/Mips.h b/lib/Target/Mips/Mips.h index ef3a807c7648..6bb7aecc867a 100644 --- a/lib/Target/Mips/Mips.h +++ b/lib/Target/Mips/Mips.h @@ -38,6 +38,7 @@ namespace llvm { FunctionPass *createMipsConstantIslandPass(); FunctionPass *createMicroMipsSizeReducePass(); FunctionPass *createMipsExpandPseudoPass(); + FunctionPass *createMipsPreLegalizeCombiner(); InstructionSelector *createMipsInstructionSelector(const MipsTargetMachine &, MipsSubtarget &, @@ -46,6 +47,7 @@ namespace llvm { void initializeMipsDelaySlotFillerPass(PassRegistry &); void initializeMipsBranchExpansionPass(PassRegistry &); void initializeMicroMipsSizeReducePass(PassRegistry &); + void initializeMipsPreLegalizerCombinerPass(PassRegistry&); } // end namespace llvm; #endif diff --git a/lib/Target/Mips/Mips16HardFloat.cpp b/lib/Target/Mips/Mips16HardFloat.cpp index c310d9491af8..f237bb6d4006 100644 --- a/lib/Target/Mips/Mips16HardFloat.cpp +++ b/lib/Target/Mips/Mips16HardFloat.cpp @@ -74,16 +74,18 @@ static FPReturnVariant whichFPReturnVariant(Type *T) { return FRet; case Type::DoubleTyID: return DRet; - case Type::StructTyID: - if (T->getStructNumElements() != 2) + case Type::StructTyID: { + StructType *ST = cast<StructType>(T); + if (ST->getNumElements() != 2) break; - if ((T->getContainedType(0)->isFloatTy()) && - (T->getContainedType(1)->isFloatTy())) + if 
((ST->getElementType(0)->isFloatTy()) && + (ST->getElementType(1)->isFloatTy())) return CFRet; - if ((T->getContainedType(0)->isDoubleTy()) && - (T->getContainedType(1)->isDoubleTy())) + if ((ST->getElementType(0)->isDoubleTy()) && + (ST->getElementType(1)->isDoubleTy())) return CDRet; break; + } default: break; } diff --git a/lib/Target/Mips/Mips16ISelLowering.cpp b/lib/Target/Mips/Mips16ISelLowering.cpp index 8ce47e3f669d..79df622241a0 100644 --- a/lib/Target/Mips/Mips16ISelLowering.cpp +++ b/lib/Target/Mips/Mips16ISelLowering.cpp @@ -386,27 +386,22 @@ const char* Mips16TargetLowering:: } else if (RetTy ->isDoubleTy()) { result = dfMips16Helper[stubNum]; - } - else if (RetTy->isStructTy()) { + } else if (StructType *SRetTy = dyn_cast<StructType>(RetTy)) { // check if it's complex - if (RetTy->getNumContainedTypes() == 2) { - if ((RetTy->getContainedType(0)->isFloatTy()) && - (RetTy->getContainedType(1)->isFloatTy())) { + if (SRetTy->getNumElements() == 2) { + if ((SRetTy->getElementType(0)->isFloatTy()) && + (SRetTy->getElementType(1)->isFloatTy())) { result = scMips16Helper[stubNum]; - } - else if ((RetTy->getContainedType(0)->isDoubleTy()) && - (RetTy->getContainedType(1)->isDoubleTy())) { + } else if ((SRetTy->getElementType(0)->isDoubleTy()) && + (SRetTy->getElementType(1)->isDoubleTy())) { result = dcMips16Helper[stubNum]; - } - else { + } else { llvm_unreachable("Uncovered condition"); } - } - else { + } else { llvm_unreachable("Uncovered condition"); } - } - else { + } else { if (stubNum == 0) { needHelper = false; return ""; diff --git a/lib/Target/Mips/Mips16InstrInfo.cpp b/lib/Target/Mips/Mips16InstrInfo.cpp index 219f1ad33586..efebc99b5dae 100644 --- a/lib/Target/Mips/Mips16InstrInfo.cpp +++ b/lib/Target/Mips/Mips16InstrInfo.cpp @@ -97,9 +97,9 @@ void Mips16InstrInfo::copyPhysReg(MachineBasicBlock &MBB, MIB.addReg(SrcReg, getKillRegState(KillSrc)); } -bool Mips16InstrInfo::isCopyInstr(const MachineInstr &MI, - const MachineOperand *&Src, - const 
MachineOperand *&Dest) const { +bool Mips16InstrInfo::isCopyInstrImpl(const MachineInstr &MI, + const MachineOperand *&Src, + const MachineOperand *&Dest) const { if (MI.isMoveReg()) { Dest = &MI.getOperand(0); Src = &MI.getOperand(1); diff --git a/lib/Target/Mips/Mips16InstrInfo.h b/lib/Target/Mips/Mips16InstrInfo.h index 8190be6187ea..6a802e4cce5d 100644 --- a/lib/Target/Mips/Mips16InstrInfo.h +++ b/lib/Target/Mips/Mips16InstrInfo.h @@ -53,9 +53,6 @@ public: const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const override; - bool isCopyInstr(const MachineInstr &MI, const MachineOperand *&Src, - const MachineOperand *&Dest) const override; - void storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg, bool isKill, int FrameIndex, @@ -105,6 +102,14 @@ public: void BuildAddiuSpImm (MachineBasicBlock &MBB, MachineBasicBlock::iterator I, int64_t Imm) const; + +protected: + /// If the specific machine instruction is a instruction that moves/copies + /// value from one register to another register return true along with + /// @Source machine operand and @Destination machine operand. + bool isCopyInstrImpl(const MachineInstr &MI, const MachineOperand *&Source, + const MachineOperand *&Destination) const override; + private: unsigned getAnalyzableBrOpc(unsigned Opc) const override; diff --git a/lib/Target/Mips/Mips32r6InstrFormats.td b/lib/Target/Mips/Mips32r6InstrFormats.td index e1d08cad88b7..623af570a5e6 100644 --- a/lib/Target/Mips/Mips32r6InstrFormats.td +++ b/lib/Target/Mips/Mips32r6InstrFormats.td @@ -87,6 +87,7 @@ def OPCODE5_BC1NEZ : OPCODE5<0b01101>; def OPCODE5_BC2EQZ : OPCODE5<0b01001>; def OPCODE5_BC2NEZ : OPCODE5<0b01101>; def OPCODE5_BGEZAL : OPCODE5<0b10001>; +def OPCODE5_SIGRIE : OPCODE5<0b10111>; // The next four constants are unnamed in the spec. These names are taken from // the OPGROUP names they are used with. 
def OPCODE5_LDC2 : OPCODE5<0b01110>; @@ -602,3 +603,12 @@ class SPECIAL3_GINV<bits<2> ginv> : MipsR6Inst { let Inst{7-6} = ginv; let Inst{5-0} = 0b111101; } + +class SIGRIE_FM : MipsR6Inst { + bits<16> code_; + + let Inst{31-26} = OPGROUP_REGIMM.Value; + let Inst{25-21} = 0; + let Inst{20-16} = OPCODE5_SIGRIE.Value; + let Inst{15-0} = code_; +} diff --git a/lib/Target/Mips/Mips32r6InstrInfo.td b/lib/Target/Mips/Mips32r6InstrInfo.td index d86fc3f658ae..2bd0cf2d59a6 100644 --- a/lib/Target/Mips/Mips32r6InstrInfo.td +++ b/lib/Target/Mips/Mips32r6InstrInfo.td @@ -200,6 +200,8 @@ class CRC32CW_ENC : SPECIAL3_2R_SZ_CRC<2,1>; class GINVI_ENC : SPECIAL3_GINV<0>; class GINVT_ENC : SPECIAL3_GINV<2>; +class SIGRIE_ENC : SIGRIE_FM; + //===----------------------------------------------------------------------===// // // Instruction Multiclasses @@ -846,6 +848,14 @@ class GINVI_DESC : GINV_DESC_BASE<"ginvi", GPR32Opnd, II_GINVI> { } class GINVT_DESC : GINV_DESC_BASE<"ginvt", GPR32Opnd, II_GINVT>; +class SIGRIE_DESC { + dag OutOperandList = (outs); + dag InOperandList = (ins uimm16:$code_); + string AsmString = "sigrie\t$code_"; + list<dag> Pattern = []; + InstrItinClass Itinerary = II_SIGRIE; +} + //===----------------------------------------------------------------------===// // // Instruction Definitions @@ -961,6 +971,7 @@ let AdditionalPredicates = [NotInMicroMips] in { def SEL_S : R6MMR6Rel, SEL_S_ENC, SEL_S_DESC, ISA_MIPS32R6, HARDFLOAT; def SDC2_R6 : SDC2_R6_ENC, SDC2_R6_DESC, ISA_MIPS32R6; def SWC2_R6 : SWC2_R6_ENC, SWC2_R6_DESC, ISA_MIPS32R6; + def SIGRIE : SIGRIE_ENC, SIGRIE_DESC, ISA_MIPS32R6; } let AdditionalPredicates = [NotInMicroMips] in { @@ -988,6 +999,7 @@ def : MipsInstAlias<"evp", (EVP ZERO), 0>, ISA_MIPS32R6; let AdditionalPredicates = [NotInMicroMips] in { def : MipsInstAlias<"sdbbp", (SDBBP_R6 0)>, ISA_MIPS32R6; +def : MipsInstAlias<"sigrie", (SIGRIE 0)>, ISA_MIPS32R6; def : MipsInstAlias<"jr $rs", (JALR ZERO, GPR32Opnd:$rs), 1>, ISA_MIPS32R6, GPR_32; } 
diff --git a/lib/Target/Mips/Mips64InstrInfo.td b/lib/Target/Mips/Mips64InstrInfo.td index 878ec29b188d..5729182deafb 100644 --- a/lib/Target/Mips/Mips64InstrInfo.td +++ b/lib/Target/Mips/Mips64InstrInfo.td @@ -416,6 +416,13 @@ let isCodeGenOnly = 1, rs = 0, shamt = 0 in { // long branches. See the comment in file MipsLongBranch.cpp for detailed // explanation. +// Expands to: lui $dst, %highest/%higher/%hi/%lo($tgt) +def LONG_BRANCH_LUi2Op_64 : PseudoSE<(outs GPR64Opnd:$dst), + (ins brtarget:$tgt), []>, GPR_64; +// Expands to: addiu $dst, %highest/%higher/%hi/%lo($tgt) +def LONG_BRANCH_DADDiu2Op : PseudoSE<(outs GPR64Opnd:$dst), + (ins GPR64Opnd:$src, brtarget:$tgt), []>, GPR_64; + // Expands to: daddiu $dst, $src, %PART($tgt - $baltgt) // where %PART may be %hi or %lo, depending on the relocation kind // that $tgt is annotated with. @@ -838,7 +845,7 @@ def : MipsPat<(i64 (sext (i32 (sub GPR32:$src, GPR32:$src2)))), (SUBu GPR32:$src, GPR32:$src2), sub_32)>; def : MipsPat<(i64 (sext (i32 (mul GPR32:$src, GPR32:$src2)))), (INSERT_SUBREG (i64 (IMPLICIT_DEF)), - (MUL GPR32:$src, GPR32:$src2), sub_32)>; + (MUL GPR32:$src, GPR32:$src2), sub_32)>, ISA_MIPS3_NOT_32R6_64R6; def : MipsPat<(i64 (sext (i32 (MipsMFHI ACC64:$src)))), (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (PseudoMFHI ACC64:$src), sub_32)>; @@ -1139,3 +1146,6 @@ def SLTUImm64 : MipsAsmPseudoInst<(outs GPR64Opnd:$rs), "sltu\t$rs, $rt, $imm">, GPR_64; def : MipsInstAlias<"sltu\t$rs, $imm", (SLTUImm64 GPR64Opnd:$rs, GPR64Opnd:$rs, imm64:$imm)>, GPR_64; + +def : MipsInstAlias<"rdhwr $rt, $rs", + (RDHWR64 GPR64Opnd:$rt, HWRegsOpnd:$rs, 0), 1>, GPR_64; diff --git a/lib/Target/Mips/Mips64r6InstrInfo.td b/lib/Target/Mips/Mips64r6InstrInfo.td index 9df802cc30b9..ac223bc77256 100644 --- a/lib/Target/Mips/Mips64r6InstrInfo.td +++ b/lib/Target/Mips/Mips64r6InstrInfo.td @@ -301,6 +301,9 @@ def : MipsPat<(select (i32 (seteq i32:$cond, immz)), immz, i64:$f), // Patterns used for matching away redundant sign extensions. 
// MIPS32 arithmetic instructions sign extend their result implicitly. +def : MipsPat<(i64 (sext (i32 (mul GPR32:$src, GPR32:$src2)))), + (INSERT_SUBREG (i64 (IMPLICIT_DEF)), + (MUL_R6 GPR32:$src, GPR32:$src2), sub_32)>, ISA_MIPS64R6; def : MipsPat<(i64 (sext (i32 (sdiv GPR32:$src, GPR32:$src2)))), (INSERT_SUBREG (i64 (IMPLICIT_DEF)), (DIV GPR32:$src, GPR32:$src2), sub_32)>, ISA_MIPS64R6; diff --git a/lib/Target/Mips/MipsAsmPrinter.cpp b/lib/Target/Mips/MipsAsmPrinter.cpp index 2e0c25de2bc8..362431fd42a6 100644 --- a/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/lib/Target/Mips/MipsAsmPrinter.cpp @@ -561,6 +561,7 @@ bool MipsAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, O << '$' << MipsInstPrinter::getRegisterName(Reg); return false; } + break; } case 'w': // Print MSA registers for the 'f' constraint @@ -1203,18 +1204,23 @@ void MipsAsmPrinter::PrintDebugValueComment(const MachineInstr *MI, // Emit .dtprelword or .dtpreldword directive // and value for debug thread local expression. -void MipsAsmPrinter::EmitDebugThreadLocal(const MCExpr *Value, - unsigned Size) const { - switch (Size) { - case 4: - OutStreamer->EmitDTPRel32Value(Value); - break; - case 8: - OutStreamer->EmitDTPRel64Value(Value); - break; - default: - llvm_unreachable("Unexpected size of expression value."); +void MipsAsmPrinter::EmitDebugValue(const MCExpr *Value, unsigned Size) const { + if (auto *MipsExpr = dyn_cast<MipsMCExpr>(Value)) { + if (MipsExpr && MipsExpr->getKind() == MipsMCExpr::MEK_DTPREL) { + switch (Size) { + case 4: + OutStreamer->EmitDTPRel32Value(MipsExpr->getSubExpr()); + break; + case 8: + OutStreamer->EmitDTPRel64Value(MipsExpr->getSubExpr()); + break; + default: + llvm_unreachable("Unexpected size of expression value."); + } + return; + } } + AsmPrinter::EmitDebugValue(Value, Size); } // Align all targets of indirect branches on bundle size. 
Used only if target @@ -1240,8 +1246,12 @@ void MipsAsmPrinter::NaClAlignIndirectJumpTargets(MachineFunction &MF) { bool MipsAsmPrinter::isLongBranchPseudo(int Opcode) const { return (Opcode == Mips::LONG_BRANCH_LUi + || Opcode == Mips::LONG_BRANCH_LUi2Op + || Opcode == Mips::LONG_BRANCH_LUi2Op_64 || Opcode == Mips::LONG_BRANCH_ADDiu - || Opcode == Mips::LONG_BRANCH_DADDiu); + || Opcode == Mips::LONG_BRANCH_ADDiu2Op + || Opcode == Mips::LONG_BRANCH_DADDiu + || Opcode == Mips::LONG_BRANCH_DADDiu2Op); } // Force static initialization. diff --git a/lib/Target/Mips/MipsAsmPrinter.h b/lib/Target/Mips/MipsAsmPrinter.h index 999b6f896bae..eb58234e3e77 100644 --- a/lib/Target/Mips/MipsAsmPrinter.h +++ b/lib/Target/Mips/MipsAsmPrinter.h @@ -160,7 +160,7 @@ public: void EmitStartOfAsmFile(Module &M) override; void EmitEndOfAsmFile(Module &M) override; void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS); - void EmitDebugThreadLocal(const MCExpr *Value, unsigned Size) const override; + void EmitDebugValue(const MCExpr *Value, unsigned Size) const override; }; } // end namespace llvm diff --git a/lib/Target/Mips/MipsBranchExpansion.cpp b/lib/Target/Mips/MipsBranchExpansion.cpp index af936e6fc96b..e59267c4fd9b 100644 --- a/lib/Target/Mips/MipsBranchExpansion.cpp +++ b/lib/Target/Mips/MipsBranchExpansion.cpp @@ -128,6 +128,7 @@ struct MBBInfo { uint64_t Size = 0; bool HasLongBranch = false; MachineInstr *Br = nullptr; + uint64_t Offset = 0; MBBInfo() = default; }; @@ -154,8 +155,11 @@ private: void splitMBB(MachineBasicBlock *MBB); void initMBBInfo(); int64_t computeOffset(const MachineInstr *Br); + uint64_t computeOffsetFromTheBeginning(int MBB); void replaceBranch(MachineBasicBlock &MBB, Iter Br, const DebugLoc &DL, MachineBasicBlock *MBBOpnd); + bool buildProperJumpMI(MachineBasicBlock *MBB, + MachineBasicBlock::iterator Pos, DebugLoc DL); void expandToLongBranch(MBBInfo &Info); bool handleForbiddenSlot(); bool handlePossibleLongBranch(); @@ -167,7 +171,6 @@ 
private: SmallVector<MBBInfo, 16> MBBInfos; bool IsPIC; MipsABIInfo ABI; - unsigned LongBranchSeqSize; bool ForceLongBranchFirstPass = false; }; @@ -176,7 +179,7 @@ private: char MipsBranchExpansion::ID = 0; INITIALIZE_PASS(MipsBranchExpansion, DEBUG_TYPE, - "Expand out of range branch instructions and prevent forbidden" + "Expand out of range branch instructions and fix forbidden" " slot hazards", false, false) @@ -268,7 +271,8 @@ void MipsBranchExpansion::splitMBB(MachineBasicBlock *MBB) { // Insert NewMBB and fix control flow. MachineBasicBlock *Tgt = getTargetMBB(*FirstBr); NewMBB->transferSuccessors(MBB); - NewMBB->removeSuccessor(Tgt, true); + if (Tgt != getTargetMBB(*LastBr)) + NewMBB->removeSuccessor(Tgt, true); MBB->addSuccessor(NewMBB); MBB->addSuccessor(Tgt); MFp->insert(std::next(MachineFunction::iterator(MBB)), NewMBB); @@ -294,14 +298,6 @@ void MipsBranchExpansion::initMBBInfo() { for (MachineBasicBlock::instr_iterator MI = MBB->instr_begin(); MI != MBB->instr_end(); ++MI) MBBInfos[I].Size += TII->getInstSizeInBytes(*MI); - - // Search for MBB's branch instruction. - ReverseIter End = MBB->rend(); - ReverseIter Br = getNonDebugInstr(MBB->rbegin(), End); - - if ((Br != End) && !Br->isIndirectBranch() && - (Br->isConditionalBranch() || (Br->isUnconditionalBranch() && IsPIC))) - MBBInfos[I].Br = &*Br; } } @@ -326,6 +322,14 @@ int64_t MipsBranchExpansion::computeOffset(const MachineInstr *Br) { return -Offset + 4; } +// Returns the distance in bytes up until MBB +uint64_t MipsBranchExpansion::computeOffsetFromTheBeginning(int MBB) { + uint64_t Offset = 0; + for (int N = 0; N < MBB; ++N) + Offset += MBBInfos[N].Size; + return Offset; +} + // Replace Br with a branch which has the opposite condition code and a // MachineBasicBlock operand MBBOpnd. 
void MipsBranchExpansion::replaceBranch(MachineBasicBlock &MBB, Iter Br, @@ -359,6 +363,35 @@ void MipsBranchExpansion::replaceBranch(MachineBasicBlock &MBB, Iter Br, Br->eraseFromParent(); } +bool MipsBranchExpansion::buildProperJumpMI(MachineBasicBlock *MBB, + MachineBasicBlock::iterator Pos, + DebugLoc DL) { + bool HasR6 = ABI.IsN64() ? STI->hasMips64r6() : STI->hasMips32r6(); + bool AddImm = HasR6 && !STI->useIndirectJumpsHazard(); + + unsigned JR = ABI.IsN64() ? Mips::JR64 : Mips::JR; + unsigned JIC = ABI.IsN64() ? Mips::JIC64 : Mips::JIC; + unsigned JR_HB = ABI.IsN64() ? Mips::JR_HB64 : Mips::JR_HB; + unsigned JR_HB_R6 = ABI.IsN64() ? Mips::JR_HB64_R6 : Mips::JR_HB_R6; + + unsigned JumpOp; + if (STI->useIndirectJumpsHazard()) + JumpOp = HasR6 ? JR_HB_R6 : JR_HB; + else + JumpOp = HasR6 ? JIC : JR; + + if (JumpOp == Mips::JIC && STI->inMicroMipsMode()) + JumpOp = Mips::JIC_MMR6; + + unsigned ATReg = ABI.IsN64() ? Mips::AT_64 : Mips::AT; + MachineInstrBuilder Instr = + BuildMI(*MBB, Pos, DL, TII->get(JumpOp)).addReg(ATReg); + if (AddImm) + Instr.addImm(0); + + return !AddImm; +} + // Expand branch instructions to long branches. // TODO: This function has to be fixed for beqz16 and bnez16, because it // currently assumes that all branches have 16-bit offsets, and will produce @@ -479,33 +512,21 @@ void MipsBranchExpansion::expandToLongBranch(MBBInfo &I) { // In NaCl, modifying the sp is not allowed in branch delay slot. // For MIPS32R6, we can skip using a delay slot branch. - if (STI->isTargetNaCl() || - (STI->hasMips32r6() && !STI->useIndirectJumpsHazard())) - BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::SP) + bool hasDelaySlot = buildProperJumpMI(BalTgtMBB, Pos, DL); + + if (STI->isTargetNaCl() || !hasDelaySlot) { + BuildMI(*BalTgtMBB, std::prev(Pos), DL, TII->get(Mips::ADDiu), Mips::SP) .addReg(Mips::SP) .addImm(8); - - if (STI->hasMips32r6() && !STI->useIndirectJumpsHazard()) { - const unsigned JICOp = - STI->inMicroMipsMode() ? 
Mips::JIC_MMR6 : Mips::JIC; - BuildMI(*BalTgtMBB, Pos, DL, TII->get(JICOp)) - .addReg(Mips::AT) - .addImm(0); - - } else { - unsigned JROp = - STI->useIndirectJumpsHazard() - ? (STI->hasMips32r6() ? Mips::JR_HB_R6 : Mips::JR_HB) - : Mips::JR; - BuildMI(*BalTgtMBB, Pos, DL, TII->get(JROp)).addReg(Mips::AT); - + } + if (hasDelaySlot) { if (STI->isTargetNaCl()) { BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::NOP)); - } else + } else { BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::ADDiu), Mips::SP) .addReg(Mips::SP) .addImm(8); - + } BalTgtMBB->rbegin()->bundleWithPred(); } } else { @@ -597,46 +618,94 @@ void MipsBranchExpansion::expandToLongBranch(MBBInfo &I) { .addReg(Mips::SP_64) .addImm(0); - if (STI->hasMips64r6() && !STI->useIndirectJumpsHazard()) { - BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::SP_64) + bool hasDelaySlot = buildProperJumpMI(BalTgtMBB, Pos, DL); + // If there is no delay slot, Insert stack adjustment before + if (!hasDelaySlot) { + BuildMI(*BalTgtMBB, std::prev(Pos), DL, TII->get(Mips::DADDiu), + Mips::SP_64) .addReg(Mips::SP_64) .addImm(16); - BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::JIC64)) - .addReg(Mips::AT_64) - .addImm(0); } else { - unsigned JROp = - STI->useIndirectJumpsHazard() - ? (STI->hasMips32r6() ? 
Mips::JR_HB64_R6 : Mips::JR_HB64) - : Mips::JR64; - BuildMI(*BalTgtMBB, Pos, DL, TII->get(JROp)).addReg(Mips::AT_64); BuildMI(*BalTgtMBB, Pos, DL, TII->get(Mips::DADDiu), Mips::SP_64) .addReg(Mips::SP_64) .addImm(16); BalTgtMBB->rbegin()->bundleWithPred(); } } - - assert(LongBrMBB->size() + BalTgtMBB->size() == LongBranchSeqSize); - } else { - // Pre R6: R6: - // $longbr: $longbr: - // j $tgt bc $tgt - // nop $fallthrough - // $fallthrough: - // + } else { // Not PIC Pos = LongBrMBB->begin(); LongBrMBB->addSuccessor(TgtMBB); - if (STI->hasMips32r6()) + + // Compute the position of the potentiall jump instruction (basic blocks + // before + 4 for the instruction) + uint64_t JOffset = computeOffsetFromTheBeginning(MBB->getNumber()) + + MBBInfos[MBB->getNumber()].Size + 4; + uint64_t TgtMBBOffset = computeOffsetFromTheBeginning(TgtMBB->getNumber()); + // If it's a forward jump, then TgtMBBOffset will be shifted by two + // instructions + if (JOffset < TgtMBBOffset) + TgtMBBOffset += 2 * 4; + // Compare 4 upper bits to check if it's the same segment + bool SameSegmentJump = JOffset >> 28 == TgtMBBOffset >> 28; + + if (STI->hasMips32r6() && TII->isBranchOffsetInRange(Mips::BC, I.Offset)) { + // R6: + // $longbr: + // bc $tgt + // $fallthrough: + // BuildMI(*LongBrMBB, Pos, DL, TII->get(STI->inMicroMipsMode() ? Mips::BC_MMR6 : Mips::BC)) .addMBB(TgtMBB); - else + } else if (SameSegmentJump) { + // Pre R6: + // $longbr: + // j $tgt + // nop + // $fallthrough: + // MIBundleBuilder(*LongBrMBB, Pos) .append(BuildMI(*MFp, DL, TII->get(Mips::J)).addMBB(TgtMBB)) .append(BuildMI(*MFp, DL, TII->get(Mips::NOP))); - - assert(LongBrMBB->size() == LongBranchSeqSize); + } else { + // At this point, offset where we need to branch does not fit into + // immediate field of the branch instruction and is not in the same + // segment as jump instruction. 
Therefore we will break it into couple + // instructions, where we first load the offset into register, and then we + // do branch register. + if (ABI.IsN64()) { + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LONG_BRANCH_LUi2Op_64), + Mips::AT_64) + .addMBB(TgtMBB, MipsII::MO_HIGHEST); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LONG_BRANCH_DADDiu2Op), + Mips::AT_64) + .addReg(Mips::AT_64) + .addMBB(TgtMBB, MipsII::MO_HIGHER); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DSLL), Mips::AT_64) + .addReg(Mips::AT_64) + .addImm(16); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LONG_BRANCH_DADDiu2Op), + Mips::AT_64) + .addReg(Mips::AT_64) + .addMBB(TgtMBB, MipsII::MO_ABS_HI); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::DSLL), Mips::AT_64) + .addReg(Mips::AT_64) + .addImm(16); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LONG_BRANCH_DADDiu2Op), + Mips::AT_64) + .addReg(Mips::AT_64) + .addMBB(TgtMBB, MipsII::MO_ABS_LO); + } else { + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LONG_BRANCH_LUi2Op), + Mips::AT) + .addMBB(TgtMBB, MipsII::MO_ABS_HI); + BuildMI(*LongBrMBB, Pos, DL, TII->get(Mips::LONG_BRANCH_ADDiu2Op), + Mips::AT) + .addReg(Mips::AT) + .addMBB(TgtMBB, MipsII::MO_ABS_LO); + } + buildProperJumpMI(LongBrMBB, Pos, DL); + } } if (I.Br->isUnconditionalBranch()) { @@ -666,8 +735,6 @@ bool MipsBranchExpansion::handleForbiddenSlot() { if (!STI->hasMips32r6() || STI->inMicroMipsMode()) return false; - const MipsInstrInfo *TII = STI->getInstrInfo(); - bool Changed = false; for (MachineFunction::iterator FI = MFp->begin(); FI != MFp->end(); ++FI) { @@ -704,66 +771,65 @@ bool MipsBranchExpansion::handleForbiddenSlot() { } bool MipsBranchExpansion::handlePossibleLongBranch() { - - LongBranchSeqSize = IsPIC ? ((ABI.IsN64() || STI->isTargetNaCl()) ? 10 : 9) - : (STI->hasMips32r6() ? 
1 : 2); - if (STI->inMips16Mode() || !STI->enableLongBranchPass()) return false; if (SkipLongBranch) return false; - initMBBInfo(); - - SmallVectorImpl<MBBInfo>::iterator I, E = MBBInfos.end(); bool EverMadeChange = false, MadeChange = true; while (MadeChange) { MadeChange = false; - for (I = MBBInfos.begin(); I != E; ++I) { - // Skip if this MBB doesn't have a branch or the branch has already been - // converted to a long branch. - if (!I->Br || I->HasLongBranch) - continue; + initMBBInfo(); - int64_t Offset = computeOffset(I->Br); + for (unsigned I = 0, E = MBBInfos.size(); I < E; ++I) { + MachineBasicBlock *MBB = MFp->getBlockNumbered(I); + // Search for MBB's branch instruction. + ReverseIter End = MBB->rend(); + ReverseIter Br = getNonDebugInstr(MBB->rbegin(), End); - if (STI->isTargetNaCl()) { - // The offset calculation does not include sandboxing instructions - // that will be added later in the MC layer. Since at this point we - // don't know the exact amount of code that "sandboxing" will add, we - // conservatively estimate that code will not grow more than 100%. - Offset *= 2; - } + if ((Br != End) && Br->isBranch() && !Br->isIndirectBranch() && + (Br->isConditionalBranch() || + (Br->isUnconditionalBranch() && IsPIC))) { + int64_t Offset = computeOffset(&*Br); - // Check if offset fits into the immediate field of the branch. - if (!ForceLongBranchFirstPass && - TII->isBranchOffsetInRange(I->Br->getOpcode(), Offset)) - continue; + if (STI->isTargetNaCl()) { + // The offset calculation does not include sandboxing instructions + // that will be added later in the MC layer. Since at this point we + // don't know the exact amount of code that "sandboxing" will add, we + // conservatively estimate that code will not grow more than 100%. 
+ Offset *= 2; + } - I->HasLongBranch = true; - I->Size += LongBranchSeqSize * 4; - ++LongBranches; - EverMadeChange = MadeChange = true; - } - } + if (ForceLongBranchFirstPass || + !TII->isBranchOffsetInRange(Br->getOpcode(), Offset)) { + MBBInfos[I].Offset = Offset; + MBBInfos[I].Br = &*Br; + } + } + } // End for - ForceLongBranchFirstPass = false; + ForceLongBranchFirstPass = false; - if (!EverMadeChange) - return false; + SmallVectorImpl<MBBInfo>::iterator I, E = MBBInfos.end(); + + for (I = MBBInfos.begin(); I != E; ++I) { + // Skip if this MBB doesn't have a branch or the branch has already been + // converted to a long branch. + if (!I->Br) + continue; - // Do the expansion. - for (I = MBBInfos.begin(); I != E; ++I) - if (I->HasLongBranch) { expandToLongBranch(*I); + ++LongBranches; + EverMadeChange = MadeChange = true; } - MFp->RenumberBlocks(); + MFp->RenumberBlocks(); + } - return true; + return EverMadeChange; } bool MipsBranchExpansion::runOnMachineFunction(MachineFunction &MF) { diff --git a/lib/Target/Mips/MipsCCState.cpp b/lib/Target/Mips/MipsCCState.cpp index 81a1cced93b7..90cb3f437bd5 100644 --- a/lib/Target/Mips/MipsCCState.cpp +++ b/lib/Target/Mips/MipsCCState.cpp @@ -24,10 +24,10 @@ static bool isF128SoftLibCall(const char *CallSym) { "__lttf2", "__multf3", "__netf2", "__powitf2", "__subtf3", "__trunctfdf2", "__trunctfsf2", "__unordtf2", "ceill", "copysignl", "cosl", "exp2l", - "expl", "floorl", "fmal", "fmodl", - "log10l", "log2l", "logl", "nearbyintl", - "powl", "rintl", "roundl", "sinl", - "sqrtl", "truncl"}; + "expl", "floorl", "fmal", "fmaxl", + "fmodl", "log10l", "log2l", "logl", + "nearbyintl", "powl", "rintl", "roundl", + "sinl", "sqrtl", "truncl"}; // Check that LibCalls is sorted alphabetically. 
auto Comp = [](const char *S1, const char *S2) { return strcmp(S1, S2) < 0; }; diff --git a/lib/Target/Mips/MipsCallLowering.cpp b/lib/Target/Mips/MipsCallLowering.cpp index a705ebb6b193..c550fadf6632 100644 --- a/lib/Target/Mips/MipsCallLowering.cpp +++ b/lib/Target/Mips/MipsCallLowering.cpp @@ -16,6 +16,7 @@ #include "MipsCallLowering.h" #include "MipsCCState.h" #include "MipsTargetMachine.h" +#include "llvm/CodeGen/Analysis.h" #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" using namespace llvm; @@ -23,48 +24,89 @@ using namespace llvm; MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI) : CallLowering(&TLI) {} -bool MipsCallLowering::MipsHandler::assign(const CCValAssign &VA, - unsigned vreg) { +bool MipsCallLowering::MipsHandler::assign(unsigned VReg, + const CCValAssign &VA) { if (VA.isRegLoc()) { - assignValueToReg(vreg, VA.getLocReg()); + assignValueToReg(VReg, VA); } else if (VA.isMemLoc()) { - unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8; - unsigned Offset = VA.getLocMemOffset(); - MachinePointerInfo MPO; - unsigned StackAddr = getStackAddress(Size, Offset, MPO); - assignValueToAddress(vreg, StackAddr, Size, MPO); + assignValueToAddress(VReg, VA); } else { return false; } return true; } +bool MipsCallLowering::MipsHandler::assignVRegs(ArrayRef<unsigned> VRegs, + ArrayRef<CCValAssign> ArgLocs, + unsigned ArgLocsStartIndex) { + for (unsigned i = 0; i < VRegs.size(); ++i) + if (!assign(VRegs[i], ArgLocs[ArgLocsStartIndex + i])) + return false; + return true; +} + +void MipsCallLowering::MipsHandler::setLeastSignificantFirst( + SmallVectorImpl<unsigned> &VRegs) { + if (!MIRBuilder.getMF().getDataLayout().isLittleEndian()) + std::reverse(VRegs.begin(), VRegs.end()); +} + +bool MipsCallLowering::MipsHandler::handle( + ArrayRef<CCValAssign> ArgLocs, ArrayRef<CallLowering::ArgInfo> Args) { + SmallVector<unsigned, 4> VRegs; + unsigned SplitLength; + const Function &F = MIRBuilder.getMF().getFunction(); + const DataLayout &DL = 
F.getParent()->getDataLayout(); + const MipsTargetLowering &TLI = *static_cast<const MipsTargetLowering *>( + MIRBuilder.getMF().getSubtarget().getTargetLowering()); + + for (unsigned ArgsIndex = 0, ArgLocsIndex = 0; ArgsIndex < Args.size(); + ++ArgsIndex, ArgLocsIndex += SplitLength) { + EVT VT = TLI.getValueType(DL, Args[ArgsIndex].Ty); + SplitLength = TLI.getNumRegistersForCallingConv(F.getContext(), + F.getCallingConv(), VT); + if (SplitLength > 1) { + VRegs.clear(); + MVT RegisterVT = TLI.getRegisterTypeForCallingConv( + F.getContext(), F.getCallingConv(), VT); + for (unsigned i = 0; i < SplitLength; ++i) + VRegs.push_back(MRI.createGenericVirtualRegister(LLT{RegisterVT})); + + if (!handleSplit(VRegs, ArgLocs, ArgLocsIndex, Args[ArgsIndex].Reg)) + return false; + } else { + if (!assign(Args[ArgsIndex].Reg, ArgLocs[ArgLocsIndex])) + return false; + } + } + return true; +} + namespace { class IncomingValueHandler : public MipsCallLowering::MipsHandler { public: IncomingValueHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI) : MipsHandler(MIRBuilder, MRI) {} - bool handle(ArrayRef<CCValAssign> ArgLocs, - ArrayRef<CallLowering::ArgInfo> Args); - private: - void assignValueToReg(unsigned ValVReg, unsigned PhysReg) override; + void assignValueToReg(unsigned ValVReg, const CCValAssign &VA) override; + + unsigned getStackAddress(const CCValAssign &VA, + MachineMemOperand *&MMO) override; - unsigned getStackAddress(uint64_t Size, int64_t Offset, - MachinePointerInfo &MPO) override; + void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override; - void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size, - MachinePointerInfo &MPO) override; + bool handleSplit(SmallVectorImpl<unsigned> &VRegs, + ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex, + unsigned ArgsReg) override; virtual void markPhysRegUsed(unsigned PhysReg) { MIRBuilder.getMBB().addLiveIn(PhysReg); } - void buildLoad(unsigned Val, unsigned Addr, uint64_t 
Size, unsigned Alignment, - MachinePointerInfo &MPO) { - MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand( - MPO, MachineMemOperand::MOLoad, Size, Alignment); + void buildLoad(unsigned Val, const CCValAssign &VA) { + MachineMemOperand *MMO; + unsigned Addr = getStackAddress(VA, MMO); MIRBuilder.buildLoad(Val, Addr, *MMO); } }; @@ -86,17 +128,34 @@ private: } // end anonymous namespace void IncomingValueHandler::assignValueToReg(unsigned ValVReg, - unsigned PhysReg) { - MIRBuilder.buildCopy(ValVReg, PhysReg); + const CCValAssign &VA) { + unsigned PhysReg = VA.getLocReg(); + switch (VA.getLocInfo()) { + case CCValAssign::LocInfo::SExt: + case CCValAssign::LocInfo::ZExt: + case CCValAssign::LocInfo::AExt: { + auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg); + MIRBuilder.buildTrunc(ValVReg, Copy); + break; + } + default: + MIRBuilder.buildCopy(ValVReg, PhysReg); + break; + } markPhysRegUsed(PhysReg); } -unsigned IncomingValueHandler::getStackAddress(uint64_t Size, int64_t Offset, - MachinePointerInfo &MPO) { +unsigned IncomingValueHandler::getStackAddress(const CCValAssign &VA, + MachineMemOperand *&MMO) { + unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8; + unsigned Offset = VA.getLocMemOffset(); MachineFrameInfo &MFI = MIRBuilder.getMF().getFrameInfo(); int FI = MFI.CreateFixedObject(Size, Offset, true); - MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI); + MachinePointerInfo MPO = + MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI); + MMO = MIRBuilder.getMF().getMachineMemOperand(MPO, MachineMemOperand::MOLoad, + Size, /* Alignment */ 0); unsigned AddrReg = MRI.createGenericVirtualRegister(LLT::pointer(0, 32)); MIRBuilder.buildFrameIndex(AddrReg, FI); @@ -104,19 +163,26 @@ unsigned IncomingValueHandler::getStackAddress(uint64_t Size, int64_t Offset, return AddrReg; } -void IncomingValueHandler::assignValueToAddress(unsigned ValVReg, unsigned Addr, - uint64_t Size, - MachinePointerInfo &MPO) { - // If 
the value is not extended, a simple load will suffice. - buildLoad(ValVReg, Addr, Size, /* Alignment */ 0, MPO); +void IncomingValueHandler::assignValueToAddress(unsigned ValVReg, + const CCValAssign &VA) { + if (VA.getLocInfo() == CCValAssign::SExt || + VA.getLocInfo() == CCValAssign::ZExt || + VA.getLocInfo() == CCValAssign::AExt) { + unsigned LoadReg = MRI.createGenericVirtualRegister(LLT::scalar(32)); + buildLoad(LoadReg, VA); + MIRBuilder.buildTrunc(ValVReg, LoadReg); + } else + buildLoad(ValVReg, VA); } -bool IncomingValueHandler::handle(ArrayRef<CCValAssign> ArgLocs, - ArrayRef<CallLowering::ArgInfo> Args) { - for (unsigned i = 0, ArgsSize = Args.size(); i < ArgsSize; ++i) { - if (!assign(ArgLocs[i], Args[i].Reg)) - return false; - } +bool IncomingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs, + ArrayRef<CCValAssign> ArgLocs, + unsigned ArgLocsStartIndex, + unsigned ArgsReg) { + if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex)) + return false; + setLeastSignificantFirst(VRegs); + MIRBuilder.buildMerge(ArgsReg, VRegs); return true; } @@ -127,103 +193,179 @@ public: MachineInstrBuilder &MIB) : MipsHandler(MIRBuilder, MRI), MIB(MIB) {} - bool handle(ArrayRef<CCValAssign> ArgLocs, - ArrayRef<CallLowering::ArgInfo> Args); - private: - void assignValueToReg(unsigned ValVReg, unsigned PhysReg) override; + void assignValueToReg(unsigned ValVReg, const CCValAssign &VA) override; + + unsigned getStackAddress(const CCValAssign &VA, + MachineMemOperand *&MMO) override; + + void assignValueToAddress(unsigned ValVReg, const CCValAssign &VA) override; - unsigned getStackAddress(uint64_t Size, int64_t Offset, - MachinePointerInfo &MPO) override; + bool handleSplit(SmallVectorImpl<unsigned> &VRegs, + ArrayRef<CCValAssign> ArgLocs, unsigned ArgLocsStartIndex, + unsigned ArgsReg) override; - void assignValueToAddress(unsigned ValVReg, unsigned Addr, uint64_t Size, - MachinePointerInfo &MPO) override; + unsigned extendRegister(unsigned ValReg, const CCValAssign 
&VA); MachineInstrBuilder &MIB; }; } // end anonymous namespace void OutgoingValueHandler::assignValueToReg(unsigned ValVReg, - unsigned PhysReg) { - MIRBuilder.buildCopy(PhysReg, ValVReg); + const CCValAssign &VA) { + unsigned PhysReg = VA.getLocReg(); + unsigned ExtReg = extendRegister(ValVReg, VA); + MIRBuilder.buildCopy(PhysReg, ExtReg); MIB.addUse(PhysReg, RegState::Implicit); } -unsigned OutgoingValueHandler::getStackAddress(uint64_t Size, int64_t Offset, - MachinePointerInfo &MPO) { +unsigned OutgoingValueHandler::getStackAddress(const CCValAssign &VA, + MachineMemOperand *&MMO) { LLT p0 = LLT::pointer(0, 32); LLT s32 = LLT::scalar(32); unsigned SPReg = MRI.createGenericVirtualRegister(p0); MIRBuilder.buildCopy(SPReg, Mips::SP); unsigned OffsetReg = MRI.createGenericVirtualRegister(s32); + unsigned Offset = VA.getLocMemOffset(); MIRBuilder.buildConstant(OffsetReg, Offset); unsigned AddrReg = MRI.createGenericVirtualRegister(p0); MIRBuilder.buildGEP(AddrReg, SPReg, OffsetReg); - MPO = MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset); + MachinePointerInfo MPO = + MachinePointerInfo::getStack(MIRBuilder.getMF(), Offset); + unsigned Size = alignTo(VA.getValVT().getSizeInBits(), 8) / 8; + MMO = MIRBuilder.getMF().getMachineMemOperand(MPO, MachineMemOperand::MOStore, + Size, /* Alignment */ 0); + return AddrReg; } -void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg, unsigned Addr, - uint64_t Size, - MachinePointerInfo &MPO) { - MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand( - MPO, MachineMemOperand::MOStore, Size, /* Alignment */ 0); - MIRBuilder.buildStore(ValVReg, Addr, *MMO); +void OutgoingValueHandler::assignValueToAddress(unsigned ValVReg, + const CCValAssign &VA) { + MachineMemOperand *MMO; + unsigned Addr = getStackAddress(VA, MMO); + unsigned ExtReg = extendRegister(ValVReg, VA); + MIRBuilder.buildStore(ExtReg, Addr, *MMO); } -bool OutgoingValueHandler::handle(ArrayRef<CCValAssign> ArgLocs, - 
ArrayRef<CallLowering::ArgInfo> Args) { - for (unsigned i = 0; i < Args.size(); ++i) { - if (!assign(ArgLocs[i], Args[i].Reg)) - return false; +unsigned OutgoingValueHandler::extendRegister(unsigned ValReg, + const CCValAssign &VA) { + LLT LocTy{VA.getLocVT()}; + switch (VA.getLocInfo()) { + case CCValAssign::SExt: { + unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy); + MIRBuilder.buildSExt(ExtReg, ValReg); + return ExtReg; + } + case CCValAssign::ZExt: { + unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy); + MIRBuilder.buildZExt(ExtReg, ValReg); + return ExtReg; } + case CCValAssign::AExt: { + unsigned ExtReg = MRI.createGenericVirtualRegister(LocTy); + MIRBuilder.buildAnyExt(ExtReg, ValReg); + return ExtReg; + } + // TODO : handle upper extends + case CCValAssign::Full: + return ValReg; + default: + break; + } + llvm_unreachable("unable to extend register"); +} + +bool OutgoingValueHandler::handleSplit(SmallVectorImpl<unsigned> &VRegs, + ArrayRef<CCValAssign> ArgLocs, + unsigned ArgLocsStartIndex, + unsigned ArgsReg) { + MIRBuilder.buildUnmerge(VRegs, ArgsReg); + setLeastSignificantFirst(VRegs); + if (!assignVRegs(VRegs, ArgLocs, ArgLocsStartIndex)) + return false; + return true; } static bool isSupportedType(Type *T) { - if (T->isIntegerTy() && T->getScalarSizeInBits() == 32) + if (T->isIntegerTy()) return true; if (T->isPointerTy()) return true; return false; } +static CCValAssign::LocInfo determineLocInfo(const MVT RegisterVT, const EVT VT, + const ISD::ArgFlagsTy &Flags) { + // > does not mean loss of information as type RegisterVT can't hold type VT, + // it means that type VT is split into multiple registers of type RegisterVT + if (VT.getSizeInBits() >= RegisterVT.getSizeInBits()) + return CCValAssign::LocInfo::Full; + if (Flags.isSExt()) + return CCValAssign::LocInfo::SExt; + if (Flags.isZExt()) + return CCValAssign::LocInfo::ZExt; + return CCValAssign::LocInfo::AExt; +} + +template <typename T> +static void 
setLocInfo(SmallVectorImpl<CCValAssign> &ArgLocs, + const SmallVectorImpl<T> &Arguments) { + for (unsigned i = 0; i < ArgLocs.size(); ++i) { + const CCValAssign &VA = ArgLocs[i]; + CCValAssign::LocInfo LocInfo = determineLocInfo( + Arguments[i].VT, Arguments[i].ArgVT, Arguments[i].Flags); + if (VA.isMemLoc()) + ArgLocs[i] = + CCValAssign::getMem(VA.getValNo(), VA.getValVT(), + VA.getLocMemOffset(), VA.getLocVT(), LocInfo); + else + ArgLocs[i] = CCValAssign::getReg(VA.getValNo(), VA.getValVT(), + VA.getLocReg(), VA.getLocVT(), LocInfo); + } +} + bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder, - const Value *Val, unsigned VReg) const { + const Value *Val, + ArrayRef<unsigned> VRegs) const { MachineInstrBuilder Ret = MIRBuilder.buildInstrNoInsert(Mips::RetRA); - if (Val != nullptr) { - if (!isSupportedType(Val->getType())) - return false; + if (Val != nullptr && !isSupportedType(Val->getType())) + return false; + if (!VRegs.empty()) { MachineFunction &MF = MIRBuilder.getMF(); const Function &F = MF.getFunction(); const DataLayout &DL = MF.getDataLayout(); const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>(); + LLVMContext &Ctx = Val->getType()->getContext(); + + SmallVector<EVT, 4> SplitEVTs; + ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs); + assert(VRegs.size() == SplitEVTs.size() && + "For each split Type there should be exactly one VReg."); SmallVector<ArgInfo, 8> RetInfos; SmallVector<unsigned, 8> OrigArgIndices; - ArgInfo ArgRetInfo(VReg, Val->getType()); - setArgFlags(ArgRetInfo, AttributeList::ReturnIndex, DL, F); - splitToValueTypes(ArgRetInfo, 0, RetInfos, OrigArgIndices); + for (unsigned i = 0; i < SplitEVTs.size(); ++i) { + ArgInfo CurArgInfo = ArgInfo{VRegs[i], SplitEVTs[i].getTypeForEVT(Ctx)}; + setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F); + splitToValueTypes(CurArgInfo, 0, RetInfos, OrigArgIndices); + } SmallVector<ISD::OutputArg, 8> Outs; - subTargetRegTypeForCallingConv( - MIRBuilder, RetInfos, 
OrigArgIndices, - [&](ISD::ArgFlagsTy flags, EVT vt, EVT argvt, bool used, - unsigned origIdx, unsigned partOffs) { - Outs.emplace_back(flags, vt, argvt, used, origIdx, partOffs); - }); + subTargetRegTypeForCallingConv(F, RetInfos, OrigArgIndices, Outs); SmallVector<CCValAssign, 16> ArgLocs; MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext()); CCInfo.AnalyzeReturn(Outs, TLI.CCAssignFnForReturn()); + setLocInfo(ArgLocs, Outs); OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret); if (!RetHandler.handle(ArgLocs, RetInfos)) { @@ -266,12 +408,7 @@ bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, } SmallVector<ISD::InputArg, 8> Ins; - subTargetRegTypeForCallingConv( - MIRBuilder, ArgInfos, OrigArgIndices, - [&](ISD::ArgFlagsTy flags, EVT vt, EVT argvt, bool used, unsigned origIdx, - unsigned partOffs) { - Ins.emplace_back(flags, vt, argvt, used, origIdx, partOffs); - }); + subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Ins); SmallVector<CCValAssign, 16> ArgLocs; MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, @@ -283,6 +420,7 @@ bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder, CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()), 1); CCInfo.AnalyzeFormalArguments(Ins, TLI.CCAssignFnForCall()); + setLocInfo(ArgLocs, Ins); IncomingValueHandler Handler(MIRBuilder, MF.getRegInfo()); if (!Handler.handle(ArgLocs, ArgInfos)) @@ -347,12 +485,7 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, } SmallVector<ISD::OutputArg, 8> Outs; - subTargetRegTypeForCallingConv( - MIRBuilder, ArgInfos, OrigArgIndices, - [&](ISD::ArgFlagsTy flags, EVT vt, EVT argvt, bool used, unsigned origIdx, - unsigned partOffs) { - Outs.emplace_back(flags, vt, argvt, used, origIdx, partOffs); - }); + subTargetRegTypeForCallingConv(F, ArgInfos, OrigArgIndices, Outs); SmallVector<CCValAssign, 8> ArgLocs; MipsCCState CCInfo(F.getCallingConv(), 
F.isVarArg(), MF, ArgLocs, @@ -361,6 +494,7 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(CallConv), 1); const char *Call = Callee.isSymbol() ? Callee.getSymbolName() : nullptr; CCInfo.AnalyzeCallOperands(Outs, TLI.CCAssignFnForCall(), FuncOrigArgs, Call); + setLocInfo(ArgLocs, Outs); OutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB); if (!RetHandler.handle(ArgLocs, ArgInfos)) { @@ -383,18 +517,14 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, splitToValueTypes(OrigRet, 0, ArgInfos, OrigRetIndices); SmallVector<ISD::InputArg, 8> Ins; - subTargetRegTypeForCallingConv( - MIRBuilder, ArgInfos, OrigRetIndices, - [&](ISD::ArgFlagsTy flags, EVT vt, EVT argvt, bool used, - unsigned origIdx, unsigned partOffs) { - Ins.emplace_back(flags, vt, argvt, used, origIdx, partOffs); - }); + subTargetRegTypeForCallingConv(F, ArgInfos, OrigRetIndices, Ins); SmallVector<CCValAssign, 8> ArgLocs; MipsCCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext()); CCInfo.AnalyzeCallResult(Ins, TLI.CCAssignFnForReturn(), OrigRet.Ty, Call); + setLocInfo(ArgLocs, Ins); CallReturnHandler Handler(MIRBuilder, MF.getRegInfo(), MIB); if (!Handler.handle(ArgLocs, ArgInfos)) @@ -406,11 +536,10 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder, return true; } +template <typename T> void MipsCallLowering::subTargetRegTypeForCallingConv( - MachineIRBuilder &MIRBuilder, ArrayRef<ArgInfo> Args, - ArrayRef<unsigned> OrigArgIndices, const FunTy &PushBack) const { - MachineFunction &MF = MIRBuilder.getMF(); - const Function &F = MF.getFunction(); + const Function &F, ArrayRef<ArgInfo> Args, + ArrayRef<unsigned> OrigArgIndices, SmallVectorImpl<T> &ISDArgs) const { const DataLayout &DL = F.getParent()->getDataLayout(); const MipsTargetLowering &TLI = *getTLI<MipsTargetLowering>(); @@ -420,12 +549,20 @@ void MipsCallLowering::subTargetRegTypeForCallingConv( EVT VT = 
TLI.getValueType(DL, Arg.Ty); MVT RegisterVT = TLI.getRegisterTypeForCallingConv(F.getContext(), F.getCallingConv(), VT); + unsigned NumRegs = TLI.getNumRegistersForCallingConv( + F.getContext(), F.getCallingConv(), VT); - ISD::ArgFlagsTy Flags = Arg.Flags; - Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL)); + for (unsigned i = 0; i < NumRegs; ++i) { + ISD::ArgFlagsTy Flags = Arg.Flags; - PushBack(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo], 0); + if (i == 0) + Flags.setOrigAlign(TLI.getABIAlignmentForCallingConv(Arg.Ty, DL)); + else + Flags.setOrigAlign(1); + ISDArgs.emplace_back(Flags, RegisterVT, VT, true, OrigArgIndices[ArgNo], + 0); + } ++ArgNo; } } diff --git a/lib/Target/Mips/MipsCallLowering.h b/lib/Target/Mips/MipsCallLowering.h index e23c10cec563..9916b04ef50c 100644 --- a/lib/Target/Mips/MipsCallLowering.h +++ b/lib/Target/Mips/MipsCallLowering.h @@ -31,27 +31,38 @@ public: virtual ~MipsHandler() = default; + bool handle(ArrayRef<CCValAssign> ArgLocs, + ArrayRef<CallLowering::ArgInfo> Args); + protected: - bool assign(const CCValAssign &VA, unsigned vreg); + bool assignVRegs(ArrayRef<unsigned> VRegs, ArrayRef<CCValAssign> ArgLocs, + unsigned Index); + + void setLeastSignificantFirst(SmallVectorImpl<unsigned> &VRegs); MachineIRBuilder &MIRBuilder; MachineRegisterInfo &MRI; private: - virtual unsigned getStackAddress(uint64_t Size, int64_t Offset, - MachinePointerInfo &MPO) = 0; + bool assign(unsigned VReg, const CCValAssign &VA); + + virtual unsigned getStackAddress(const CCValAssign &VA, + MachineMemOperand *&MMO) = 0; - virtual void assignValueToReg(unsigned ValVReg, unsigned PhysReg) = 0; + virtual void assignValueToReg(unsigned ValVReg, const CCValAssign &VA) = 0; - virtual void assignValueToAddress(unsigned ValVReg, unsigned Addr, - uint64_t Size, - MachinePointerInfo &MPO) = 0; + virtual void assignValueToAddress(unsigned ValVReg, + const CCValAssign &VA) = 0; + + virtual bool handleSplit(SmallVectorImpl<unsigned> &VRegs, + 
ArrayRef<CCValAssign> ArgLocs, + unsigned ArgLocsStartIndex, unsigned ArgsReg) = 0; }; MipsCallLowering(const MipsTargetLowering &TLI); - bool lowerReturn(MachineIRBuilder &MIRBuiler, const Value *Val, - unsigned VReg) const override; + bool lowerReturn(MachineIRBuilder &MIRBuilder, const Value *Val, + ArrayRef<unsigned> VRegs) const override; bool lowerFormalArguments(MachineIRBuilder &MIRBuilder, const Function &F, ArrayRef<unsigned> VRegs) const override; @@ -61,21 +72,16 @@ public: ArrayRef<ArgInfo> OrigArgs) const override; private: - using FunTy = - std::function<void(ISD::ArgFlagsTy flags, EVT vt, EVT argvt, bool used, - unsigned origIdx, unsigned partOffs)>; - /// Based on registers available on target machine split or extend /// type if needed, also change pointer type to appropriate integer - /// type. Lambda will fill some info so we can tell MipsCCState to - /// assign physical registers. - void subTargetRegTypeForCallingConv(MachineIRBuilder &MIRBuilder, - ArrayRef<ArgInfo> Args, + /// type. + template <typename T> + void subTargetRegTypeForCallingConv(const Function &F, ArrayRef<ArgInfo> Args, ArrayRef<unsigned> OrigArgIndices, - const FunTy &PushBack) const; + SmallVectorImpl<T> &ISDArgs) const; /// Split structures and arrays, save original argument indices since - /// Mips calling conv needs info about original argument type. + /// Mips calling convention needs info about original argument type. 
void splitToValueTypes(const ArgInfo &OrigArg, unsigned OriginalIndex, SmallVectorImpl<ArgInfo> &SplitArgs, SmallVectorImpl<unsigned> &SplitArgsOrigIndices) const; diff --git a/lib/Target/Mips/MipsCondMov.td b/lib/Target/Mips/MipsCondMov.td index 39dc2654aa6a..0d7e3e200b5f 100644 --- a/lib/Target/Mips/MipsCondMov.td +++ b/lib/Target/Mips/MipsCondMov.td @@ -296,3 +296,13 @@ def PseudoSELECTFP_F_I64 : SelectFP_Pseudo_F<GPR64Opnd>; def PseudoSELECTFP_F_S : SelectFP_Pseudo_F<FGR32Opnd>; def PseudoSELECTFP_F_D32 : SelectFP_Pseudo_F<AFGR64Opnd>, FGR_32; def PseudoSELECTFP_F_D64 : SelectFP_Pseudo_F<FGR64Opnd>, FGR_64; + +let usesCustomInserter = 1 in { +class D_SELECT_CLASS<RegisterOperand RC> : + PseudoSE<(outs RC:$dst1, RC:$dst2), + (ins GPR32Opnd:$cond, RC:$a1, RC:$a2, RC:$b1, RC:$b2), []>, + ISA_MIPS1_NOT_4_32; +} + +def PseudoD_SELECT_I : D_SELECT_CLASS<GPR32Opnd>; +def PseudoD_SELECT_I64 : D_SELECT_CLASS<GPR64Opnd>; diff --git a/lib/Target/Mips/MipsDelaySlotFiller.cpp b/lib/Target/Mips/MipsDelaySlotFiller.cpp index 33f03b954a8c..e3823e0dfdb8 100644 --- a/lib/Target/Mips/MipsDelaySlotFiller.cpp +++ b/lib/Target/Mips/MipsDelaySlotFiller.cpp @@ -728,9 +728,10 @@ bool MipsDelaySlotFiller::searchRange(MachineBasicBlock &MBB, IterTy Begin, (Opcode == Mips::JR || Opcode == Mips::PseudoIndirectBranch || Opcode == Mips::PseudoReturn || Opcode == Mips::TAILCALL)) continue; - // Instructions LWP/SWP should not be in a delay slot as that + // Instructions LWP/SWP and MOVEP should not be in a delay slot as that // results in unpredictable behaviour - if (InMicroMipsMode && (Opcode == Mips::LWP_MM || Opcode == Mips::SWP_MM)) + if (InMicroMipsMode && (Opcode == Mips::LWP_MM || Opcode == Mips::SWP_MM || + Opcode == Mips::MOVEP_MM)) continue; Filler = CurrI; diff --git a/lib/Target/Mips/MipsFastISel.cpp b/lib/Target/Mips/MipsFastISel.cpp index 19b30a44e86a..22ade31a72cd 100644 --- a/lib/Target/Mips/MipsFastISel.cpp +++ b/lib/Target/Mips/MipsFastISel.cpp @@ -953,6 +953,11 @@ bool 
MipsFastISel::selectBranch(const Instruction *I) { MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)]; // For now, just try the simplest case where it's fed by a compare. if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) { + MVT CIMVT = + TLI.getValueType(DL, CI->getOperand(0)->getType(), true).getSimpleVT(); + if (CIMVT == MVT::i1) + return false; + unsigned CondReg = getRegForValue(CI); BuildMI(*BrBB, FuncInfo.InsertPt, DbgLoc, TII.get(Mips::BGTZ)) .addReg(CondReg) diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index 0677d378a115..8c2a364cdfa9 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -1396,6 +1396,9 @@ MipsTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI, case Mips::PseudoSELECTFP_T_D32: case Mips::PseudoSELECTFP_T_D64: return emitPseudoSELECT(MI, BB, true, Mips::BC1T); + case Mips::PseudoD_SELECT_I: + case Mips::PseudoD_SELECT_I64: + return emitPseudoD_SELECT(MI, BB); } } @@ -2427,6 +2430,16 @@ SDValue MipsTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, DAG.getConstant(VT.getSizeInBits(), DL, MVT::i32)); SDValue Ext = DAG.getNode(ISD::SRA, DL, VT, Hi, DAG.getConstant(VT.getSizeInBits() - 1, DL, VT)); + + if (!(Subtarget.hasMips4() || Subtarget.hasMips32())) { + SDVTList VTList = DAG.getVTList(VT, VT); + return DAG.getNode(Subtarget.isGP64bit() ? Mips::PseudoD_SELECT_I64 + : Mips::PseudoD_SELECT_I, + DL, VTList, Cond, ShiftRightHi, + IsSRA ? Ext : DAG.getConstant(0, DL, VT), Or, + ShiftRightHi); + } + Lo = DAG.getNode(ISD::SELECT, DL, VT, Cond, ShiftRightHi, Or); Hi = DAG.getNode(ISD::SELECT, DL, VT, Cond, IsSRA ? Ext : DAG.getConstant(0, DL, VT), ShiftRightHi); @@ -2563,10 +2576,12 @@ static SDValue lowerUnalignedIntStore(StoreSDNode *SD, SelectionDAG &DAG, } // Lower (store (fp_to_sint $fp) $ptr) to (store (TruncIntFP $fp), $ptr). 
-static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG) { +static SDValue lowerFP_TO_SINT_STORE(StoreSDNode *SD, SelectionDAG &DAG, + bool SingleFloat) { SDValue Val = SD->getValue(); - if (Val.getOpcode() != ISD::FP_TO_SINT) + if (Val.getOpcode() != ISD::FP_TO_SINT || + (Val.getValueSizeInBits() > 32 && SingleFloat)) return SDValue(); EVT FPTy = EVT::getFloatingPointVT(Val.getValueSizeInBits()); @@ -2587,7 +2602,7 @@ SDValue MipsTargetLowering::lowerSTORE(SDValue Op, SelectionDAG &DAG) const { ((MemVT == MVT::i32) || (MemVT == MVT::i64))) return lowerUnalignedIntStore(SD, DAG, Subtarget.isLittle()); - return lowerFP_TO_SINT_STORE(SD, DAG); + return lowerFP_TO_SINT_STORE(SD, DAG, Subtarget.isSingleFloat()); } SDValue MipsTargetLowering::lowerEH_DWARF_CFA(SDValue Op, @@ -2603,6 +2618,9 @@ SDValue MipsTargetLowering::lowerEH_DWARF_CFA(SDValue Op, SDValue MipsTargetLowering::lowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const { + if (Op.getValueSizeInBits() > 32 && Subtarget.isSingleFloat()) + return SDValue(); + EVT FPTy = EVT::getFloatingPointVT(Op.getValueSizeInBits()); SDValue Trunc = DAG.getNode(MipsISD::TruncIntFP, SDLoc(Op), FPTy, Op.getOperand(0)); @@ -4340,6 +4358,81 @@ MachineBasicBlock *MipsTargetLowering::emitPseudoSELECT(MachineInstr &MI, return BB; } +MachineBasicBlock *MipsTargetLowering::emitPseudoD_SELECT(MachineInstr &MI, + MachineBasicBlock *BB) const { + assert(!(Subtarget.hasMips4() || Subtarget.hasMips32()) && + "Subtarget already supports SELECT nodes with the use of" + "conditional-move instructions."); + + const TargetInstrInfo *TII = Subtarget.getInstrInfo(); + DebugLoc DL = MI.getDebugLoc(); + + // D_SELECT substitutes two SELECT nodes that go one after another and + // have the same condition operand. On machines which don't have + // conditional-move instruction, it reduces unnecessary branch instructions + // which are a result of using two diamond patterns that are a result of two + // SELECT pseudo instructions. 
+ const BasicBlock *LLVM_BB = BB->getBasicBlock(); + MachineFunction::iterator It = ++BB->getIterator(); + + // thisMBB: + // ... + // TrueVal = ... + // setcc r1, r2, r3 + // bNE r1, r0, copy1MBB + // fallthrough --> copy0MBB + MachineBasicBlock *thisMBB = BB; + MachineFunction *F = BB->getParent(); + MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB); + MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB); + F->insert(It, copy0MBB); + F->insert(It, sinkMBB); + + // Transfer the remainder of BB and its successor edges to sinkMBB. + sinkMBB->splice(sinkMBB->begin(), BB, + std::next(MachineBasicBlock::iterator(MI)), BB->end()); + sinkMBB->transferSuccessorsAndUpdatePHIs(BB); + + // Next, add the true and fallthrough blocks as its successors. + BB->addSuccessor(copy0MBB); + BB->addSuccessor(sinkMBB); + + // bne rs, $0, sinkMBB + BuildMI(BB, DL, TII->get(Mips::BNE)) + .addReg(MI.getOperand(2).getReg()) + .addReg(Mips::ZERO) + .addMBB(sinkMBB); + + // copy0MBB: + // %FalseValue = ... + // # fallthrough to sinkMBB + BB = copy0MBB; + + // Update machine-CFG edges + BB->addSuccessor(sinkMBB); + + // sinkMBB: + // %Result = phi [ %TrueValue, thisMBB ], [ %FalseValue, copy0MBB ] + // ... + BB = sinkMBB; + + // Use two PHI nodes to select two results + BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(0).getReg()) + .addReg(MI.getOperand(3).getReg()) + .addMBB(thisMBB) + .addReg(MI.getOperand(5).getReg()) + .addMBB(copy0MBB); + BuildMI(*BB, BB->begin(), DL, TII->get(Mips::PHI), MI.getOperand(1).getReg()) + .addReg(MI.getOperand(4).getReg()) + .addMBB(thisMBB) + .addReg(MI.getOperand(6).getReg()) + .addMBB(copy0MBB); + + MI.eraseFromParent(); // The pseudo instruction is gone now. + + return BB; +} + // FIXME? Maybe this could be a TableGen attribute on some registers and // this table could be generated automatically from RegInfo. 
unsigned MipsTargetLowering::getRegisterByName(const char* RegName, EVT VT, diff --git a/lib/Target/Mips/MipsISelLowering.h b/lib/Target/Mips/MipsISelLowering.h index 5a0de45c44f3..e043f133a09f 100644 --- a/lib/Target/Mips/MipsISelLowering.h +++ b/lib/Target/Mips/MipsISelLowering.h @@ -699,6 +699,8 @@ class TargetRegisterClass; MachineBasicBlock *emitSEL_D(MachineInstr &MI, MachineBasicBlock *BB) const; MachineBasicBlock *emitPseudoSELECT(MachineInstr &MI, MachineBasicBlock *BB, bool isFPCmp, unsigned Opc) const; + MachineBasicBlock *emitPseudoD_SELECT(MachineInstr &MI, + MachineBasicBlock *BB) const; }; /// Create MipsTargetLowering objects. diff --git a/lib/Target/Mips/MipsInstrFPU.td b/lib/Target/Mips/MipsInstrFPU.td index dd30e20a743c..4cb8574e08f6 100644 --- a/lib/Target/Mips/MipsInstrFPU.td +++ b/lib/Target/Mips/MipsInstrFPU.td @@ -130,6 +130,15 @@ class ABSS_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC, HARDFLOAT, NeverHasSideEffects; +class CVT_PS_S_FT<string opstr, RegisterOperand DstRC, RegisterOperand SrcRC, InstrItinClass Itin, bit IsComm, + SDPatternOperator OpNode= null_frag> : + InstSE<(outs DstRC:$fd), (ins SrcRC:$fs, SrcRC:$ft), + !strconcat(opstr, "\t$fd, $fs, $ft"), + [(set DstRC:$fd, (OpNode SrcRC:$fs, SrcRC:$ft))], Itin, FrmFR, opstr>, + HARDFLOAT { + let isCommutable = IsComm; +} + multiclass ABSS_M<string opstr, InstrItinClass Itin, SDPatternOperator OpNode= null_frag> { def _D32 : MMRel, ABSS_FT<opstr, AFGR64Opnd, AFGR64Opnd, Itin, OpNode>, @@ -432,6 +441,29 @@ let AdditionalPredicates = [NotInMicroMips] in { def CVT_D32_W : MMRel, ABSS_FT<"cvt.d.w", AFGR64Opnd, FGR32Opnd, II_CVT>, ABSS_FM<0x21, 20>, ISA_MIPS1, FGR_32; } + +let DecoderNamespace = "MipsFP64" in { + let AdditionalPredicates = [NotInMicroMips] in { + def PLL_PS64 : ADDS_FT<"pll.ps", FGR64Opnd, II_CVT, 0>, + ADDS_FM<0x2C, 22>, + ISA_MIPS32R2_NOT_32R6_64R6, FGR_64; + def PLU_PS64 : ADDS_FT<"plu.ps", FGR64Opnd, II_CVT, 0>, + ADDS_FM<0x2D, 22>, + 
ISA_MIPS32R2_NOT_32R6_64R6, FGR_64; + + def CVT_S_PU64 : ABSS_FT<"cvt.s.pu", FGR32Opnd, FGR64Opnd, II_CVT>, + ABSS_FM<0x20, 22>, + ISA_MIPS32R2_NOT_32R6_64R6, FGR_64; + def CVT_S_PL64 : ABSS_FT<"cvt.s.pl", FGR32Opnd, FGR64Opnd, II_CVT>, + ABSS_FM<0x28, 22>, + ISA_MIPS32R2_NOT_32R6_64R6, FGR_64; + + def CVT_PS_S64 : CVT_PS_S_FT<"cvt.ps.s", FGR64Opnd, FGR32Opnd, II_CVT, 0>, + ADDS_FM<0x26, 16>, + ISA_MIPS32R2_NOT_32R6_64R6, FGR_64; + } +} + let DecoderNamespace = "MipsFP64" in { let AdditionalPredicates = [NotInMicroMips] in { def CVT_S_L : ABSS_FT<"cvt.s.l", FGR32Opnd, FGR64Opnd, II_CVT>, @@ -485,14 +517,14 @@ let AdditionalPredicates = [NotInMicroMips] in { def CTC1 : MMRel, MTC1_FT<"ctc1", CCROpnd, GPR32Opnd, II_CTC1>, MFC1_FM<6>, ISA_MIPS1; - def MFC1 : MMRel, MFC1_FT<"mfc1", GPR32Opnd, FGR32Opnd, II_MFC1, - bitconvert>, MFC1_FM<0>, ISA_MIPS1; + def MFC1 : MMRel, StdMMR6Rel, MFC1_FT<"mfc1", GPR32Opnd, FGR32Opnd, II_MFC1, + bitconvert>, MFC1_FM<0>, ISA_MIPS1; def MFC1_D64 : MFC1_FT<"mfc1", GPR32Opnd, FGR64Opnd, II_MFC1>, MFC1_FM<0>, ISA_MIPS1, FGR_64 { let DecoderNamespace = "MipsFP64"; } - def MTC1 : MMRel, MTC1_FT<"mtc1", FGR32Opnd, GPR32Opnd, II_MTC1, - bitconvert>, MFC1_FM<4>, ISA_MIPS1; + def MTC1 : MMRel, StdMMR6Rel, MTC1_FT<"mtc1", FGR32Opnd, GPR32Opnd, II_MTC1, + bitconvert>, MFC1_FM<4>, ISA_MIPS1; def MTC1_D64 : MTC1_FT<"mtc1", FGR64Opnd, GPR32Opnd, II_MTC1>, MFC1_FM<4>, ISA_MIPS1, FGR_64 { let DecoderNamespace = "MipsFP64"; diff --git a/lib/Target/Mips/MipsInstrInfo.cpp b/lib/Target/Mips/MipsInstrInfo.cpp index 0e0e712dba19..bfb4c775205d 100644 --- a/lib/Target/Mips/MipsInstrInfo.cpp +++ b/lib/Target/Mips/MipsInstrInfo.cpp @@ -280,6 +280,8 @@ bool MipsInstrInfo::isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) switch (BranchOpc) { case Mips::B: case Mips::BAL: + case Mips::BAL_BR: + case Mips::BAL_BR_MM: case Mips::BC1F: case Mips::BC1FL: case Mips::BC1T: @@ -661,8 +663,7 @@ MipsInstrInfo::genInstrWithNewOpc(unsigned NewOpc, } 
MIB.copyImplicitOps(*I); - - MIB.setMemRefs(I->memoperands_begin(), I->memoperands_end()); + MIB.cloneMemRefs(*I); return MIB; } diff --git a/lib/Target/Mips/MipsInstrInfo.td b/lib/Target/Mips/MipsInstrInfo.td index 0faa13d4d63f..d9398b7d6024 100644 --- a/lib/Target/Mips/MipsInstrInfo.td +++ b/lib/Target/Mips/MipsInstrInfo.td @@ -2002,13 +2002,19 @@ let isPseudo = 1, isCodeGenOnly = 1, hasNoSchedulingInfo = 1 in { // branches. See the comment in file MipsLongBranch.cpp for detailed // explanation. -// Expands to: lui $dst, %hi($tgt - $baltgt) +// Expands to: lui $dst, %highest/%higher/%hi/%lo($tgt - $baltgt) def LONG_BRANCH_LUi : PseudoSE<(outs GPR32Opnd:$dst), (ins brtarget:$tgt, brtarget:$baltgt), []>; +// Expands to: lui $dst, highest/%higher/%hi/%lo($tgt) +def LONG_BRANCH_LUi2Op : PseudoSE<(outs GPR32Opnd:$dst), + (ins brtarget:$tgt), []>; -// Expands to: addiu $dst, $src, %lo($tgt - $baltgt) +// Expands to: addiu $dst, $src, %highest/%higher/%hi/%lo($tgt - $baltgt) def LONG_BRANCH_ADDiu : PseudoSE<(outs GPR32Opnd:$dst), (ins GPR32Opnd:$src, brtarget:$tgt, brtarget:$baltgt), []>; +// Expands to: addiu $dst, $src, %highest/%higher/%hi/%lo($tgt) +def LONG_BRANCH_ADDiu2Op : PseudoSE<(outs GPR32Opnd:$dst), + (ins GPR32Opnd:$src, brtarget:$tgt), []>; //===----------------------------------------------------------------------===// // Instruction definition diff --git a/lib/Target/Mips/MipsInstructionSelector.cpp b/lib/Target/Mips/MipsInstructionSelector.cpp index 6c5b83021f74..b041590ee343 100644 --- a/lib/Target/Mips/MipsInstructionSelector.cpp +++ b/lib/Target/Mips/MipsInstructionSelector.cpp @@ -15,6 +15,7 @@ #include "MipsRegisterBankInfo.h" #include "MipsTargetMachine.h" #include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h" +#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h" #define DEBUG_TYPE "mips-isel" @@ -144,6 +145,42 @@ bool MipsInstructionSelector::select(MachineInstr &I, .addMemOperand(*I.memoperands_begin()); break; } + case G_UDIV: + case 
G_UREM: + case G_SDIV: + case G_SREM: { + unsigned HILOReg = MRI.createVirtualRegister(&Mips::ACC64RegClass); + bool IsSigned = I.getOpcode() == G_SREM || I.getOpcode() == G_SDIV; + bool IsDiv = I.getOpcode() == G_UDIV || I.getOpcode() == G_SDIV; + + MachineInstr *PseudoDIV, *PseudoMove; + PseudoDIV = BuildMI(MBB, I, I.getDebugLoc(), + TII.get(IsSigned ? Mips::PseudoSDIV : Mips::PseudoUDIV)) + .addDef(HILOReg) + .add(I.getOperand(1)) + .add(I.getOperand(2)); + if (!constrainSelectedInstRegOperands(*PseudoDIV, TII, TRI, RBI)) + return false; + + PseudoMove = BuildMI(MBB, I, I.getDebugLoc(), + TII.get(IsDiv ? Mips::PseudoMFLO : Mips::PseudoMFHI)) + .addDef(I.getOperand(0).getReg()) + .addUse(HILOReg); + if (!constrainSelectedInstRegOperands(*PseudoMove, TII, TRI, RBI)) + return false; + + I.eraseFromParent(); + return true; + } + case G_SELECT: { + // Handle operands with pointer type. + MI = BuildMI(MBB, I, I.getDebugLoc(), TII.get(Mips::MOVN_I_I)) + .add(I.getOperand(0)) + .add(I.getOperand(2)) + .add(I.getOperand(1)) + .add(I.getOperand(3)); + break; + } case G_CONSTANT: { int Imm = I.getOperand(1).getCImm()->getValue().getLimitedValue(); unsigned LUiReg = MRI.createVirtualRegister(&Mips::GPR32RegClass); @@ -193,7 +230,85 @@ bool MipsInstructionSelector::select(MachineInstr &I, I.eraseFromParent(); return true; } + case G_ICMP: { + struct Instr { + unsigned Opcode, Def, LHS, RHS; + Instr(unsigned Opcode, unsigned Def, unsigned LHS, unsigned RHS) + : Opcode(Opcode), Def(Def), LHS(LHS), RHS(RHS){}; + + bool hasImm() const { + if (Opcode == Mips::SLTiu || Opcode == Mips::XORi) + return true; + return false; + } + }; + + SmallVector<struct Instr, 2> Instructions; + unsigned ICMPReg = I.getOperand(0).getReg(); + unsigned Temp = MRI.createVirtualRegister(&Mips::GPR32RegClass); + unsigned LHS = I.getOperand(2).getReg(); + unsigned RHS = I.getOperand(3).getReg(); + CmpInst::Predicate Cond = + static_cast<CmpInst::Predicate>(I.getOperand(1).getPredicate()); + + switch 
(Cond) { + case CmpInst::ICMP_EQ: // LHS == RHS -> (LHS ^ RHS) < 1 + Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS); + Instructions.emplace_back(Mips::SLTiu, ICMPReg, Temp, 1); + break; + case CmpInst::ICMP_NE: // LHS != RHS -> 0 < (LHS ^ RHS) + Instructions.emplace_back(Mips::XOR, Temp, LHS, RHS); + Instructions.emplace_back(Mips::SLTu, ICMPReg, Mips::ZERO, Temp); + break; + case CmpInst::ICMP_UGT: // LHS > RHS -> RHS < LHS + Instructions.emplace_back(Mips::SLTu, ICMPReg, RHS, LHS); + break; + case CmpInst::ICMP_UGE: // LHS >= RHS -> !(LHS < RHS) + Instructions.emplace_back(Mips::SLTu, Temp, LHS, RHS); + Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1); + break; + case CmpInst::ICMP_ULT: // LHS < RHS -> LHS < RHS + Instructions.emplace_back(Mips::SLTu, ICMPReg, LHS, RHS); + break; + case CmpInst::ICMP_ULE: // LHS <= RHS -> !(RHS < LHS) + Instructions.emplace_back(Mips::SLTu, Temp, RHS, LHS); + Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1); + break; + case CmpInst::ICMP_SGT: // LHS > RHS -> RHS < LHS + Instructions.emplace_back(Mips::SLT, ICMPReg, RHS, LHS); + break; + case CmpInst::ICMP_SGE: // LHS >= RHS -> !(LHS < RHS) + Instructions.emplace_back(Mips::SLT, Temp, LHS, RHS); + Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1); + break; + case CmpInst::ICMP_SLT: // LHS < RHS -> LHS < RHS + Instructions.emplace_back(Mips::SLT, ICMPReg, LHS, RHS); + break; + case CmpInst::ICMP_SLE: // LHS <= RHS -> !(RHS < LHS) + Instructions.emplace_back(Mips::SLT, Temp, RHS, LHS); + Instructions.emplace_back(Mips::XORi, ICMPReg, Temp, 1); + break; + default: + return false; + } + + MachineIRBuilder B(I); + for (const struct Instr &Instruction : Instructions) { + MachineInstrBuilder MIB = B.buildInstr( + Instruction.Opcode, {Instruction.Def}, {Instruction.LHS}); + + if (Instruction.hasImm()) + MIB.addImm(Instruction.RHS); + else + MIB.addUse(Instruction.RHS); + if (!MIB.constrainAllUses(TII, TRI, RBI)) + return false; + } + + I.eraseFromParent(); + 
return true; + } default: return false; } diff --git a/lib/Target/Mips/MipsLegalizerInfo.cpp b/lib/Target/Mips/MipsLegalizerInfo.cpp index fb259516be09..c629f02af00e 100644 --- a/lib/Target/Mips/MipsLegalizerInfo.cpp +++ b/lib/Target/Mips/MipsLegalizerInfo.cpp @@ -13,23 +13,53 @@ #include "MipsLegalizerInfo.h" #include "MipsTargetMachine.h" +#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h" using namespace llvm; MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) { using namespace TargetOpcode; + const LLT s1 = LLT::scalar(1); const LLT s32 = LLT::scalar(32); + const LLT s64 = LLT::scalar(64); const LLT p0 = LLT::pointer(0, 32); - getActionDefinitionsBuilder(G_ADD).legalFor({s32}); + getActionDefinitionsBuilder(G_ADD) + .legalFor({s32}) + .clampScalar(0, s32, s32); + + getActionDefinitionsBuilder(G_UADDE) + .lowerFor({{s32, s1}}); getActionDefinitionsBuilder({G_LOAD, G_STORE}) .legalForCartesianProduct({p0, s32}, {p0}); - getActionDefinitionsBuilder(G_CONSTANT) + getActionDefinitionsBuilder(G_SELECT) + .legalForCartesianProduct({p0, s32}, {s32}) + .minScalar(0, s32) + .minScalar(1, s32); + + getActionDefinitionsBuilder({G_AND, G_OR, G_XOR}) + .legalFor({s32}) + .clampScalar(0, s32, s32); + + getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR}) .legalFor({s32}); + getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UREM, G_UDIV}) + .legalFor({s32}) + .minScalar(0, s32) + .libcallFor({s64}); + + getActionDefinitionsBuilder(G_ICMP) + .legalFor({{s32, s32}}) + .minScalar(0, s32); + + getActionDefinitionsBuilder(G_CONSTANT) + .legalFor({s32}) + .clampScalar(0, s32, s32); + getActionDefinitionsBuilder(G_GEP) .legalFor({{p0, s32}}); @@ -42,3 +72,15 @@ MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) { computeTables(); verify(*ST.getInstrInfo()); } + +bool MipsLegalizerInfo::legalizeCustom(MachineInstr &MI, + MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder, + GISelChangeObserver &Observer) const { + + using namespace TargetOpcode; + + 
MIRBuilder.setInstr(MI); + + return false; +} diff --git a/lib/Target/Mips/MipsLegalizerInfo.h b/lib/Target/Mips/MipsLegalizerInfo.h index 36dd39c8c1c1..75fadd6cf613 100644 --- a/lib/Target/Mips/MipsLegalizerInfo.h +++ b/lib/Target/Mips/MipsLegalizerInfo.h @@ -14,6 +14,7 @@ #ifndef LLVM_LIB_TARGET_MIPS_MIPSMACHINELEGALIZER_H #define LLVM_LIB_TARGET_MIPS_MIPSMACHINELEGALIZER_H +#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h" #include "llvm/CodeGen/GlobalISel/LegalizerInfo.h" namespace llvm { @@ -24,6 +25,10 @@ class MipsSubtarget; class MipsLegalizerInfo : public LegalizerInfo { public: MipsLegalizerInfo(const MipsSubtarget &ST); + + bool legalizeCustom(MachineInstr &MI, MachineRegisterInfo &MRI, + MachineIRBuilder &MIRBuilder, + GISelChangeObserver &Observer) const override; }; } // end namespace llvm #endif diff --git a/lib/Target/Mips/MipsMCInstLower.cpp b/lib/Target/Mips/MipsMCInstLower.cpp index 2b7f64099923..46b37ceae391 100644 --- a/lib/Target/Mips/MipsMCInstLower.cpp +++ b/lib/Target/Mips/MipsMCInstLower.cpp @@ -298,12 +298,16 @@ bool MipsMCInstLower::lowerLongBranch(const MachineInstr *MI, default: return false; case Mips::LONG_BRANCH_LUi: + case Mips::LONG_BRANCH_LUi2Op: + case Mips::LONG_BRANCH_LUi2Op_64: lowerLongBranchLUi(MI, OutMI); return true; case Mips::LONG_BRANCH_ADDiu: + case Mips::LONG_BRANCH_ADDiu2Op: lowerLongBranchADDiu(MI, OutMI, Mips::ADDiu); return true; case Mips::LONG_BRANCH_DADDiu: + case Mips::LONG_BRANCH_DADDiu2Op: lowerLongBranchADDiu(MI, OutMI, Mips::DADDiu); return true; } diff --git a/lib/Target/Mips/MipsMSAInstrInfo.td b/lib/Target/Mips/MipsMSAInstrInfo.td index d83f75ffa1c1..eecc7c573df1 100644 --- a/lib/Target/Mips/MipsMSAInstrInfo.td +++ b/lib/Target/Mips/MipsMSAInstrInfo.td @@ -107,6 +107,18 @@ class vfsetcc_type<ValueType ResTy, ValueType OpTy, CondCode CC> : (ResTy (vfsetcc (OpTy node:$lhs), (OpTy node:$rhs), CC))>; // ISD::SETFALSE cannot occur +def vfseteq_v4f32 : vfsetcc_type<v4i32, v4f32, SETEQ>; +def 
vfseteq_v2f64 : vfsetcc_type<v2i64, v2f64, SETEQ>; +def vfsetge_v4f32 : vfsetcc_type<v4i32, v4f32, SETGE>; +def vfsetge_v2f64 : vfsetcc_type<v2i64, v2f64, SETGE>; +def vfsetgt_v4f32 : vfsetcc_type<v4i32, v4f32, SETGT>; +def vfsetgt_v2f64 : vfsetcc_type<v2i64, v2f64, SETGT>; +def vfsetle_v4f32 : vfsetcc_type<v4i32, v4f32, SETLE>; +def vfsetle_v2f64 : vfsetcc_type<v2i64, v2f64, SETLE>; +def vfsetlt_v4f32 : vfsetcc_type<v4i32, v4f32, SETLT>; +def vfsetlt_v2f64 : vfsetcc_type<v2i64, v2f64, SETLT>; +def vfsetne_v4f32 : vfsetcc_type<v4i32, v4f32, SETNE>; +def vfsetne_v2f64 : vfsetcc_type<v2i64, v2f64, SETNE>; def vfsetoeq_v4f32 : vfsetcc_type<v4i32, v4f32, SETOEQ>; def vfsetoeq_v2f64 : vfsetcc_type<v2i64, v2f64, SETOEQ>; def vfsetoge_v4f32 : vfsetcc_type<v4i32, v4f32, SETOGE>; @@ -4038,3 +4050,20 @@ def : MSAPat< (SPLAT_D v2f64:$ws, (COPY_TO_REGCLASS (i32 (EXTRACT_SUBREG i64:$idx, sub_32)), GPR32)), sub_64))>; + +def : MSAPat<(vfseteq_v4f32 MSA128WOpnd:$a, MSA128WOpnd:$b), + (FCEQ_W MSA128WOpnd:$a, MSA128WOpnd:$b)>; +def : MSAPat<(vfseteq_v2f64 MSA128DOpnd:$a, MSA128DOpnd:$b), + (FCEQ_D MSA128DOpnd:$a, MSA128DOpnd:$b)>; +def : MSAPat<(vfsetle_v4f32 MSA128WOpnd:$a, MSA128WOpnd:$b), + (FCLE_W MSA128WOpnd:$a, MSA128WOpnd:$b)>; +def : MSAPat<(vfsetle_v2f64 MSA128DOpnd:$a, MSA128DOpnd:$b), + (FCLE_D MSA128DOpnd:$a, MSA128DOpnd:$b)>; +def : MSAPat<(vfsetlt_v4f32 MSA128WOpnd:$a, MSA128WOpnd:$b), + (FCLT_W MSA128WOpnd:$a, MSA128WOpnd:$b)>; +def : MSAPat<(vfsetlt_v2f64 MSA128DOpnd:$a, MSA128DOpnd:$b), + (FCLT_D MSA128DOpnd:$a, MSA128DOpnd:$b)>; +def : MSAPat<(vfsetne_v4f32 MSA128WOpnd:$a, MSA128WOpnd:$b), + (FCNE_W MSA128WOpnd:$a, MSA128WOpnd:$b)>; +def : MSAPat<(vfsetne_v2f64 MSA128DOpnd:$a, MSA128DOpnd:$b), + (FCNE_D MSA128DOpnd:$a, MSA128DOpnd:$b)>; diff --git a/lib/Target/Mips/MipsPreLegalizerCombiner.cpp b/lib/Target/Mips/MipsPreLegalizerCombiner.cpp new file mode 100644 index 000000000000..1cff1c8396ea --- /dev/null +++ b/lib/Target/Mips/MipsPreLegalizerCombiner.cpp @@ -0,0 
+1,92 @@ +//=== lib/CodeGen/GlobalISel/MipsPreLegalizerCombiner.cpp --------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// This pass does combining of machine instructions at the generic MI level, +// before the legalizer. +// +//===----------------------------------------------------------------------===// + +#include "MipsTargetMachine.h" +#include "llvm/CodeGen/GlobalISel/Combiner.h" +#include "llvm/CodeGen/GlobalISel/CombinerInfo.h" +#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h" +#include "llvm/CodeGen/TargetPassConfig.h" + +#define DEBUG_TYPE "mips-prelegalizer-combiner" + +using namespace llvm; + +namespace { +class MipsPreLegalizerCombinerInfo : public CombinerInfo { +public: + MipsPreLegalizerCombinerInfo() + : CombinerInfo(/*AllowIllegalOps*/ true, /*ShouldLegalizeIllegal*/ false, + /*LegalizerInfo*/ nullptr) {} + virtual bool combine(GISelChangeObserver &Observer, MachineInstr &MI, + MachineIRBuilder &B) const override; +}; + +bool MipsPreLegalizerCombinerInfo::combine(GISelChangeObserver &Observer, + MachineInstr &MI, + MachineIRBuilder &B) const { + return false; +} + +// Pass boilerplate +// ================ + +class MipsPreLegalizerCombiner : public MachineFunctionPass { +public: + static char ID; + + MipsPreLegalizerCombiner(); + + StringRef getPassName() const override { return "MipsPreLegalizerCombiner"; } + + bool runOnMachineFunction(MachineFunction &MF) override; + + void getAnalysisUsage(AnalysisUsage &AU) const override; +}; +} // end anonymous namespace + +void MipsPreLegalizerCombiner::getAnalysisUsage(AnalysisUsage &AU) const { + AU.addRequired<TargetPassConfig>(); + AU.setPreservesCFG(); + getSelectionDAGFallbackAnalysisUsage(AU); + MachineFunctionPass::getAnalysisUsage(AU); +} + 
+MipsPreLegalizerCombiner::MipsPreLegalizerCombiner() : MachineFunctionPass(ID) { + initializeMipsPreLegalizerCombinerPass(*PassRegistry::getPassRegistry()); +} + +bool MipsPreLegalizerCombiner::runOnMachineFunction(MachineFunction &MF) { + if (MF.getProperties().hasProperty( + MachineFunctionProperties::Property::FailedISel)) + return false; + auto *TPC = &getAnalysis<TargetPassConfig>(); + MipsPreLegalizerCombinerInfo PCInfo; + Combiner C(PCInfo, TPC); + return C.combineMachineInstrs(MF, nullptr); +} + +char MipsPreLegalizerCombiner::ID = 0; +INITIALIZE_PASS_BEGIN(MipsPreLegalizerCombiner, DEBUG_TYPE, + "Combine Mips machine instrs before legalization", false, + false) +INITIALIZE_PASS_DEPENDENCY(TargetPassConfig) +INITIALIZE_PASS_END(MipsPreLegalizerCombiner, DEBUG_TYPE, + "Combine Mips machine instrs before legalization", false, + false) + +namespace llvm { +FunctionPass *createMipsPreLegalizeCombiner() { + return new MipsPreLegalizerCombiner(); +} +} // end namespace llvm diff --git a/lib/Target/Mips/MipsRegisterBankInfo.cpp b/lib/Target/Mips/MipsRegisterBankInfo.cpp index 351135079217..6af1f10189df 100644 --- a/lib/Target/Mips/MipsRegisterBankInfo.cpp +++ b/lib/Target/Mips/MipsRegisterBankInfo.cpp @@ -57,7 +57,10 @@ const RegisterBank &MipsRegisterBankInfo::getRegBankFromRegClass( switch (RC.getID()) { case Mips::GPR32RegClassID: case Mips::CPU16Regs_and_GPRMM16ZeroRegClassID: + case Mips::GPRMM16MovePPairFirstRegClassID: + case Mips::CPU16Regs_and_GPRMM16MovePPairSecondRegClassID: case Mips::GPRMM16MoveP_and_CPU16Regs_and_GPRMM16ZeroRegClassID: + case Mips::GPRMM16MovePPairFirst_and_GPRMM16MovePPairSecondRegClassID: case Mips::SP32RegClassID: return getRegBank(Mips::GPRBRegBankID); default: @@ -84,6 +87,16 @@ MipsRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { case G_LOAD: case G_STORE: case G_GEP: + case G_AND: + case G_OR: + case G_XOR: + case G_SHL: + case G_ASHR: + case G_LSHR: + case G_SDIV: + case G_UDIV: + case G_SREM: + case G_UREM: 
OperandsMapping = &Mips::ValueMappings[Mips::GPRIdx]; break; case G_CONSTANT: @@ -92,6 +105,19 @@ MipsRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const { OperandsMapping = getOperandsMapping({&Mips::ValueMappings[Mips::GPRIdx], nullptr}); break; + case G_ICMP: + OperandsMapping = + getOperandsMapping({&Mips::ValueMappings[Mips::GPRIdx], nullptr, + &Mips::ValueMappings[Mips::GPRIdx], + &Mips::ValueMappings[Mips::GPRIdx]}); + break; + case G_SELECT: + OperandsMapping = + getOperandsMapping({&Mips::ValueMappings[Mips::GPRIdx], + &Mips::ValueMappings[Mips::GPRIdx], + &Mips::ValueMappings[Mips::GPRIdx], + &Mips::ValueMappings[Mips::GPRIdx]}); + break; default: return getInvalidInstructionMapping(); } diff --git a/lib/Target/Mips/MipsRegisterInfo.h b/lib/Target/Mips/MipsRegisterInfo.h index 4cc50fb981ba..b84aaad05eb5 100644 --- a/lib/Target/Mips/MipsRegisterInfo.h +++ b/lib/Target/Mips/MipsRegisterInfo.h @@ -57,8 +57,6 @@ public: BitVector getReservedRegs(const MachineFunction &MF) const override; - bool enableMultipleCopyHints() const override { return true; } - bool requiresRegisterScavenging(const MachineFunction &MF) const override; bool trackLivenessAfterRegAlloc(const MachineFunction &MF) const override; diff --git a/lib/Target/Mips/MipsRegisterInfo.td b/lib/Target/Mips/MipsRegisterInfo.td index c85ee20273c0..a943a0ad4094 100644 --- a/lib/Target/Mips/MipsRegisterInfo.td +++ b/lib/Target/Mips/MipsRegisterInfo.td @@ -335,6 +335,16 @@ def GPRMM16MoveP : RegisterClass<"Mips", [i32], 32, (add // Callee save S0, S2, S3, S4)>; +def GPRMM16MovePPairFirst : RegisterClass<"Mips", [i32], 32, (add + // Arguments + A0, A1, A2)>; + +def GPRMM16MovePPairSecond : RegisterClass<"Mips", [i32], 32, (add + // Arguments + A1, A2, A3, + // Callee save + S5, S6)>; + def GPR64 : RegisterClass<"Mips", [i64], 64, (add // Reserved ZERO_64, AT_64, @@ -522,6 +532,16 @@ def GPRMM16AsmOperandMoveP : MipsAsmRegOperand { let PredicateMethod = "isMM16AsmRegMoveP"; } +def 
GPRMM16AsmOperandMovePPairFirst : MipsAsmRegOperand { + let Name = "GPRMM16AsmRegMovePPairFirst"; + let PredicateMethod = "isMM16AsmRegMovePPairFirst"; +} + +def GPRMM16AsmOperandMovePPairSecond : MipsAsmRegOperand { + let Name = "GPRMM16AsmRegMovePPairSecond"; + let PredicateMethod = "isMM16AsmRegMovePPairSecond"; +} + def ACC64DSPAsmOperand : MipsAsmRegOperand { let Name = "ACC64DSPAsmReg"; let PredicateMethod = "isACCAsmReg"; @@ -613,6 +633,14 @@ def GPRMM16OpndMoveP : RegisterOperand<GPRMM16MoveP> { let EncoderMethod = "getMovePRegSingleOpValue"; } +def GPRMM16OpndMovePPairFirst : RegisterOperand<GPRMM16MovePPairFirst> { + let ParserMatchClass = GPRMM16AsmOperandMovePPairFirst; +} + +def GPRMM16OpndMovePPairSecond : RegisterOperand<GPRMM16MovePPairSecond> { + let ParserMatchClass = GPRMM16AsmOperandMovePPairSecond; +} + def GPR64Opnd : RegisterOperand<GPR64> { let ParserMatchClass = GPR64AsmOperand; } diff --git a/lib/Target/Mips/MipsSEFrameLowering.cpp b/lib/Target/Mips/MipsSEFrameLowering.cpp index 687c9f676b34..ef1b3c09bdc4 100644 --- a/lib/Target/Mips/MipsSEFrameLowering.cpp +++ b/lib/Target/Mips/MipsSEFrameLowering.cpp @@ -299,8 +299,12 @@ bool ExpandPseudo::expandBuildPairF64(MachineBasicBlock &MBB, // register). Unfortunately, we have to make this decision before register // allocation so for now we use a spill/reload sequence for all // double-precision values in regardless of being an odd/even register. - if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) || - (FP64 && !Subtarget.useOddSPReg())) { + // + // For the cases that should be covered here MipsSEISelDAGToDAG adds $sp as + // implicit operand, so other passes (like ShrinkWrapping) are aware that + // stack is used. 
+ if (I->getNumOperands() == 4 && I->getOperand(3).isReg() + && I->getOperand(3).getReg() == Mips::SP) { unsigned DstReg = I->getOperand(0).getReg(); unsigned LoReg = I->getOperand(1).getReg(); unsigned HiReg = I->getOperand(2).getReg(); @@ -360,9 +364,12 @@ bool ExpandPseudo::expandExtractElementF64(MachineBasicBlock &MBB, // register). Unfortunately, we have to make this decision before register // allocation so for now we use a spill/reload sequence for all // double-precision values in regardless of being an odd/even register. - - if ((Subtarget.isABI_FPXX() && !Subtarget.hasMTHC1()) || - (FP64 && !Subtarget.useOddSPReg())) { + // + // For the cases that should be covered here MipsSEISelDAGToDAG adds $sp as + // implicit operand, so other passes (like ShrinkWrapping) are aware that + // stack is used. + if (I->getNumOperands() == 4 && I->getOperand(3).isReg() + && I->getOperand(3).getReg() == Mips::SP) { unsigned DstReg = I->getOperand(0).getReg(); unsigned SrcReg = Op1.getReg(); unsigned N = Op2.getImm(); diff --git a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp index 599c1e913acf..cf196b597278 100644 --- a/lib/Target/Mips/MipsSEISelDAGToDAG.cpp +++ b/lib/Target/Mips/MipsSEISelDAGToDAG.cpp @@ -238,6 +238,18 @@ void MipsSEDAGToDAGISel::processFunctionAfterISel(MachineFunction &MF) { case Mips::WRDSP: addDSPCtrlRegOperands(true, MI, MF); break; + case Mips::BuildPairF64_64: + case Mips::ExtractElementF64_64: + if (!Subtarget->useOddSPReg()) { + MI.addOperand(MachineOperand::CreateReg(Mips::SP, false, true)); + break; + } + LLVM_FALLTHROUGH; + case Mips::BuildPairF64: + case Mips::ExtractElementF64: + if (Subtarget->isABI_FPXX() && !Subtarget->hasMTHC1()) + MI.addOperand(MachineOperand::CreateReg(Mips::SP, false, true)); + break; default: replaceUsesWithZeroReg(MRI, MI); } @@ -783,6 +795,24 @@ bool MipsSEDAGToDAGISel::trySelect(SDNode *Node) { switch(Opcode) { default: break; + case Mips::PseudoD_SELECT_I: + case 
Mips::PseudoD_SELECT_I64: { + MVT VT = Subtarget->isGP64bit() ? MVT::i64 : MVT::i32; + SDValue cond = Node->getOperand(0); + SDValue Hi1 = Node->getOperand(1); + SDValue Lo1 = Node->getOperand(2); + SDValue Hi2 = Node->getOperand(3); + SDValue Lo2 = Node->getOperand(4); + + SDValue ops[] = {cond, Hi1, Lo1, Hi2, Lo2}; + EVT NodeTys[] = {VT, VT}; + ReplaceNode(Node, CurDAG->getMachineNode(Subtarget->isGP64bit() + ? Mips::PseudoD_SELECT_I64 + : Mips::PseudoD_SELECT_I, + DL, NodeTys, ops)); + return true; + } + case ISD::ADDE: { selectAddE(Node, DL); return true; diff --git a/lib/Target/Mips/MipsSEISelLowering.cpp b/lib/Target/Mips/MipsSEISelLowering.cpp index f625a2903bd7..a78e544c35f0 100644 --- a/lib/Target/Mips/MipsSEISelLowering.cpp +++ b/lib/Target/Mips/MipsSEISelLowering.cpp @@ -158,8 +158,8 @@ MipsSETargetLowering::MipsSETargetLowering(const MipsTargetMachine &TM, setOperationAction(ISD::FTRUNC, MVT::f16, Promote); setOperationAction(ISD::FMINNUM, MVT::f16, Promote); setOperationAction(ISD::FMAXNUM, MVT::f16, Promote); - setOperationAction(ISD::FMINNAN, MVT::f16, Promote); - setOperationAction(ISD::FMAXNAN, MVT::f16, Promote); + setOperationAction(ISD::FMINIMUM, MVT::f16, Promote); + setOperationAction(ISD::FMAXIMUM, MVT::f16, Promote); setTargetDAGCombine(ISD::AND); setTargetDAGCombine(ISD::OR); @@ -2360,24 +2360,6 @@ SDValue MipsSETargetLowering::lowerINTRINSIC_VOID(SDValue Op, } } -/// Check if the given BuildVectorSDNode is a splat. -/// This method currently relies on DAG nodes being reused when equivalent, -/// so it's possible for this to return false even when isConstantSplat returns -/// true. 
-static bool isSplatVector(const BuildVectorSDNode *N) { - unsigned int nOps = N->getNumOperands(); - assert(nOps > 1 && "isSplatVector has 0 or 1 sized build vector"); - - SDValue Operand0 = N->getOperand(0); - - for (unsigned int i = 1; i < nOps; ++i) { - if (N->getOperand(i) != Operand0) - return false; - } - - return true; -} - // Lower ISD::EXTRACT_VECTOR_ELT into MipsISD::VEXTRACT_SEXT_ELT. // // The non-value bits resulting from ISD::EXTRACT_VECTOR_ELT are undefined. We @@ -2488,7 +2470,7 @@ SDValue MipsSETargetLowering::lowerBUILD_VECTOR(SDValue Op, Result = DAG.getNode(ISD::BITCAST, SDLoc(Node), ResTy, Result); return Result; - } else if (isSplatVector(Node)) + } else if (DAG.isSplatValue(Op, /* AllowUndefs */ false)) return Op; else if (!isConstantOrUndefBUILD_VECTOR(Node)) { // Use INSERT_VECTOR_ELT operations rather than expand to stores. diff --git a/lib/Target/Mips/MipsSEInstrInfo.cpp b/lib/Target/Mips/MipsSEInstrInfo.cpp index 7ffe4aff474d..c7ab90ed2a3b 100644 --- a/lib/Target/Mips/MipsSEInstrInfo.cpp +++ b/lib/Target/Mips/MipsSEInstrInfo.cpp @@ -25,9 +25,14 @@ using namespace llvm; +static unsigned getUnconditionalBranch(const MipsSubtarget &STI) { + if (STI.inMicroMipsMode()) + return STI.isPositionIndependent() ? Mips::B_MM : Mips::J_MM; + return STI.isPositionIndependent() ? Mips::B : Mips::J; +} + MipsSEInstrInfo::MipsSEInstrInfo(const MipsSubtarget &STI) - : MipsInstrInfo(STI, STI.isPositionIndependent() ? Mips::B : Mips::J), - RI() {} + : MipsInstrInfo(STI, getUnconditionalBranch(STI)), RI() {} const MipsRegisterInfo &MipsSEInstrInfo::getRegisterInfo() const { return RI; @@ -217,9 +222,9 @@ static bool isReadOrWriteToDSPReg(const MachineInstr &MI, bool &isWrite) { /// We check for the common case of 'or', as it's MIPS' preferred instruction /// for GPRs but we have to check the operands to ensure that is the case. /// Other move instructions for MIPS are directly identifiable. 
-bool MipsSEInstrInfo::isCopyInstr(const MachineInstr &MI, - const MachineOperand *&Src, - const MachineOperand *&Dest) const { +bool MipsSEInstrInfo::isCopyInstrImpl(const MachineInstr &MI, + const MachineOperand *&Src, + const MachineOperand *&Dest) const { bool isDSPControlWrite = false; // Condition is made to match the creation of WRDSP/RDDSP copy instruction // from copyPhysReg function. @@ -416,12 +421,16 @@ bool MipsSEInstrInfo::expandPostRAPseudo(MachineInstr &MI) const { expandERet(MBB, MI); break; case Mips::PseudoMFHI: - Opc = isMicroMips ? Mips::MFHI16_MM : Mips::MFHI; - expandPseudoMFHiLo(MBB, MI, Opc); + expandPseudoMFHiLo(MBB, MI, Mips::MFHI); + break; + case Mips::PseudoMFHI_MM: + expandPseudoMFHiLo(MBB, MI, Mips::MFHI16_MM); break; case Mips::PseudoMFLO: - Opc = isMicroMips ? Mips::MFLO16_MM : Mips::MFLO; - expandPseudoMFHiLo(MBB, MI, Opc); + expandPseudoMFHiLo(MBB, MI, Mips::MFLO); + break; + case Mips::PseudoMFLO_MM: + expandPseudoMFHiLo(MBB, MI, Mips::MFLO16_MM); break; case Mips::PseudoMFHI64: expandPseudoMFHiLo(MBB, MI, Mips::MFHI64); @@ -643,7 +652,7 @@ unsigned MipsSEInstrInfo::getAnalyzableBrOpc(unsigned Opc) const { Opc == Mips::BNE64 || Opc == Mips::BGTZ64 || Opc == Mips::BGEZ64 || Opc == Mips::BLTZ64 || Opc == Mips::BLEZ64 || Opc == Mips::BC1T || Opc == Mips::BC1F || Opc == Mips::B || Opc == Mips::J || - Opc == Mips::B_MM || Opc == Mips::BEQZC_MM || + Opc == Mips::J_MM || Opc == Mips::B_MM || Opc == Mips::BEQZC_MM || Opc == Mips::BNEZC_MM || Opc == Mips::BEQC || Opc == Mips::BNEC || Opc == Mips::BLTC || Opc == Mips::BGEC || Opc == Mips::BLTUC || Opc == Mips::BGEUC || Opc == Mips::BGTZC || Opc == Mips::BLEZC || diff --git a/lib/Target/Mips/MipsSEInstrInfo.h b/lib/Target/Mips/MipsSEInstrInfo.h index fc55716d598a..fce0fe5f58ad 100644 --- a/lib/Target/Mips/MipsSEInstrInfo.h +++ b/lib/Target/Mips/MipsSEInstrInfo.h @@ -47,9 +47,6 @@ public: const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const override; - bool 
isCopyInstr(const MachineInstr &MI, const MachineOperand *&Src, - const MachineOperand *&Dest) const override; - void storeRegToStack(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, unsigned SrcReg, bool isKill, int FrameIndex, @@ -79,6 +76,13 @@ public: MachineBasicBlock::iterator II, const DebugLoc &DL, unsigned *NewImm) const; +protected: + /// If the specific machine instruction is a instruction that moves/copies + /// value from one register to another register return true along with + /// @Source machine operand and @Destination machine operand. + bool isCopyInstrImpl(const MachineInstr &MI, const MachineOperand *&Source, + const MachineOperand *&Destination) const override; + private: unsigned getAnalyzableBrOpc(unsigned Opc) const override; diff --git a/lib/Target/Mips/MipsSchedule.td b/lib/Target/Mips/MipsSchedule.td index 64db815a0f4c..410fa655a225 100644 --- a/lib/Target/Mips/MipsSchedule.td +++ b/lib/Target/Mips/MipsSchedule.td @@ -154,6 +154,7 @@ def II_DERET : InstrItinClass; def II_ERETNC : InstrItinClass; def II_EHB : InstrItinClass; def II_SDBBP : InstrItinClass; +def II_SIGRIE : InstrItinClass; def II_SSNOP : InstrItinClass; def II_SYSCALL : InstrItinClass; def II_PAUSE : InstrItinClass; @@ -546,6 +547,7 @@ def MipsGenericItineraries : ProcessorItineraries<[ALU, IMULDIV], [], [ InstrItinData<II_ERETNC , [InstrStage<1, [ALU]>]>, InstrItinData<II_EHB , [InstrStage<1, [ALU]>]>, InstrItinData<II_SDBBP , [InstrStage<1, [ALU]>]>, + InstrItinData<II_SIGRIE , [InstrStage<1, [ALU]>]>, InstrItinData<II_SSNOP , [InstrStage<1, [ALU]>]>, InstrItinData<II_SYSCALL , [InstrStage<1, [ALU]>]>, InstrItinData<II_PAUSE , [InstrStage<1, [ALU]>]>, diff --git a/lib/Target/Mips/MipsScheduleGeneric.td b/lib/Target/Mips/MipsScheduleGeneric.td index 79c55dbb9e03..80ffe7ada7c8 100644 --- a/lib/Target/Mips/MipsScheduleGeneric.td +++ b/lib/Target/Mips/MipsScheduleGeneric.td @@ -179,7 +179,7 @@ def GenericWriteTrap : SchedWriteRes<[GenericIssueCTISTD]>; def : 
ItinRW<[GenericWriteTrap], [II_BREAK, II_SYSCALL, II_TEQ, II_TEQI, II_TGE, II_TGEI, II_TGEIU, II_TGEU, II_TNE, II_TNEI, II_TLT, II_TLTI, II_TLTU, II_TTLTIU, - II_TRAP, II_SDBBP]>; + II_TRAP, II_SDBBP, II_SIGRIE]>; // COP0 Pipeline // ============= diff --git a/lib/Target/Mips/MipsSubtarget.h b/lib/Target/Mips/MipsSubtarget.h index 896dd0eb0a5e..ad8f4848b870 100644 --- a/lib/Target/Mips/MipsSubtarget.h +++ b/lib/Target/Mips/MipsSubtarget.h @@ -295,8 +295,10 @@ public: bool inMips16HardFloat() const { return inMips16Mode() && InMips16HardFloat; } - bool inMicroMipsMode() const { return InMicroMipsMode; } - bool inMicroMips32r6Mode() const { return InMicroMipsMode && hasMips32r6(); } + bool inMicroMipsMode() const { return InMicroMipsMode && !InMips16Mode; } + bool inMicroMips32r6Mode() const { + return inMicroMipsMode() && hasMips32r6(); + } bool hasDSP() const { return HasDSP; } bool hasDSPR2() const { return HasDSPR2; } bool hasDSPR3() const { return HasDSPR3; } @@ -312,14 +314,14 @@ public: } bool useSmallSection() const { return UseSmallSection; } - bool hasStandardEncoding() const { return !inMips16Mode(); } + bool hasStandardEncoding() const { return !InMips16Mode && !InMicroMipsMode; } bool useSoftFloat() const { return IsSoftFloat; } bool useLongCalls() const { return UseLongCalls; } bool enableLongBranchPass() const { - return hasStandardEncoding() || allowMixed16_32(); + return hasStandardEncoding() || inMicroMipsMode() || allowMixed16_32(); } /// Features related to the presence of specific instructions. 
diff --git a/lib/Target/Mips/MipsTargetMachine.cpp b/lib/Target/Mips/MipsTargetMachine.cpp index 1e6fe2b9f7e7..8466298cf36f 100644 --- a/lib/Target/Mips/MipsTargetMachine.cpp +++ b/lib/Target/Mips/MipsTargetMachine.cpp @@ -56,6 +56,7 @@ extern "C" void LLVMInitializeMipsTarget() { initializeMipsDelaySlotFillerPass(*PR); initializeMipsBranchExpansionPass(*PR); initializeMicroMipsSizeReducePass(*PR); + initializeMipsPreLegalizerCombinerPass(*PR); } static std::string computeDataLayout(const Triple &TT, StringRef CPU, @@ -101,12 +102,6 @@ static Reloc::Model getEffectiveRelocModel(bool JIT, return *RM; } -static CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM) { - if (CM) - return *CM; - return CodeModel::Small; -} - // On function prologue, the stack is created by decrementing // its pointer. Once decremented, all references are done with positive // offset from the stack/frame pointer, using StackGrowsUp enables @@ -121,7 +116,7 @@ MipsTargetMachine::MipsTargetMachine(const Target &T, const Triple &TT, bool isLittle) : LLVMTargetMachine(T, computeDataLayout(TT, CPU, Options, isLittle), TT, CPU, FS, Options, getEffectiveRelocModel(JIT, RM), - getEffectiveCodeModel(CM), OL), + getEffectiveCodeModel(CM, CodeModel::Small), OL), isLittle(isLittle), TLOF(llvm::make_unique<MipsTargetObjectFile>()), ABI(MipsABIInfo::computeTargetABI(TT, CPU, Options.MCOptions)), Subtarget(nullptr), DefaultSubtarget(TT, CPU, FS, isLittle, *this, @@ -240,8 +235,8 @@ public: bool addInstSelector() override; void addPreEmitPass() override; void addPreRegAlloc() override; - void addPreEmit2() ; bool addIRTranslator() override; + void addPreLegalizeMachineIR() override; bool addLegalizeMachineIR() override; bool addRegBankSelect() override; bool addGlobalInstructionSelect() override; @@ -286,9 +281,6 @@ MipsTargetMachine::getTargetTransformInfo(const Function &F) { return TargetTransformInfo(BasicTTIImpl(this, F)); } -void MipsPassConfig::addPreEmit2() { -} - // Implemented 
by targets that want to run passes immediately before // machine code is emitted. return true if -print-machineinstrs should // print out the code after the passes. @@ -322,6 +314,10 @@ bool MipsPassConfig::addIRTranslator() { return false; } +void MipsPassConfig::addPreLegalizeMachineIR() { + addPass(createMipsPreLegalizeCombiner()); +} + bool MipsPassConfig::addLegalizeMachineIR() { addPass(new Legalizer()); return false; diff --git a/lib/Target/Mips/MipsTargetObjectFile.cpp b/lib/Target/Mips/MipsTargetObjectFile.cpp index f767c8321988..f53ee0631b5e 100644 --- a/lib/Target/Mips/MipsTargetObjectFile.cpp +++ b/lib/Target/Mips/MipsTargetObjectFile.cpp @@ -10,6 +10,7 @@ #include "MipsTargetObjectFile.h" #include "MipsSubtarget.h" #include "MipsTargetMachine.h" +#include "MCTargetDesc/MipsMCExpr.h" #include "llvm/BinaryFormat/ELF.h" #include "llvm/IR/DataLayout.h" #include "llvm/IR/DerivedTypes.h" @@ -189,6 +190,7 @@ const MCExpr * MipsTargetObjectFile::getDebugThreadLocalSymbol(const MCSymbol *Sym) const { const MCExpr *Expr = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None, getContext()); - return MCBinaryExpr::createAdd( + Expr = MCBinaryExpr::createAdd( Expr, MCConstantExpr::create(0x8000, getContext()), getContext()); + return MipsMCExpr::create(MipsMCExpr::MEK_DTPREL, Expr, getContext()); } diff --git a/lib/Target/Mips/TargetInfo/MipsTargetInfo.cpp b/lib/Target/Mips/TargetInfo/MipsTargetInfo.cpp index ab494d5bf41b..22be564b6502 100644 --- a/lib/Target/Mips/TargetInfo/MipsTargetInfo.cpp +++ b/lib/Target/Mips/TargetInfo/MipsTargetInfo.cpp @@ -32,17 +32,18 @@ Target &llvm::getTheMips64elTarget() { extern "C" void LLVMInitializeMipsTargetInfo() { RegisterTarget<Triple::mips, /*HasJIT=*/true> - X(getTheMipsTarget(), "mips", "Mips", "Mips"); + X(getTheMipsTarget(), "mips", "MIPS (32-bit big endian)", "Mips"); RegisterTarget<Triple::mipsel, /*HasJIT=*/true> - Y(getTheMipselTarget(), "mipsel", "Mipsel", "Mips"); + Y(getTheMipselTarget(), "mipsel", "MIPS (32-bit 
little endian)", "Mips"); RegisterTarget<Triple::mips64, /*HasJIT=*/true> - A(getTheMips64Target(), "mips64", "Mips64 [experimental]", "Mips"); + A(getTheMips64Target(), "mips64", "MIPS (64-bit big endian)", "Mips"); RegisterTarget<Triple::mips64el, /*HasJIT=*/true> - B(getTheMips64elTarget(), "mips64el", "Mips64el [experimental]", "Mips"); + B(getTheMips64elTarget(), "mips64el", "MIPS (64-bit little endian)", + "Mips"); } |