path: root/contrib/llvm/lib/Target/Sparc
Diffstat (limited to 'contrib/llvm/lib/Target/Sparc')
-rw-r--r--  contrib/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp | 1312
-rw-r--r--  contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp | 512
-rw-r--r--  contrib/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp | 670
-rw-r--r--  contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp | 197
-rw-r--r--  contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.h | 55
-rwxr-xr-x  contrib/llvm/lib/Target/Sparc/LeonFeatures.td | 68
-rwxr-xr-x  contrib/llvm/lib/Target/Sparc/LeonPasses.cpp | 158
-rwxr-xr-x  contrib/llvm/lib/Target/Sparc/LeonPasses.h | 88
-rw-r--r--  contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp | 309
-rw-r--r--  contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp | 140
-rw-r--r--  contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h | 97
-rw-r--r--  contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp | 73
-rw-r--r--  contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h | 40
-rw-r--r--  contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp | 238
-rw-r--r--  contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp | 220
-rw-r--r--  contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h | 112
-rw-r--r--  contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp | 125
-rw-r--r--  contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h | 65
-rw-r--r--  contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcTargetStreamer.cpp | 46
-rw-r--r--  contrib/llvm/lib/Target/Sparc/Sparc.h | 167
-rw-r--r--  contrib/llvm/lib/Target/Sparc/Sparc.td | 179
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp | 449
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcCallingConv.td | 144
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp | 367
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h | 68
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp | 405
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp | 3603
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcISelLowering.h | 223
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcInstr64Bit.td | 541
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcInstrAliases.td | 506
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td | 369
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp | 510
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h | 108
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td | 1694
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcInstrVIS.td | 263
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcMCInstLower.cpp | 108
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.cpp | 14
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h | 56
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp | 237
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h | 50
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td | 379
-rwxr-xr-x  contrib/llvm/lib/Target/Sparc/SparcSchedule.td | 124
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp | 102
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcSubtarget.h | 124
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp | 213
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h | 83
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.cpp | 48
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.h | 37
-rw-r--r--  contrib/llvm/lib/Target/Sparc/SparcTargetStreamer.h | 49
-rw-r--r--  contrib/llvm/lib/Target/Sparc/TargetInfo/SparcTargetInfo.cpp | 35
50 files changed, 15780 insertions(+), 0 deletions(-)
diff --git a/contrib/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp b/contrib/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
new file mode 100644
index 000000000000..05f78a48badf
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/AsmParser/SparcAsmParser.cpp
@@ -0,0 +1,1312 @@
+//===-- SparcAsmParser.cpp - Parse Sparc assembly to MCInst instructions --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/SparcMCExpr.h"
+#include "MCTargetDesc/SparcMCTargetDesc.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
+#include "llvm/MC/MCParser/MCTargetAsmParser.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SMLoc.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+using namespace llvm;
+
+// The generated AsmMatcher SparcGenAsmMatcher uses "Sparc" as the target
+// namespace, but the SPARC backend uses "SP" as its namespace.
+namespace llvm {
+namespace Sparc {
+
+ using namespace SP;
+
+} // end namespace Sparc
+} // end namespace llvm
+
+namespace {
+
+class SparcOperand;
+
+class SparcAsmParser : public MCTargetAsmParser {
+ MCAsmParser &Parser;
+
+ /// @name Auto-generated Match Functions
+ /// {
+
+#define GET_ASSEMBLER_HEADER
+#include "SparcGenAsmMatcher.inc"
+
+ /// }
+
+ // public interface of the MCTargetAsmParser.
+ bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands, MCStreamer &Out,
+ uint64_t &ErrorInfo,
+ bool MatchingInlineAsm) override;
+ bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
+ bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
+ SMLoc NameLoc, OperandVector &Operands) override;
+ bool ParseDirective(AsmToken DirectiveID) override;
+
+ unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
+ unsigned Kind) override;
+
+  // Custom parse functions for Sparc-specific operands.
+ OperandMatchResultTy parseMEMOperand(OperandVector &Operands);
+
+ OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Name);
+
+ OperandMatchResultTy
+ parseSparcAsmOperand(std::unique_ptr<SparcOperand> &Operand,
+ bool isCall = false);
+
+ OperandMatchResultTy parseBranchModifiers(OperandVector &Operands);
+
+ // Helper function for dealing with %lo / %hi in PIC mode.
+ const SparcMCExpr *adjustPICRelocation(SparcMCExpr::VariantKind VK,
+ const MCExpr *subExpr);
+
+  // Returns true if Tok matches a register name; on success the register
+  // number and kind are returned in RegNo and RegKind.
+ bool matchRegisterName(const AsmToken &Tok, unsigned &RegNo,
+ unsigned &RegKind);
+
+ bool matchSparcAsmModifiers(const MCExpr *&EVal, SMLoc &EndLoc);
+ bool parseDirectiveWord(unsigned Size, SMLoc L);
+
+ bool is64Bit() const {
+ return getSTI().getTargetTriple().getArch() == Triple::sparcv9;
+ }
+
+ bool expandSET(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions);
+
+public:
+ SparcAsmParser(const MCSubtargetInfo &sti, MCAsmParser &parser,
+ const MCInstrInfo &MII,
+ const MCTargetOptions &Options)
+ : MCTargetAsmParser(Options, sti, MII), Parser(parser) {
+ // Initialize the set of available features.
+ setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
+ }
+};
+
+} // end anonymous namespace
+
+ static const MCPhysReg IntRegs[32] = {
+ Sparc::G0, Sparc::G1, Sparc::G2, Sparc::G3,
+ Sparc::G4, Sparc::G5, Sparc::G6, Sparc::G7,
+ Sparc::O0, Sparc::O1, Sparc::O2, Sparc::O3,
+ Sparc::O4, Sparc::O5, Sparc::O6, Sparc::O7,
+ Sparc::L0, Sparc::L1, Sparc::L2, Sparc::L3,
+ Sparc::L4, Sparc::L5, Sparc::L6, Sparc::L7,
+ Sparc::I0, Sparc::I1, Sparc::I2, Sparc::I3,
+ Sparc::I4, Sparc::I5, Sparc::I6, Sparc::I7 };
+
+ static const MCPhysReg FloatRegs[32] = {
+ Sparc::F0, Sparc::F1, Sparc::F2, Sparc::F3,
+ Sparc::F4, Sparc::F5, Sparc::F6, Sparc::F7,
+ Sparc::F8, Sparc::F9, Sparc::F10, Sparc::F11,
+ Sparc::F12, Sparc::F13, Sparc::F14, Sparc::F15,
+ Sparc::F16, Sparc::F17, Sparc::F18, Sparc::F19,
+ Sparc::F20, Sparc::F21, Sparc::F22, Sparc::F23,
+ Sparc::F24, Sparc::F25, Sparc::F26, Sparc::F27,
+ Sparc::F28, Sparc::F29, Sparc::F30, Sparc::F31 };
+
+ static const MCPhysReg DoubleRegs[32] = {
+ Sparc::D0, Sparc::D1, Sparc::D2, Sparc::D3,
+ Sparc::D4, Sparc::D5, Sparc::D6, Sparc::D7,
+ Sparc::D8, Sparc::D9, Sparc::D10, Sparc::D11,
+ Sparc::D12, Sparc::D13, Sparc::D14, Sparc::D15,
+ Sparc::D16, Sparc::D17, Sparc::D18, Sparc::D19,
+ Sparc::D20, Sparc::D21, Sparc::D22, Sparc::D23,
+ Sparc::D24, Sparc::D25, Sparc::D26, Sparc::D27,
+ Sparc::D28, Sparc::D29, Sparc::D30, Sparc::D31 };
+
+ static const MCPhysReg QuadFPRegs[32] = {
+ Sparc::Q0, Sparc::Q1, Sparc::Q2, Sparc::Q3,
+ Sparc::Q4, Sparc::Q5, Sparc::Q6, Sparc::Q7,
+ Sparc::Q8, Sparc::Q9, Sparc::Q10, Sparc::Q11,
+ Sparc::Q12, Sparc::Q13, Sparc::Q14, Sparc::Q15 };
+
+ static const MCPhysReg ASRRegs[32] = {
+ SP::Y, SP::ASR1, SP::ASR2, SP::ASR3,
+ SP::ASR4, SP::ASR5, SP::ASR6, SP::ASR7,
+ SP::ASR8, SP::ASR9, SP::ASR10, SP::ASR11,
+ SP::ASR12, SP::ASR13, SP::ASR14, SP::ASR15,
+ SP::ASR16, SP::ASR17, SP::ASR18, SP::ASR19,
+ SP::ASR20, SP::ASR21, SP::ASR22, SP::ASR23,
+ SP::ASR24, SP::ASR25, SP::ASR26, SP::ASR27,
+ SP::ASR28, SP::ASR29, SP::ASR30, SP::ASR31};
+
+ static const MCPhysReg IntPairRegs[] = {
+ Sparc::G0_G1, Sparc::G2_G3, Sparc::G4_G5, Sparc::G6_G7,
+ Sparc::O0_O1, Sparc::O2_O3, Sparc::O4_O5, Sparc::O6_O7,
+ Sparc::L0_L1, Sparc::L2_L3, Sparc::L4_L5, Sparc::L6_L7,
+ Sparc::I0_I1, Sparc::I2_I3, Sparc::I4_I5, Sparc::I6_I7};
+
+ static const MCPhysReg CoprocRegs[32] = {
+ Sparc::C0, Sparc::C1, Sparc::C2, Sparc::C3,
+ Sparc::C4, Sparc::C5, Sparc::C6, Sparc::C7,
+ Sparc::C8, Sparc::C9, Sparc::C10, Sparc::C11,
+ Sparc::C12, Sparc::C13, Sparc::C14, Sparc::C15,
+ Sparc::C16, Sparc::C17, Sparc::C18, Sparc::C19,
+ Sparc::C20, Sparc::C21, Sparc::C22, Sparc::C23,
+ Sparc::C24, Sparc::C25, Sparc::C26, Sparc::C27,
+ Sparc::C28, Sparc::C29, Sparc::C30, Sparc::C31 };
+
+ static const MCPhysReg CoprocPairRegs[] = {
+ Sparc::C0_C1, Sparc::C2_C3, Sparc::C4_C5, Sparc::C6_C7,
+ Sparc::C8_C9, Sparc::C10_C11, Sparc::C12_C13, Sparc::C14_C15,
+ Sparc::C16_C17, Sparc::C18_C19, Sparc::C20_C21, Sparc::C22_C23,
+ Sparc::C24_C25, Sparc::C26_C27, Sparc::C28_C29, Sparc::C30_C31};
+
+namespace {
+
+/// SparcOperand - Instances of this class represent a parsed Sparc machine
+/// instruction.
+class SparcOperand : public MCParsedAsmOperand {
+public:
+ enum RegisterKind {
+ rk_None,
+ rk_IntReg,
+ rk_IntPairReg,
+ rk_FloatReg,
+ rk_DoubleReg,
+ rk_QuadReg,
+ rk_CoprocReg,
+ rk_CoprocPairReg,
+ rk_Special,
+ };
+
+private:
+ enum KindTy {
+ k_Token,
+ k_Register,
+ k_Immediate,
+ k_MemoryReg,
+ k_MemoryImm
+ } Kind;
+
+ SMLoc StartLoc, EndLoc;
+
+ struct Token {
+ const char *Data;
+ unsigned Length;
+ };
+
+ struct RegOp {
+ unsigned RegNum;
+ RegisterKind Kind;
+ };
+
+ struct ImmOp {
+ const MCExpr *Val;
+ };
+
+ struct MemOp {
+ unsigned Base;
+ unsigned OffsetReg;
+ const MCExpr *Off;
+ };
+
+ union {
+ struct Token Tok;
+ struct RegOp Reg;
+ struct ImmOp Imm;
+ struct MemOp Mem;
+ };
+
+public:
+ SparcOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
+
+ bool isToken() const override { return Kind == k_Token; }
+ bool isReg() const override { return Kind == k_Register; }
+ bool isImm() const override { return Kind == k_Immediate; }
+ bool isMem() const override { return isMEMrr() || isMEMri(); }
+ bool isMEMrr() const { return Kind == k_MemoryReg; }
+ bool isMEMri() const { return Kind == k_MemoryImm; }
+
+ bool isIntReg() const {
+ return (Kind == k_Register && Reg.Kind == rk_IntReg);
+ }
+
+ bool isFloatReg() const {
+ return (Kind == k_Register && Reg.Kind == rk_FloatReg);
+ }
+
+ bool isFloatOrDoubleReg() const {
+ return (Kind == k_Register && (Reg.Kind == rk_FloatReg
+ || Reg.Kind == rk_DoubleReg));
+ }
+
+ bool isCoprocReg() const {
+ return (Kind == k_Register && Reg.Kind == rk_CoprocReg);
+ }
+
+ StringRef getToken() const {
+ assert(Kind == k_Token && "Invalid access!");
+ return StringRef(Tok.Data, Tok.Length);
+ }
+
+ unsigned getReg() const override {
+ assert((Kind == k_Register) && "Invalid access!");
+ return Reg.RegNum;
+ }
+
+ const MCExpr *getImm() const {
+ assert((Kind == k_Immediate) && "Invalid access!");
+ return Imm.Val;
+ }
+
+ unsigned getMemBase() const {
+ assert((Kind == k_MemoryReg || Kind == k_MemoryImm) && "Invalid access!");
+ return Mem.Base;
+ }
+
+ unsigned getMemOffsetReg() const {
+ assert((Kind == k_MemoryReg) && "Invalid access!");
+ return Mem.OffsetReg;
+ }
+
+ const MCExpr *getMemOff() const {
+ assert((Kind == k_MemoryImm) && "Invalid access!");
+ return Mem.Off;
+ }
+
+ /// getStartLoc - Get the location of the first token of this operand.
+ SMLoc getStartLoc() const override {
+ return StartLoc;
+ }
+ /// getEndLoc - Get the location of the last token of this operand.
+ SMLoc getEndLoc() const override {
+ return EndLoc;
+ }
+
+ void print(raw_ostream &OS) const override {
+ switch (Kind) {
+ case k_Token: OS << "Token: " << getToken() << "\n"; break;
+ case k_Register: OS << "Reg: #" << getReg() << "\n"; break;
+ case k_Immediate: OS << "Imm: " << getImm() << "\n"; break;
+ case k_MemoryReg: OS << "Mem: " << getMemBase() << "+"
+ << getMemOffsetReg() << "\n"; break;
+ case k_MemoryImm: assert(getMemOff() != nullptr);
+ OS << "Mem: " << getMemBase()
+ << "+" << *getMemOff()
+ << "\n"; break;
+ }
+ }
+
+ void addRegOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ Inst.addOperand(MCOperand::createReg(getReg()));
+ }
+
+ void addImmOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 1 && "Invalid number of operands!");
+ const MCExpr *Expr = getImm();
+ addExpr(Inst, Expr);
+ }
+
+ void addExpr(MCInst &Inst, const MCExpr *Expr) const{
+ // Add as immediate when possible. Null MCExpr = 0.
+ if (!Expr)
+ Inst.addOperand(MCOperand::createImm(0));
+ else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
+ Inst.addOperand(MCOperand::createImm(CE->getValue()));
+ else
+ Inst.addOperand(MCOperand::createExpr(Expr));
+ }
+
+ void addMEMrrOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+
+ Inst.addOperand(MCOperand::createReg(getMemBase()));
+
+ assert(getMemOffsetReg() != 0 && "Invalid offset");
+ Inst.addOperand(MCOperand::createReg(getMemOffsetReg()));
+ }
+
+ void addMEMriOperands(MCInst &Inst, unsigned N) const {
+ assert(N == 2 && "Invalid number of operands!");
+
+ Inst.addOperand(MCOperand::createReg(getMemBase()));
+
+ const MCExpr *Expr = getMemOff();
+ addExpr(Inst, Expr);
+ }
+
+ static std::unique_ptr<SparcOperand> CreateToken(StringRef Str, SMLoc S) {
+ auto Op = make_unique<SparcOperand>(k_Token);
+ Op->Tok.Data = Str.data();
+ Op->Tok.Length = Str.size();
+ Op->StartLoc = S;
+ Op->EndLoc = S;
+ return Op;
+ }
+
+ static std::unique_ptr<SparcOperand> CreateReg(unsigned RegNum, unsigned Kind,
+ SMLoc S, SMLoc E) {
+ auto Op = make_unique<SparcOperand>(k_Register);
+ Op->Reg.RegNum = RegNum;
+ Op->Reg.Kind = (SparcOperand::RegisterKind)Kind;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ static std::unique_ptr<SparcOperand> CreateImm(const MCExpr *Val, SMLoc S,
+ SMLoc E) {
+ auto Op = make_unique<SparcOperand>(k_Immediate);
+ Op->Imm.Val = Val;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ static bool MorphToIntPairReg(SparcOperand &Op) {
+ unsigned Reg = Op.getReg();
+ assert(Op.Reg.Kind == rk_IntReg);
+ unsigned regIdx = 32;
+ if (Reg >= Sparc::G0 && Reg <= Sparc::G7)
+ regIdx = Reg - Sparc::G0;
+ else if (Reg >= Sparc::O0 && Reg <= Sparc::O7)
+ regIdx = Reg - Sparc::O0 + 8;
+ else if (Reg >= Sparc::L0 && Reg <= Sparc::L7)
+ regIdx = Reg - Sparc::L0 + 16;
+ else if (Reg >= Sparc::I0 && Reg <= Sparc::I7)
+ regIdx = Reg - Sparc::I0 + 24;
+ if (regIdx % 2 || regIdx > 31)
+ return false;
+ Op.Reg.RegNum = IntPairRegs[regIdx / 2];
+ Op.Reg.Kind = rk_IntPairReg;
+ return true;
+ }
+
+ static bool MorphToDoubleReg(SparcOperand &Op) {
+ unsigned Reg = Op.getReg();
+ assert(Op.Reg.Kind == rk_FloatReg);
+ unsigned regIdx = Reg - Sparc::F0;
+ if (regIdx % 2 || regIdx > 31)
+ return false;
+ Op.Reg.RegNum = DoubleRegs[regIdx / 2];
+ Op.Reg.Kind = rk_DoubleReg;
+ return true;
+ }
+
+ static bool MorphToQuadReg(SparcOperand &Op) {
+ unsigned Reg = Op.getReg();
+ unsigned regIdx = 0;
+ switch (Op.Reg.Kind) {
+ default: llvm_unreachable("Unexpected register kind!");
+ case rk_FloatReg:
+ regIdx = Reg - Sparc::F0;
+ if (regIdx % 4 || regIdx > 31)
+ return false;
+ Reg = QuadFPRegs[regIdx / 4];
+ break;
+ case rk_DoubleReg:
+ regIdx = Reg - Sparc::D0;
+ if (regIdx % 2 || regIdx > 31)
+ return false;
+ Reg = QuadFPRegs[regIdx / 2];
+ break;
+ }
+ Op.Reg.RegNum = Reg;
+ Op.Reg.Kind = rk_QuadReg;
+ return true;
+ }
+
+ static bool MorphToCoprocPairReg(SparcOperand &Op) {
+ unsigned Reg = Op.getReg();
+ assert(Op.Reg.Kind == rk_CoprocReg);
+ unsigned regIdx = 32;
+ if (Reg >= Sparc::C0 && Reg <= Sparc::C31)
+ regIdx = Reg - Sparc::C0;
+ if (regIdx % 2 || regIdx > 31)
+ return false;
+ Op.Reg.RegNum = CoprocPairRegs[regIdx / 2];
+ Op.Reg.Kind = rk_CoprocPairReg;
+ return true;
+ }
+
+ static std::unique_ptr<SparcOperand>
+ MorphToMEMrr(unsigned Base, std::unique_ptr<SparcOperand> Op) {
+ unsigned offsetReg = Op->getReg();
+ Op->Kind = k_MemoryReg;
+ Op->Mem.Base = Base;
+ Op->Mem.OffsetReg = offsetReg;
+ Op->Mem.Off = nullptr;
+ return Op;
+ }
+
+ static std::unique_ptr<SparcOperand>
+ CreateMEMr(unsigned Base, SMLoc S, SMLoc E) {
+ auto Op = make_unique<SparcOperand>(k_MemoryReg);
+ Op->Mem.Base = Base;
+ Op->Mem.OffsetReg = Sparc::G0; // always 0
+ Op->Mem.Off = nullptr;
+ Op->StartLoc = S;
+ Op->EndLoc = E;
+ return Op;
+ }
+
+ static std::unique_ptr<SparcOperand>
+ MorphToMEMri(unsigned Base, std::unique_ptr<SparcOperand> Op) {
+ const MCExpr *Imm = Op->getImm();
+ Op->Kind = k_MemoryImm;
+ Op->Mem.Base = Base;
+ Op->Mem.OffsetReg = 0;
+ Op->Mem.Off = Imm;
+ return Op;
+ }
+};
+
+} // end anonymous namespace
+
+bool SparcAsmParser::expandSET(MCInst &Inst, SMLoc IDLoc,
+ SmallVectorImpl<MCInst> &Instructions) {
+ MCOperand MCRegOp = Inst.getOperand(0);
+ MCOperand MCValOp = Inst.getOperand(1);
+ assert(MCRegOp.isReg());
+ assert(MCValOp.isImm() || MCValOp.isExpr());
+
+ // the imm operand can be either an expression or an immediate.
+ bool IsImm = Inst.getOperand(1).isImm();
+ int64_t RawImmValue = IsImm ? MCValOp.getImm() : 0;
+
+ // Allow either a signed or unsigned 32-bit immediate.
+ if (RawImmValue < -2147483648LL || RawImmValue > 4294967295LL) {
+ return Error(IDLoc,
+ "set: argument must be between -2147483648 and 4294967295");
+ }
+
+ // If the value was expressed as a large unsigned number, that's ok.
+ // We want to see if it "looks like" a small signed number.
+ int32_t ImmValue = RawImmValue;
+ // For 'set' you can't use 'or' with a negative operand on V9 because
+ // that would splat the sign bit across the upper half of the destination
+ // register, whereas 'set' is defined to zero the high 32 bits.
+ bool IsEffectivelyImm13 =
+ IsImm && ((is64Bit() ? 0 : -4096) <= ImmValue && ImmValue < 4096);
+ const MCExpr *ValExpr;
+ if (IsImm)
+ ValExpr = MCConstantExpr::create(ImmValue, getContext());
+ else
+ ValExpr = MCValOp.getExpr();
+
+ MCOperand PrevReg = MCOperand::createReg(Sparc::G0);
+
+ // If not just a signed imm13 value, then either we use a 'sethi' with a
+ // following 'or', or a 'sethi' by itself if there are no more 1 bits.
+ // In either case, start with the 'sethi'.
+ if (!IsEffectivelyImm13) {
+ MCInst TmpInst;
+ const MCExpr *Expr = adjustPICRelocation(SparcMCExpr::VK_Sparc_HI, ValExpr);
+ TmpInst.setLoc(IDLoc);
+ TmpInst.setOpcode(SP::SETHIi);
+ TmpInst.addOperand(MCRegOp);
+ TmpInst.addOperand(MCOperand::createExpr(Expr));
+ Instructions.push_back(TmpInst);
+ PrevReg = MCRegOp;
+ }
+
+ // The low bits require touching in 3 cases:
+ // * A non-immediate value will always require both instructions.
+ // * An effectively imm13 value needs only an 'or' instruction.
+ // * Otherwise, an immediate that is not effectively imm13 requires the
+ // 'or' only if bits remain after clearing the 22 bits that 'sethi' set.
+ // If the low bits are known zeros, there's nothing to do.
+ // In the second case, and only in that case, must we NOT clear
+ // bits of the immediate value via the %lo() assembler function.
+ // Note also, the 'or' instruction doesn't mind a large value in the case
+ // where the operand to 'set' was 0xFFFFFzzz - it does exactly what you mean.
+ if (!IsImm || IsEffectivelyImm13 || (ImmValue & 0x3ff)) {
+ MCInst TmpInst;
+ const MCExpr *Expr;
+ if (IsEffectivelyImm13)
+ Expr = ValExpr;
+ else
+ Expr = adjustPICRelocation(SparcMCExpr::VK_Sparc_LO, ValExpr);
+ TmpInst.setLoc(IDLoc);
+ TmpInst.setOpcode(SP::ORri);
+ TmpInst.addOperand(MCRegOp);
+ TmpInst.addOperand(PrevReg);
+ TmpInst.addOperand(MCOperand::createExpr(Expr));
+ Instructions.push_back(TmpInst);
+ }
+ return false;
+}
+
+bool SparcAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
+ OperandVector &Operands,
+ MCStreamer &Out,
+ uint64_t &ErrorInfo,
+ bool MatchingInlineAsm) {
+ MCInst Inst;
+ SmallVector<MCInst, 8> Instructions;
+ unsigned MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
+ MatchingInlineAsm);
+ switch (MatchResult) {
+ case Match_Success: {
+ switch (Inst.getOpcode()) {
+ default:
+ Inst.setLoc(IDLoc);
+ Instructions.push_back(Inst);
+ break;
+ case SP::SET:
+ if (expandSET(Inst, IDLoc, Instructions))
+ return true;
+ break;
+ }
+
+ for (const MCInst &I : Instructions) {
+ Out.EmitInstruction(I, getSTI());
+ }
+ return false;
+ }
+
+ case Match_MissingFeature:
+ return Error(IDLoc,
+ "instruction requires a CPU feature not currently enabled");
+
+ case Match_InvalidOperand: {
+ SMLoc ErrorLoc = IDLoc;
+ if (ErrorInfo != ~0ULL) {
+ if (ErrorInfo >= Operands.size())
+ return Error(IDLoc, "too few operands for instruction");
+
+ ErrorLoc = ((SparcOperand &)*Operands[ErrorInfo]).getStartLoc();
+ if (ErrorLoc == SMLoc())
+ ErrorLoc = IDLoc;
+ }
+
+ return Error(ErrorLoc, "invalid operand for instruction");
+ }
+ case Match_MnemonicFail:
+ return Error(IDLoc, "invalid instruction mnemonic");
+ }
+ llvm_unreachable("Implement any new match types added!");
+}
+
+bool SparcAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
+ SMLoc &EndLoc) {
+ const AsmToken &Tok = Parser.getTok();
+ StartLoc = Tok.getLoc();
+ EndLoc = Tok.getEndLoc();
+ RegNo = 0;
+ if (getLexer().getKind() != AsmToken::Percent)
+ return false;
+ Parser.Lex();
+ unsigned regKind = SparcOperand::rk_None;
+ if (matchRegisterName(Tok, RegNo, regKind)) {
+ Parser.Lex();
+ return false;
+ }
+
+ return Error(StartLoc, "invalid register name");
+}
+
+static void applyMnemonicAliases(StringRef &Mnemonic, uint64_t Features,
+ unsigned VariantID);
+
+bool SparcAsmParser::ParseInstruction(ParseInstructionInfo &Info,
+ StringRef Name, SMLoc NameLoc,
+ OperandVector &Operands) {
+
+ // First operand in MCInst is instruction mnemonic.
+ Operands.push_back(SparcOperand::CreateToken(Name, NameLoc));
+
+ // apply mnemonic aliases, if any, so that we can parse operands correctly.
+ applyMnemonicAliases(Name, getAvailableFeatures(), 0);
+
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ // Read the first operand.
+ if (getLexer().is(AsmToken::Comma)) {
+ if (parseBranchModifiers(Operands) != MatchOperand_Success) {
+ SMLoc Loc = getLexer().getLoc();
+ return Error(Loc, "unexpected token");
+ }
+ }
+ if (parseOperand(Operands, Name) != MatchOperand_Success) {
+ SMLoc Loc = getLexer().getLoc();
+ return Error(Loc, "unexpected token");
+ }
+
+ while (getLexer().is(AsmToken::Comma) || getLexer().is(AsmToken::Plus)) {
+ if (getLexer().is(AsmToken::Plus)) {
+        // Plus tokens are significant in software_traps (p83, sparcv8.pdf).
+        // We must capture them.
+        Operands.push_back(
+            SparcOperand::CreateToken("+", Parser.getTok().getLoc()));
+ }
+ Parser.Lex(); // Eat the comma or plus.
+ // Parse and remember the operand.
+ if (parseOperand(Operands, Name) != MatchOperand_Success) {
+ SMLoc Loc = getLexer().getLoc();
+ return Error(Loc, "unexpected token");
+ }
+ }
+ }
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ SMLoc Loc = getLexer().getLoc();
+ return Error(Loc, "unexpected token");
+ }
+ Parser.Lex(); // Consume the EndOfStatement.
+ return false;
+}
+
+bool SparcAsmParser::
+ParseDirective(AsmToken DirectiveID)
+{
+ StringRef IDVal = DirectiveID.getString();
+
+ if (IDVal == ".byte")
+ return parseDirectiveWord(1, DirectiveID.getLoc());
+
+ if (IDVal == ".half")
+ return parseDirectiveWord(2, DirectiveID.getLoc());
+
+ if (IDVal == ".word")
+ return parseDirectiveWord(4, DirectiveID.getLoc());
+
+ if (IDVal == ".nword")
+ return parseDirectiveWord(is64Bit() ? 8 : 4, DirectiveID.getLoc());
+
+ if (is64Bit() && IDVal == ".xword")
+ return parseDirectiveWord(8, DirectiveID.getLoc());
+
+ if (IDVal == ".register") {
+ // For now, ignore .register directive.
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+ if (IDVal == ".proc") {
+ // For compatibility, ignore this directive.
+ // (It's supposed to be an "optimization" in the Sun assembler)
+ Parser.eatToEndOfStatement();
+ return false;
+ }
+
+  // Let the MC layer handle other directives.
+ return true;
+}
+
+bool SparcAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
+ if (getLexer().isNot(AsmToken::EndOfStatement)) {
+ while (true) {
+ const MCExpr *Value;
+ if (getParser().parseExpression(Value))
+ return true;
+
+ getParser().getStreamer().EmitValue(Value, Size);
+
+ if (getLexer().is(AsmToken::EndOfStatement))
+ break;
+
+ // FIXME: Improve diagnostic.
+ if (getLexer().isNot(AsmToken::Comma))
+ return Error(L, "unexpected token in directive");
+ Parser.Lex();
+ }
+ }
+ Parser.Lex();
+ return false;
+}
+
+OperandMatchResultTy
+SparcAsmParser::parseMEMOperand(OperandVector &Operands) {
+ SMLoc S, E;
+ unsigned BaseReg = 0;
+
+ if (ParseRegister(BaseReg, S, E)) {
+ return MatchOperand_NoMatch;
+ }
+
+ switch (getLexer().getKind()) {
+ default: return MatchOperand_NoMatch;
+
+ case AsmToken::Comma:
+ case AsmToken::RBrac:
+ case AsmToken::EndOfStatement:
+ Operands.push_back(SparcOperand::CreateMEMr(BaseReg, S, E));
+ return MatchOperand_Success;
+
+  case AsmToken::Plus:
+ Parser.Lex(); // Eat the '+'
+ break;
+ case AsmToken::Minus:
+ break;
+ }
+
+ std::unique_ptr<SparcOperand> Offset;
+ OperandMatchResultTy ResTy = parseSparcAsmOperand(Offset);
+ if (ResTy != MatchOperand_Success || !Offset)
+ return MatchOperand_NoMatch;
+
+ Operands.push_back(
+ Offset->isImm() ? SparcOperand::MorphToMEMri(BaseReg, std::move(Offset))
+ : SparcOperand::MorphToMEMrr(BaseReg, std::move(Offset)));
+
+ return MatchOperand_Success;
+}
+
+OperandMatchResultTy
+SparcAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
+
+ OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
+
+ // If there wasn't a custom match, try the generic matcher below. Otherwise,
+ // there was a match, but an error occurred, in which case, just return that
+ // the operand parsing failed.
+ if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail)
+ return ResTy;
+
+ if (getLexer().is(AsmToken::LBrac)) {
+ // Memory operand
+ Operands.push_back(SparcOperand::CreateToken("[",
+ Parser.getTok().getLoc()));
+ Parser.Lex(); // Eat the [
+
+ if (Mnemonic == "cas" || Mnemonic == "casx" || Mnemonic == "casa") {
+ SMLoc S = Parser.getTok().getLoc();
+ if (getLexer().getKind() != AsmToken::Percent)
+ return MatchOperand_NoMatch;
+ Parser.Lex(); // eat %
+
+ unsigned RegNo, RegKind;
+ if (!matchRegisterName(Parser.getTok(), RegNo, RegKind))
+ return MatchOperand_NoMatch;
+
+ Parser.Lex(); // Eat the identifier token.
+ SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer()-1);
+ Operands.push_back(SparcOperand::CreateReg(RegNo, RegKind, S, E));
+ ResTy = MatchOperand_Success;
+ } else {
+ ResTy = parseMEMOperand(Operands);
+ }
+
+ if (ResTy != MatchOperand_Success)
+ return ResTy;
+
+ if (!getLexer().is(AsmToken::RBrac))
+ return MatchOperand_ParseFail;
+
+ Operands.push_back(SparcOperand::CreateToken("]",
+ Parser.getTok().getLoc()));
+ Parser.Lex(); // Eat the ]
+
+ // Parse an optional address-space identifier after the address.
+ if (getLexer().is(AsmToken::Integer)) {
+ std::unique_ptr<SparcOperand> Op;
+ ResTy = parseSparcAsmOperand(Op, false);
+ if (ResTy != MatchOperand_Success || !Op)
+ return MatchOperand_ParseFail;
+ Operands.push_back(std::move(Op));
+ }
+ return MatchOperand_Success;
+ }
+
+ std::unique_ptr<SparcOperand> Op;
+
+ ResTy = parseSparcAsmOperand(Op, (Mnemonic == "call"));
+ if (ResTy != MatchOperand_Success || !Op)
+ return MatchOperand_ParseFail;
+
+ // Push the parsed operand into the list of operands
+ Operands.push_back(std::move(Op));
+
+ return MatchOperand_Success;
+}
+
+OperandMatchResultTy
+SparcAsmParser::parseSparcAsmOperand(std::unique_ptr<SparcOperand> &Op,
+ bool isCall) {
+ SMLoc S = Parser.getTok().getLoc();
+ SMLoc E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ const MCExpr *EVal;
+
+ Op = nullptr;
+ switch (getLexer().getKind()) {
+ default: break;
+
+ case AsmToken::Percent:
+ Parser.Lex(); // Eat the '%'.
+ unsigned RegNo;
+ unsigned RegKind;
+ if (matchRegisterName(Parser.getTok(), RegNo, RegKind)) {
+ StringRef name = Parser.getTok().getString();
+ Parser.Lex(); // Eat the identifier token.
+ E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ switch (RegNo) {
+ default:
+ Op = SparcOperand::CreateReg(RegNo, RegKind, S, E);
+ break;
+ case Sparc::PSR:
+ Op = SparcOperand::CreateToken("%psr", S);
+ break;
+ case Sparc::FSR:
+ Op = SparcOperand::CreateToken("%fsr", S);
+ break;
+ case Sparc::FQ:
+ Op = SparcOperand::CreateToken("%fq", S);
+ break;
+ case Sparc::CPSR:
+ Op = SparcOperand::CreateToken("%csr", S);
+ break;
+ case Sparc::CPQ:
+ Op = SparcOperand::CreateToken("%cq", S);
+ break;
+ case Sparc::WIM:
+ Op = SparcOperand::CreateToken("%wim", S);
+ break;
+ case Sparc::TBR:
+ Op = SparcOperand::CreateToken("%tbr", S);
+ break;
+ case Sparc::ICC:
+ if (name == "xcc")
+ Op = SparcOperand::CreateToken("%xcc", S);
+ else
+ Op = SparcOperand::CreateToken("%icc", S);
+ break;
+ }
+ break;
+ }
+ if (matchSparcAsmModifiers(EVal, E)) {
+ E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ Op = SparcOperand::CreateImm(EVal, S, E);
+ }
+ break;
+
+ case AsmToken::Minus:
+ case AsmToken::Integer:
+ case AsmToken::LParen:
+ case AsmToken::Dot:
+ if (!getParser().parseExpression(EVal, E))
+ Op = SparcOperand::CreateImm(EVal, S, E);
+ break;
+
+ case AsmToken::Identifier: {
+ StringRef Identifier;
+ if (!getParser().parseIdentifier(Identifier)) {
+ E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
+ MCSymbol *Sym = getContext().getOrCreateSymbol(Identifier);
+
+ const MCExpr *Res = MCSymbolRefExpr::create(Sym, MCSymbolRefExpr::VK_None,
+ getContext());
+ if (isCall && getContext().getObjectFileInfo()->isPositionIndependent())
+ Res = SparcMCExpr::create(SparcMCExpr::VK_Sparc_WPLT30, Res,
+ getContext());
+ Op = SparcOperand::CreateImm(Res, S, E);
+ }
+ break;
+ }
+ }
+ return (Op) ? MatchOperand_Success : MatchOperand_ParseFail;
+}
+
+OperandMatchResultTy
+SparcAsmParser::parseBranchModifiers(OperandVector &Operands) {
+ // parse (,a|,pn|,pt)+
+
+ while (getLexer().is(AsmToken::Comma)) {
+ Parser.Lex(); // Eat the comma
+
+ if (!getLexer().is(AsmToken::Identifier))
+ return MatchOperand_ParseFail;
+ StringRef modName = Parser.getTok().getString();
+ if (modName == "a" || modName == "pn" || modName == "pt") {
+ Operands.push_back(SparcOperand::CreateToken(modName,
+ Parser.getTok().getLoc()));
+ Parser.Lex(); // eat the identifier.
+ }
+ }
+ return MatchOperand_Success;
+}
+
+bool SparcAsmParser::matchRegisterName(const AsmToken &Tok, unsigned &RegNo,
+ unsigned &RegKind) {
+ int64_t intVal = 0;
+ RegNo = 0;
+ RegKind = SparcOperand::rk_None;
+ if (Tok.is(AsmToken::Identifier)) {
+ StringRef name = Tok.getString();
+
+ // %fp
+ if (name.equals("fp")) {
+ RegNo = Sparc::I6;
+ RegKind = SparcOperand::rk_IntReg;
+ return true;
+ }
+ // %sp
+ if (name.equals("sp")) {
+ RegNo = Sparc::O6;
+ RegKind = SparcOperand::rk_IntReg;
+ return true;
+ }
+
+ if (name.equals("y")) {
+ RegNo = Sparc::Y;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ if (name.substr(0, 3).equals_lower("asr")
+ && !name.substr(3).getAsInteger(10, intVal)
+ && intVal > 0 && intVal < 32) {
+ RegNo = ASRRegs[intVal];
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ // %fprs is an alias of %asr6.
+ if (name.equals("fprs")) {
+ RegNo = ASRRegs[6];
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ if (name.equals("icc")) {
+ RegNo = Sparc::ICC;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ if (name.equals("psr")) {
+ RegNo = Sparc::PSR;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ if (name.equals("fsr")) {
+ RegNo = Sparc::FSR;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ if (name.equals("fq")) {
+ RegNo = Sparc::FQ;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ if (name.equals("csr")) {
+ RegNo = Sparc::CPSR;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ if (name.equals("cq")) {
+ RegNo = Sparc::CPQ;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ if (name.equals("wim")) {
+ RegNo = Sparc::WIM;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ if (name.equals("tbr")) {
+ RegNo = Sparc::TBR;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ if (name.equals("xcc")) {
+      // FIXME: check 64bit.
+ RegNo = Sparc::ICC;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ // %fcc0 - %fcc3
+ if (name.substr(0, 3).equals_lower("fcc")
+ && !name.substr(3).getAsInteger(10, intVal)
+ && intVal < 4) {
+ // FIXME: check 64bit and handle %fcc1 - %fcc3
+ RegNo = Sparc::FCC0 + intVal;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+
+ // %g0 - %g7
+ if (name.substr(0, 1).equals_lower("g")
+ && !name.substr(1).getAsInteger(10, intVal)
+ && intVal < 8) {
+ RegNo = IntRegs[intVal];
+ RegKind = SparcOperand::rk_IntReg;
+ return true;
+ }
+ // %o0 - %o7
+ if (name.substr(0, 1).equals_lower("o")
+ && !name.substr(1).getAsInteger(10, intVal)
+ && intVal < 8) {
+ RegNo = IntRegs[8 + intVal];
+ RegKind = SparcOperand::rk_IntReg;
+ return true;
+ }
+ if (name.substr(0, 1).equals_lower("l")
+ && !name.substr(1).getAsInteger(10, intVal)
+ && intVal < 8) {
+ RegNo = IntRegs[16 + intVal];
+ RegKind = SparcOperand::rk_IntReg;
+ return true;
+ }
+ if (name.substr(0, 1).equals_lower("i")
+ && !name.substr(1).getAsInteger(10, intVal)
+ && intVal < 8) {
+ RegNo = IntRegs[24 + intVal];
+ RegKind = SparcOperand::rk_IntReg;
+ return true;
+ }
+ // %f0 - %f31
+ if (name.substr(0, 1).equals_lower("f")
+ && !name.substr(1, 2).getAsInteger(10, intVal) && intVal < 32) {
+ RegNo = FloatRegs[intVal];
+ RegKind = SparcOperand::rk_FloatReg;
+ return true;
+ }
+ // %f32 - %f62
+ if (name.substr(0, 1).equals_lower("f")
+ && !name.substr(1, 2).getAsInteger(10, intVal)
+ && intVal >= 32 && intVal <= 62 && (intVal % 2 == 0)) {
+ // FIXME: Check V9
+ RegNo = DoubleRegs[intVal/2];
+ RegKind = SparcOperand::rk_DoubleReg;
+ return true;
+ }
+
+ // %r0 - %r31
+ if (name.substr(0, 1).equals_lower("r")
+        && !name.substr(1, 2).getAsInteger(10, intVal) && intVal < 32) {
+ RegNo = IntRegs[intVal];
+ RegKind = SparcOperand::rk_IntReg;
+ return true;
+ }
+
+ // %c0 - %c31
+ if (name.substr(0, 1).equals_lower("c")
+ && !name.substr(1).getAsInteger(10, intVal)
+ && intVal < 32) {
+ RegNo = CoprocRegs[intVal];
+ RegKind = SparcOperand::rk_CoprocReg;
+ return true;
+ }
+
+ if (name.equals("tpc")) {
+ RegNo = Sparc::TPC;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("tnpc")) {
+ RegNo = Sparc::TNPC;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("tstate")) {
+ RegNo = Sparc::TSTATE;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("tt")) {
+ RegNo = Sparc::TT;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("tick")) {
+ RegNo = Sparc::TICK;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("tba")) {
+ RegNo = Sparc::TBA;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("pstate")) {
+ RegNo = Sparc::PSTATE;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("tl")) {
+ RegNo = Sparc::TL;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("pil")) {
+ RegNo = Sparc::PIL;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("cwp")) {
+ RegNo = Sparc::CWP;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("cansave")) {
+ RegNo = Sparc::CANSAVE;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("canrestore")) {
+ RegNo = Sparc::CANRESTORE;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("cleanwin")) {
+ RegNo = Sparc::CLEANWIN;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("otherwin")) {
+ RegNo = Sparc::OTHERWIN;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ if (name.equals("wstate")) {
+ RegNo = Sparc::WSTATE;
+ RegKind = SparcOperand::rk_Special;
+ return true;
+ }
+ }
+ return false;
+}
+
+// Determine if an expression contains a reference to the symbol
+// "_GLOBAL_OFFSET_TABLE_".
+static bool hasGOTReference(const MCExpr *Expr) {
+ switch (Expr->getKind()) {
+ case MCExpr::Target:
+ if (const SparcMCExpr *SE = dyn_cast<SparcMCExpr>(Expr))
+ return hasGOTReference(SE->getSubExpr());
+ break;
+
+ case MCExpr::Constant:
+ break;
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr);
+ return hasGOTReference(BE->getLHS()) || hasGOTReference(BE->getRHS());
+ }
+
+ case MCExpr::SymbolRef: {
+ const MCSymbolRefExpr &SymRef = *cast<MCSymbolRefExpr>(Expr);
+ return (SymRef.getSymbol().getName() == "_GLOBAL_OFFSET_TABLE_");
+ }
+
+ case MCExpr::Unary:
+ return hasGOTReference(cast<MCUnaryExpr>(Expr)->getSubExpr());
+ }
+ return false;
+}
+
+const SparcMCExpr *
+SparcAsmParser::adjustPICRelocation(SparcMCExpr::VariantKind VK,
+ const MCExpr *subExpr) {
+ // When in PIC mode, "%lo(...)" and "%hi(...)" behave differently.
+  // If the expression contains _GLOBAL_OFFSET_TABLE_, it is actually a
+  // %pc10 or %pc22 relocation. Otherwise, it is interpreted as a %got10
+  // or %got22 relocation.
+
+ if (getContext().getObjectFileInfo()->isPositionIndependent()) {
+ switch(VK) {
+ default: break;
+ case SparcMCExpr::VK_Sparc_LO:
+ VK = (hasGOTReference(subExpr) ? SparcMCExpr::VK_Sparc_PC10
+ : SparcMCExpr::VK_Sparc_GOT10);
+ break;
+ case SparcMCExpr::VK_Sparc_HI:
+ VK = (hasGOTReference(subExpr) ? SparcMCExpr::VK_Sparc_PC22
+ : SparcMCExpr::VK_Sparc_GOT22);
+ break;
+ }
+ }
+
+ return SparcMCExpr::create(VK, subExpr, getContext());
+}
+
+bool SparcAsmParser::matchSparcAsmModifiers(const MCExpr *&EVal,
+ SMLoc &EndLoc) {
+ AsmToken Tok = Parser.getTok();
+ if (!Tok.is(AsmToken::Identifier))
+ return false;
+
+ StringRef name = Tok.getString();
+
+ SparcMCExpr::VariantKind VK = SparcMCExpr::parseVariantKind(name);
+
+ if (VK == SparcMCExpr::VK_Sparc_None)
+ return false;
+
+ Parser.Lex(); // Eat the identifier.
+ if (Parser.getTok().getKind() != AsmToken::LParen)
+ return false;
+
+ Parser.Lex(); // Eat the LParen token.
+ const MCExpr *subExpr;
+ if (Parser.parseParenExpression(subExpr, EndLoc))
+ return false;
+
+ EVal = adjustPICRelocation(VK, subExpr);
+ return true;
+}
+
+extern "C" void LLVMInitializeSparcAsmParser() {
+ RegisterMCAsmParser<SparcAsmParser> A(getTheSparcTarget());
+ RegisterMCAsmParser<SparcAsmParser> B(getTheSparcV9Target());
+ RegisterMCAsmParser<SparcAsmParser> C(getTheSparcelTarget());
+}
+
+#define GET_REGISTER_MATCHER
+#define GET_MATCHER_IMPLEMENTATION
+#include "SparcGenAsmMatcher.inc"
+
+unsigned SparcAsmParser::validateTargetOperandClass(MCParsedAsmOperand &GOp,
+ unsigned Kind) {
+ SparcOperand &Op = (SparcOperand &)GOp;
+ if (Op.isFloatOrDoubleReg()) {
+ switch (Kind) {
+ default: break;
+ case MCK_DFPRegs:
+ if (!Op.isFloatReg() || SparcOperand::MorphToDoubleReg(Op))
+ return MCTargetAsmParser::Match_Success;
+ break;
+ case MCK_QFPRegs:
+ if (SparcOperand::MorphToQuadReg(Op))
+ return MCTargetAsmParser::Match_Success;
+ break;
+ }
+ }
+ if (Op.isIntReg() && Kind == MCK_IntPair) {
+ if (SparcOperand::MorphToIntPairReg(Op))
+ return MCTargetAsmParser::Match_Success;
+ }
+ if (Op.isCoprocReg() && Kind == MCK_CoprocPair) {
+ if (SparcOperand::MorphToCoprocPairReg(Op))
+ return MCTargetAsmParser::Match_Success;
+ }
+ return Match_InvalidOperand;
+}
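
For reference, expandSET above implements the standard SPARC idiom for
materializing a 32-bit constant: a value that fits the signed 13-bit immediate
field needs only a single 'or', while anything larger needs 'sethi' for the
upper 22 bits, followed by an 'or' for the low 10 bits when those are nonzero.
The following standalone C++ sketch of that decision is illustrative only and
not part of the patch (expandSetSketch is a hypothetical name; it follows the
V8 case, since on V9 the negative imm13 range is excluded, as the comment in
expandSET explains):

    #include <cstdint>
    #include <cstdio>

    static void expandSetSketch(int32_t v) {
      // Signed 13-bit immediates fit a single 'or %g0, v, %rd'.
      if (v >= -4096 && v < 4096) {
        std::printf("or %%g0, %d, %%rd\n", v);
        return;
      }
      // Otherwise 'sethi' materializes the upper 22 bits (%hi)...
      std::printf("sethi %%hi(0x%x), %%rd\n", (uint32_t)v);
      // ...and 'or' fills in the low 10 bits (%lo), unless they are zero.
      if ((uint32_t)v & 0x3ff)
        std::printf("or %%rd, %%lo(0x%x), %%rd\n", (uint32_t)v);
    }

    int main() {
      expandSetSketch(42);         // one 'or'
      expandSetSketch(0x12345678); // sethi + or
      expandSetSketch(0x12345400); // low 10 bits zero: sethi only
    }
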
diff --git a/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp b/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
new file mode 100644
index 000000000000..9b1d0f5bf3c9
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/DelaySlotFiller.cpp
@@ -0,0 +1,512 @@
+//===-- DelaySlotFiller.cpp - SPARC delay slot filler ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a simple local pass that attempts to fill delay slots with useful
+// instructions. If no instructions can be moved into the delay slot, then a
+// NOP is placed.
+//===----------------------------------------------------------------------===//
+
+#include "Sparc.h"
+#include "SparcSubtarget.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetMachine.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "delay-slot-filler"
+
+STATISTIC(FilledSlots, "Number of delay slots filled");
+
+static cl::opt<bool> DisableDelaySlotFiller(
+ "disable-sparc-delay-filler",
+ cl::init(false),
+ cl::desc("Disable the Sparc delay slot filler."),
+ cl::Hidden);
+
+namespace {
+ struct Filler : public MachineFunctionPass {
+ const SparcSubtarget *Subtarget;
+
+ static char ID;
+ Filler() : MachineFunctionPass(ID) {}
+
+ StringRef getPassName() const override { return "SPARC Delay Slot Filler"; }
+
+ bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
+ bool runOnMachineFunction(MachineFunction &F) override {
+ bool Changed = false;
+ Subtarget = &F.getSubtarget<SparcSubtarget>();
+
+ // This pass invalidates liveness information when it reorders
+    // instructions to fill delay slots.
+ F.getRegInfo().invalidateLiveness();
+
+ for (MachineFunction::iterator FI = F.begin(), FE = F.end();
+ FI != FE; ++FI)
+ Changed |= runOnMachineBasicBlock(*FI);
+ return Changed;
+ }
+
+ MachineFunctionProperties getRequiredProperties() const override {
+ return MachineFunctionProperties().set(
+ MachineFunctionProperties::Property::NoVRegs);
+ }
+
+ void insertCallDefsUses(MachineBasicBlock::iterator MI,
+ SmallSet<unsigned, 32>& RegDefs,
+ SmallSet<unsigned, 32>& RegUses);
+
+ void insertDefsUses(MachineBasicBlock::iterator MI,
+ SmallSet<unsigned, 32>& RegDefs,
+ SmallSet<unsigned, 32>& RegUses);
+
+ bool IsRegInSet(SmallSet<unsigned, 32>& RegSet,
+ unsigned Reg);
+
+ bool delayHasHazard(MachineBasicBlock::iterator candidate,
+ bool &sawLoad, bool &sawStore,
+ SmallSet<unsigned, 32> &RegDefs,
+ SmallSet<unsigned, 32> &RegUses);
+
+ MachineBasicBlock::iterator
+ findDelayInstr(MachineBasicBlock &MBB, MachineBasicBlock::iterator slot);
+
+ bool needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize);
+
+ bool tryCombineRestoreWithPrevInst(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI);
+
+ };
+ char Filler::ID = 0;
+} // end of anonymous namespace
+
+/// createSparcDelaySlotFillerPass - Returns a pass that fills in delay
+/// slots in Sparc MachineFunctions
+///
+FunctionPass *llvm::createSparcDelaySlotFillerPass() {
+ return new Filler;
+}
+
+
+/// runOnMachineBasicBlock - Fill in delay slots for the given basic block.
+/// We assume there is only one delay slot per delayed instruction.
+///
+bool Filler::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
+ bool Changed = false;
+ Subtarget = &MBB.getParent()->getSubtarget<SparcSubtarget>();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+
+ for (MachineBasicBlock::iterator I = MBB.begin(); I != MBB.end(); ) {
+ MachineBasicBlock::iterator MI = I;
+ ++I;
+
+    // If MI is a restore, try combining it with the previous instruction.
+ if (!DisableDelaySlotFiller &&
+ (MI->getOpcode() == SP::RESTORErr
+ || MI->getOpcode() == SP::RESTOREri)) {
+ Changed |= tryCombineRestoreWithPrevInst(MBB, MI);
+ continue;
+ }
+
+ // TODO: If we ever want to support v7, this needs to be extended
+ // to cover all floating point operations.
+ if (!Subtarget->isV9() &&
+ (MI->getOpcode() == SP::FCMPS || MI->getOpcode() == SP::FCMPD
+ || MI->getOpcode() == SP::FCMPQ)) {
+ BuildMI(MBB, I, MI->getDebugLoc(), TII->get(SP::NOP));
+ Changed = true;
+ continue;
+ }
+
+ // If MI has no delay slot, skip.
+ if (!MI->hasDelaySlot())
+ continue;
+
+ MachineBasicBlock::iterator D = MBB.end();
+
+ if (!DisableDelaySlotFiller)
+ D = findDelayInstr(MBB, MI);
+
+ ++FilledSlots;
+ Changed = true;
+
+ if (D == MBB.end())
+ BuildMI(MBB, I, MI->getDebugLoc(), TII->get(SP::NOP));
+ else
+ MBB.splice(I, &MBB, D);
+
+ unsigned structSize = 0;
+ if (needsUnimp(MI, structSize)) {
+ MachineBasicBlock::iterator J = MI;
+ ++J; // skip the delay filler.
+ assert (J != MBB.end() && "MI needs a delay instruction.");
+ BuildMI(MBB, ++J, MI->getDebugLoc(),
+ TII->get(SP::UNIMP)).addImm(structSize);
+ // Bundle the delay filler and unimp with the instruction.
+ MIBundleBuilder(MBB, MachineBasicBlock::iterator(MI), J);
+ } else {
+ MIBundleBuilder(MBB, MachineBasicBlock::iterator(MI), I);
+ }
+ }
+ return Changed;
+}
+
+MachineBasicBlock::iterator
+Filler::findDelayInstr(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator slot)
+{
+ SmallSet<unsigned, 32> RegDefs;
+ SmallSet<unsigned, 32> RegUses;
+ bool sawLoad = false;
+ bool sawStore = false;
+
+ if (slot == MBB.begin())
+ return MBB.end();
+
+ if (slot->getOpcode() == SP::RET || slot->getOpcode() == SP::TLS_CALL)
+ return MBB.end();
+
+ if (slot->getOpcode() == SP::RETL) {
+ MachineBasicBlock::iterator J = slot;
+ --J;
+
+ if (J->getOpcode() == SP::RESTORErr
+ || J->getOpcode() == SP::RESTOREri) {
+ // change retl to ret.
+ slot->setDesc(Subtarget->getInstrInfo()->get(SP::RET));
+ return J;
+ }
+ }
+
+ // Call's delay filler can def some of call's uses.
+ if (slot->isCall())
+ insertCallDefsUses(slot, RegDefs, RegUses);
+ else
+ insertDefsUses(slot, RegDefs, RegUses);
+
+ bool done = false;
+
+ MachineBasicBlock::iterator I = slot;
+
+ while (!done) {
+ done = (I == MBB.begin());
+
+ if (!done)
+ --I;
+
+ // skip debug value
+ if (I->isDebugValue())
+ continue;
+
+ if (I->hasUnmodeledSideEffects() || I->isInlineAsm() || I->isPosition() ||
+ I->hasDelaySlot() || I->isBundledWithSucc())
+ break;
+
+ if (delayHasHazard(I, sawLoad, sawStore, RegDefs, RegUses)) {
+ insertDefsUses(I, RegDefs, RegUses);
+ continue;
+ }
+
+ return I;
+ }
+ return MBB.end();
+}
+
+bool Filler::delayHasHazard(MachineBasicBlock::iterator candidate,
+ bool &sawLoad,
+ bool &sawStore,
+ SmallSet<unsigned, 32> &RegDefs,
+ SmallSet<unsigned, 32> &RegUses)
+{
+
+ if (candidate->isImplicitDef() || candidate->isKill())
+ return true;
+
+ if (candidate->mayLoad()) {
+ sawLoad = true;
+ if (sawStore)
+ return true;
+ }
+
+ if (candidate->mayStore()) {
+ if (sawStore)
+ return true;
+ sawStore = true;
+ if (sawLoad)
+ return true;
+ }
+
+ for (unsigned i = 0, e = candidate->getNumOperands(); i!= e; ++i) {
+ const MachineOperand &MO = candidate->getOperand(i);
+ if (!MO.isReg())
+ continue; // skip
+
+ unsigned Reg = MO.getReg();
+
+ if (MO.isDef()) {
+ // check whether Reg is defined or used before delay slot.
+ if (IsRegInSet(RegDefs, Reg) || IsRegInSet(RegUses, Reg))
+ return true;
+ }
+ if (MO.isUse()) {
+ // check whether Reg is defined before delay slot.
+ if (IsRegInSet(RegDefs, Reg))
+ return true;
+ }
+ }
+
+ unsigned Opcode = candidate->getOpcode();
+  // LD and LDD may have NOPs inserted afterwards in the case of some LEON
+  // processors, so we can't use the delay slot if this feature is switched on.
+  if (Subtarget->insertNOPLoad() &&
+      Opcode >= SP::LDDArr && Opcode <= SP::LDrr)
+    return true;
+
+  // Same as above for FDIV and FSQRT on some LEON processors.
+  if (Subtarget->fixAllFDIVSQRT() &&
+      Opcode >= SP::FDIVD && Opcode <= SP::FSQRTD)
+    return true;
+
+  return false;
+}
+
+void Filler::insertCallDefsUses(MachineBasicBlock::iterator MI,
+ SmallSet<unsigned, 32>& RegDefs,
+ SmallSet<unsigned, 32>& RegUses)
+{
+  // Call defines o7, which is visible to the instruction in the delay slot.
+ RegDefs.insert(SP::O7);
+
+ switch(MI->getOpcode()) {
+ default: llvm_unreachable("Unknown opcode.");
+ case SP::CALL: break;
+ case SP::CALLrr:
+ case SP::CALLri:
+ assert(MI->getNumOperands() >= 2);
+ const MachineOperand &Reg = MI->getOperand(0);
+ assert(Reg.isReg() && "CALL first operand is not a register.");
+ assert(Reg.isUse() && "CALL first operand is not a use.");
+ RegUses.insert(Reg.getReg());
+
+ const MachineOperand &Operand1 = MI->getOperand(1);
+ if (Operand1.isImm() || Operand1.isGlobal())
+ break;
+ assert(Operand1.isReg() && "CALLrr second operand is not a register.");
+ assert(Operand1.isUse() && "CALLrr second operand is not a use.");
+ RegUses.insert(Operand1.getReg());
+ break;
+ }
+}
+
+// Insert Defs and Uses of MI into the sets RegDefs and RegUses.
+void Filler::insertDefsUses(MachineBasicBlock::iterator MI,
+ SmallSet<unsigned, 32>& RegDefs,
+ SmallSet<unsigned, 32>& RegUses)
+{
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isReg())
+ continue;
+
+ unsigned Reg = MO.getReg();
+ if (Reg == 0)
+ continue;
+ if (MO.isDef())
+ RegDefs.insert(Reg);
+ if (MO.isUse()) {
+ // Implicit register uses of retl are return values and
+ // retl does not use them.
+ if (MO.isImplicit() && MI->getOpcode() == SP::RETL)
+ continue;
+ RegUses.insert(Reg);
+ }
+ }
+}
+
+// Returns true if Reg or any of its aliases is in RegSet.
+bool Filler::IsRegInSet(SmallSet<unsigned, 32>& RegSet, unsigned Reg)
+{
+ // Check Reg and all aliased Registers.
+ for (MCRegAliasIterator AI(Reg, Subtarget->getRegisterInfo(), true);
+ AI.isValid(); ++AI)
+ if (RegSet.count(*AI))
+ return true;
+ return false;
+}
+
+bool Filler::needsUnimp(MachineBasicBlock::iterator I, unsigned &StructSize)
+{
+ if (!I->isCall())
+ return false;
+
+ unsigned structSizeOpNum = 0;
+ switch (I->getOpcode()) {
+ default: llvm_unreachable("Unknown call opcode.");
+ case SP::CALL: structSizeOpNum = 1; break;
+ case SP::CALLrr:
+ case SP::CALLri: structSizeOpNum = 2; break;
+ case SP::TLS_CALL: return false;
+ }
+
+ const MachineOperand &MO = I->getOperand(structSizeOpNum);
+ if (!MO.isImm())
+ return false;
+ StructSize = MO.getImm();
+ return true;
+}
+
+static bool combineRestoreADD(MachineBasicBlock::iterator RestoreMI,
+ MachineBasicBlock::iterator AddMI,
+ const TargetInstrInfo *TII)
+{
+ // Before: add <op0>, <op1>, %i[0-7]
+ // restore %g0, %g0, %i[0-7]
+ //
+ // After : restore <op0>, <op1>, %o[0-7]
+
+ unsigned reg = AddMI->getOperand(0).getReg();
+ if (reg < SP::I0 || reg > SP::I7)
+ return false;
+
+ // Erase RESTORE.
+ RestoreMI->eraseFromParent();
+
+ // Change ADD to RESTORE.
+ AddMI->setDesc(TII->get((AddMI->getOpcode() == SP::ADDrr)
+ ? SP::RESTORErr
+ : SP::RESTOREri));
+
+ // Map the destination register.
+ AddMI->getOperand(0).setReg(reg - SP::I0 + SP::O0);
+
+ return true;
+}
+
+static bool combineRestoreOR(MachineBasicBlock::iterator RestoreMI,
+ MachineBasicBlock::iterator OrMI,
+ const TargetInstrInfo *TII)
+{
+ // Before: or <op0>, <op1>, %i[0-7]
+ // restore %g0, %g0, %i[0-7]
+  //         (where <op0> or <op1> is zero)
+ //
+ // After : restore <op0>, <op1>, %o[0-7]
+
+ unsigned reg = OrMI->getOperand(0).getReg();
+ if (reg < SP::I0 || reg > SP::I7)
+ return false;
+
+  // Check whether the OR is just a copy (one source operand is %g0 or zero).
+ if (OrMI->getOpcode() == SP::ORrr
+ && OrMI->getOperand(1).getReg() != SP::G0
+ && OrMI->getOperand(2).getReg() != SP::G0)
+ return false;
+
+ if (OrMI->getOpcode() == SP::ORri
+ && OrMI->getOperand(1).getReg() != SP::G0
+ && (!OrMI->getOperand(2).isImm() || OrMI->getOperand(2).getImm() != 0))
+ return false;
+
+ // Erase RESTORE.
+ RestoreMI->eraseFromParent();
+
+ // Change OR to RESTORE.
+ OrMI->setDesc(TII->get((OrMI->getOpcode() == SP::ORrr)
+ ? SP::RESTORErr
+ : SP::RESTOREri));
+
+ // Map the destination register.
+ OrMI->getOperand(0).setReg(reg - SP::I0 + SP::O0);
+
+ return true;
+}
+
+static bool combineRestoreSETHIi(MachineBasicBlock::iterator RestoreMI,
+ MachineBasicBlock::iterator SetHiMI,
+ const TargetInstrInfo *TII)
+{
+ // Before: sethi imm3, %i[0-7]
+ // restore %g0, %g0, %g0
+ //
+ // After : restore %g0, (imm3<<10), %o[0-7]
+
+ unsigned reg = SetHiMI->getOperand(0).getReg();
+ if (reg < SP::I0 || reg > SP::I7)
+ return false;
+
+ if (!SetHiMI->getOperand(1).isImm())
+ return false;
+
+ int64_t imm = SetHiMI->getOperand(1).getImm();
+
+ // Is it a 3 bit immediate?
+ if (!isInt<3>(imm))
+ return false;
+
+ // Make it a 13 bit immediate.
+ imm = (imm << 10) & 0x1FFF;
+
+ assert(RestoreMI->getOpcode() == SP::RESTORErr);
+
+ RestoreMI->setDesc(TII->get(SP::RESTOREri));
+
+ RestoreMI->getOperand(0).setReg(reg - SP::I0 + SP::O0);
+ RestoreMI->getOperand(1).setReg(SP::G0);
+ RestoreMI->getOperand(2).ChangeToImmediate(imm);
+
+ // Erase the original SETHI.
+ SetHiMI->eraseFromParent();
+
+ return true;
+}
+
+bool Filler::tryCombineRestoreWithPrevInst(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI)
+{
+ // No previous instruction.
+ if (MBBI == MBB.begin())
+ return false;
+
+ // assert that MBBI is a "restore %g0, %g0, %g0".
+ assert(MBBI->getOpcode() == SP::RESTORErr
+ && MBBI->getOperand(0).getReg() == SP::G0
+ && MBBI->getOperand(1).getReg() == SP::G0
+ && MBBI->getOperand(2).getReg() == SP::G0);
+
+ MachineBasicBlock::iterator PrevInst = std::prev(MBBI);
+
+ // It cannot be combined with a bundled instruction.
+ if (PrevInst->isBundledWithSucc())
+ return false;
+
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+
+ switch (PrevInst->getOpcode()) {
+ default: break;
+ case SP::ADDrr:
+ case SP::ADDri: return combineRestoreADD(MBBI, PrevInst, TII); break;
+ case SP::ORrr:
+ case SP::ORri: return combineRestoreOR(MBBI, PrevInst, TII); break;
+ case SP::SETHIi: return combineRestoreSETHIi(MBBI, PrevInst, TII); break;
+ }
+  // The restore could not be combined with the previous instruction.
+ return false;
+}
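
A note on the restore-combining helpers above: they all remap the destination
from an %i register to the matching %o register via 'reg - SP::I0 + SP::O0'.
That works because 'restore' rotates the register window, and the callee's
%i[n] is the same physical register as the caller's %o[n], so the combined
'restore' writes its result exactly where the erased instruction would have
put it. A small standalone C++ sketch of the remapping and of the SETHI
immediate fold, using hypothetical stand-ins for the SP:: enums (not part of
the patch):

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-ins for the SP:: register enum values used above.
    enum Reg { I0 = 0, I7 = 7, O0 = 100, O7 = 107 };

    // Mirrors 'reg - SP::I0 + SP::O0' in combineRestoreADD/OR/SETHIi.
    static int mapInToOut(int reg) {
      assert(reg >= I0 && reg <= I7 && "only %i0-%i7 are remapped");
      return reg - I0 + O0;
    }

    // Mirrors combineRestoreSETHIi: a 3-bit sethi operand shifted into
    // bit 10 still fits the signed 13-bit 'restore' immediate field.
    static int64_t foldSethiImm(int64_t imm3) {
      return (imm3 << 10) & 0x1FFF;
    }

    int main() {
      assert(mapInToOut(I0 + 3) == O0 + 3); // %i3 -> %o3
      assert(foldSethiImm(3) == 3072); // sethi 3, %i0 -> restore %g0, 3072, %o0
    }
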
diff --git a/contrib/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp b/contrib/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
new file mode 100644
index 000000000000..8e298e8316da
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/Disassembler/SparcDisassembler.cpp
@@ -0,0 +1,670 @@
+//===- SparcDisassembler.cpp - Disassembler for Sparc -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is part of the Sparc Disassembler.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sparc.h"
+#include "SparcRegisterInfo.h"
+#include "SparcSubtarget.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCDisassembler/MCDisassembler.h"
+#include "llvm/MC/MCFixedLenDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "sparc-disassembler"
+
+typedef MCDisassembler::DecodeStatus DecodeStatus;
+
+namespace {
+
+/// A disassembler class for Sparc.
+class SparcDisassembler : public MCDisassembler {
+public:
+ SparcDisassembler(const MCSubtargetInfo &STI, MCContext &Ctx)
+ : MCDisassembler(STI, Ctx) {}
+ virtual ~SparcDisassembler() {}
+
+ DecodeStatus getInstruction(MCInst &Instr, uint64_t &Size,
+ ArrayRef<uint8_t> Bytes, uint64_t Address,
+ raw_ostream &VStream,
+ raw_ostream &CStream) const override;
+};
+}
+
+namespace llvm {
+Target &getTheSparcTarget();
+Target &getTheSparcV9Target();
+Target &getTheSparcelTarget();
+}
+
+static MCDisassembler *createSparcDisassembler(const Target &T,
+ const MCSubtargetInfo &STI,
+ MCContext &Ctx) {
+ return new SparcDisassembler(STI, Ctx);
+}
+
+
+extern "C" void LLVMInitializeSparcDisassembler() {
+ // Register the disassembler.
+ TargetRegistry::RegisterMCDisassembler(getTheSparcTarget(),
+ createSparcDisassembler);
+ TargetRegistry::RegisterMCDisassembler(getTheSparcV9Target(),
+ createSparcDisassembler);
+ TargetRegistry::RegisterMCDisassembler(getTheSparcelTarget(),
+ createSparcDisassembler);
+}
+
+static const unsigned IntRegDecoderTable[] = {
+ SP::G0, SP::G1, SP::G2, SP::G3,
+ SP::G4, SP::G5, SP::G6, SP::G7,
+ SP::O0, SP::O1, SP::O2, SP::O3,
+ SP::O4, SP::O5, SP::O6, SP::O7,
+ SP::L0, SP::L1, SP::L2, SP::L3,
+ SP::L4, SP::L5, SP::L6, SP::L7,
+ SP::I0, SP::I1, SP::I2, SP::I3,
+ SP::I4, SP::I5, SP::I6, SP::I7 };
+
+static const unsigned FPRegDecoderTable[] = {
+ SP::F0, SP::F1, SP::F2, SP::F3,
+ SP::F4, SP::F5, SP::F6, SP::F7,
+ SP::F8, SP::F9, SP::F10, SP::F11,
+ SP::F12, SP::F13, SP::F14, SP::F15,
+ SP::F16, SP::F17, SP::F18, SP::F19,
+ SP::F20, SP::F21, SP::F22, SP::F23,
+ SP::F24, SP::F25, SP::F26, SP::F27,
+ SP::F28, SP::F29, SP::F30, SP::F31 };
+
+static const unsigned DFPRegDecoderTable[] = {
+ SP::D0, SP::D16, SP::D1, SP::D17,
+ SP::D2, SP::D18, SP::D3, SP::D19,
+ SP::D4, SP::D20, SP::D5, SP::D21,
+ SP::D6, SP::D22, SP::D7, SP::D23,
+ SP::D8, SP::D24, SP::D9, SP::D25,
+ SP::D10, SP::D26, SP::D11, SP::D27,
+ SP::D12, SP::D28, SP::D13, SP::D29,
+ SP::D14, SP::D30, SP::D15, SP::D31 };
+
+static const unsigned QFPRegDecoderTable[] = {
+ SP::Q0, SP::Q8, ~0U, ~0U,
+ SP::Q1, SP::Q9, ~0U, ~0U,
+ SP::Q2, SP::Q10, ~0U, ~0U,
+ SP::Q3, SP::Q11, ~0U, ~0U,
+ SP::Q4, SP::Q12, ~0U, ~0U,
+ SP::Q5, SP::Q13, ~0U, ~0U,
+ SP::Q6, SP::Q14, ~0U, ~0U,
+ SP::Q7, SP::Q15, ~0U, ~0U } ;
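+
+// The ~0U slots mark 5-bit register fields that name no quad register, so
+// only fields whose low two bits are 0 or 1 decode successfully. For example,
+// RegNo 0 decodes to Q0, RegNo 1 to Q8, and RegNo 2 or 3 makes
+// DecodeQFPRegsRegisterClass below return Fail.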
+
+static const unsigned FCCRegDecoderTable[] = {
+ SP::FCC0, SP::FCC1, SP::FCC2, SP::FCC3 };
+
+static const unsigned ASRRegDecoderTable[] = {
+ SP::Y, SP::ASR1, SP::ASR2, SP::ASR3,
+ SP::ASR4, SP::ASR5, SP::ASR6, SP::ASR7,
+ SP::ASR8, SP::ASR9, SP::ASR10, SP::ASR11,
+ SP::ASR12, SP::ASR13, SP::ASR14, SP::ASR15,
+ SP::ASR16, SP::ASR17, SP::ASR18, SP::ASR19,
+ SP::ASR20, SP::ASR21, SP::ASR22, SP::ASR23,
+ SP::ASR24, SP::ASR25, SP::ASR26, SP::ASR27,
+ SP::ASR28, SP::ASR29, SP::ASR30, SP::ASR31};
+
+static const unsigned PRRegDecoderTable[] = {
+ SP::TPC, SP::TNPC, SP::TSTATE, SP::TT, SP::TICK, SP::TBA, SP::PSTATE,
+ SP::TL, SP::PIL, SP::CWP, SP::CANSAVE, SP::CANRESTORE, SP::CLEANWIN,
+ SP::OTHERWIN, SP::WSTATE
+};
+
+static const uint16_t IntPairDecoderTable[] = {
+ SP::G0_G1, SP::G2_G3, SP::G4_G5, SP::G6_G7,
+ SP::O0_O1, SP::O2_O3, SP::O4_O5, SP::O6_O7,
+ SP::L0_L1, SP::L2_L3, SP::L4_L5, SP::L6_L7,
+ SP::I0_I1, SP::I2_I3, SP::I4_I5, SP::I6_I7,
+};
+
+static const unsigned CPRegDecoderTable[] = {
+ SP::C0, SP::C1, SP::C2, SP::C3,
+ SP::C4, SP::C5, SP::C6, SP::C7,
+ SP::C8, SP::C9, SP::C10, SP::C11,
+ SP::C12, SP::C13, SP::C14, SP::C15,
+ SP::C16, SP::C17, SP::C18, SP::C19,
+ SP::C20, SP::C21, SP::C22, SP::C23,
+ SP::C24, SP::C25, SP::C26, SP::C27,
+ SP::C28, SP::C29, SP::C30, SP::C31
+};
+
+
+static const uint16_t CPPairDecoderTable[] = {
+ SP::C0_C1, SP::C2_C3, SP::C4_C5, SP::C6_C7,
+ SP::C8_C9, SP::C10_C11, SP::C12_C13, SP::C14_C15,
+ SP::C16_C17, SP::C18_C19, SP::C20_C21, SP::C22_C23,
+ SP::C24_C25, SP::C26_C27, SP::C28_C29, SP::C30_C31
+};
+
+static DecodeStatus DecodeIntRegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+ unsigned Reg = IntRegDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeI64RegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+ unsigned Reg = IntRegDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+
+static DecodeStatus DecodeFPRegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+ unsigned Reg = FPRegDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+
+static DecodeStatus DecodeDFPRegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+ unsigned Reg = DFPRegDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+
+static DecodeStatus DecodeQFPRegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned Reg = QFPRegDecoderTable[RegNo];
+ if (Reg == ~0U)
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeCPRegsRegisterClass(MCInst &Inst,
+ unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+ unsigned Reg = CPRegDecoderTable[RegNo];
+ Inst.addOperand(MCOperand::createReg(Reg));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeFCCRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 3)
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::createReg(FCCRegDecoderTable[RegNo]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeASRRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::createReg(ASRRegDecoderTable[RegNo]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodePRRegsRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address,
+ const void *Decoder) {
+ if (RegNo >= array_lengthof(PRRegDecoderTable))
+ return MCDisassembler::Fail;
+ Inst.addOperand(MCOperand::createReg(PRRegDecoderTable[RegNo]));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeIntPairRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder) {
+ DecodeStatus S = MCDisassembler::Success;
+
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ if ((RegNo & 1))
+ S = MCDisassembler::SoftFail;
+
+ unsigned RegisterPair = IntPairDecoderTable[RegNo/2];
+ Inst.addOperand(MCOperand::createReg(RegisterPair));
+ return S;
+}
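+
+// For example, RegNo 2 decodes to the G2_G3 pair with Success, while the odd
+// RegNo 3 also decodes to G2_G3 but is reported as SoftFail, since a pair
+// must be named through its even register.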
+
+static DecodeStatus DecodeCPPairRegisterClass(MCInst &Inst, unsigned RegNo,
+ uint64_t Address, const void *Decoder) {
+ if (RegNo > 31)
+ return MCDisassembler::Fail;
+
+ unsigned RegisterPair = CPPairDecoderTable[RegNo/2];
+ Inst.addOperand(MCOperand::createReg(RegisterPair));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeLoadInt(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeLoadIntPair(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeLoadFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeLoadDFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeLoadQFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeLoadCP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeLoadCPPair(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeStoreInt(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeStoreIntPair(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeStoreFP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeStoreDFP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeStoreQFP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeStoreCP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeStoreCPPair(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeCall(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeSIMM13(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder);
+static DecodeStatus DecodeJMPL(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeReturn(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeSWAP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+static DecodeStatus DecodeTRAP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder);
+
+#include "SparcGenDisassemblerTables.inc"
+
+/// Read four bytes from the ArrayRef and return the 32-bit word.
+static DecodeStatus readInstruction32(ArrayRef<uint8_t> Bytes, uint64_t Address,
+ uint64_t &Size, uint32_t &Insn,
+ bool IsLittleEndian) {
+ // We want to read exactly 4 Bytes of data.
+ if (Bytes.size() < 4) {
+ Size = 0;
+ return MCDisassembler::Fail;
+ }
+
+ Insn = IsLittleEndian
+ ? (Bytes[0] << 0) | (Bytes[1] << 8) | (Bytes[2] << 16) |
+ (Bytes[3] << 24)
+ : (Bytes[3] << 0) | (Bytes[2] << 8) | (Bytes[1] << 16) |
+ (Bytes[0] << 24);
+
+ return MCDisassembler::Success;
+}
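+
+// For example, the big-endian byte sequence 01 00 00 00 yields
+// Insn == 0x01000000 (the SPARC NOP encoding), while the same bytes read as
+// little-endian ("sparcel") yield Insn == 0x00000001.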
+
+DecodeStatus SparcDisassembler::getInstruction(MCInst &Instr, uint64_t &Size,
+ ArrayRef<uint8_t> Bytes,
+ uint64_t Address,
+ raw_ostream &VStream,
+ raw_ostream &CStream) const {
+ uint32_t Insn;
+ bool isLittleEndian = getContext().getAsmInfo()->isLittleEndian();
+ DecodeStatus Result =
+ readInstruction32(Bytes, Address, Size, Insn, isLittleEndian);
+ if (Result == MCDisassembler::Fail)
+ return MCDisassembler::Fail;
+
+  // Call the auto-generated decoder function.
+  if (STI.getFeatureBits()[Sparc::FeatureV9])
+    Result = decodeInstruction(DecoderTableSparcV932, Instr, Insn, Address,
+                               this, STI);
+  else
+    Result = decodeInstruction(DecoderTableSparcV832, Instr, Insn, Address,
+                               this, STI);
+
+  if (Result != MCDisassembler::Fail) {
+    Size = 4;
+    return Result;
+  }
+
+ Result =
+ decodeInstruction(DecoderTableSparc32, Instr, Insn, Address, this, STI);
+
+ if (Result != MCDisassembler::Fail) {
+ Size = 4;
+ return Result;
+ }
+
+ return MCDisassembler::Fail;
+}
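+
+// One quick way to exercise this decoder, assuming an llvm-mc binary built
+// with the Sparc target registered:
+//
+//   echo "0x01 0x00 0x00 0x00" | llvm-mc -disassemble -triple=sparc
+//
+// which should print a single "nop".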
+
+
+typedef DecodeStatus (*DecodeFunc)(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder);
+
+static DecodeStatus DecodeMem(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder,
+ bool isLoad, DecodeFunc DecodeRD) {
+ unsigned rd = fieldFromInstruction(insn, 25, 5);
+ unsigned rs1 = fieldFromInstruction(insn, 14, 5);
+ bool isImm = fieldFromInstruction(insn, 13, 1);
+ bool hasAsi = fieldFromInstruction(insn, 23, 1); // (in op3 field)
+ unsigned asi = fieldFromInstruction(insn, 5, 8);
+ unsigned rs2 = 0;
+ unsigned simm13 = 0;
+ if (isImm)
+ simm13 = SignExtend32<13>(fieldFromInstruction(insn, 0, 13));
+ else
+ rs2 = fieldFromInstruction(insn, 0, 5);
+
+ DecodeStatus status;
+ if (isLoad) {
+ status = DecodeRD(MI, rd, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+
+ // Decode rs1.
+ status = DecodeIntRegsRegisterClass(MI, rs1, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+ // Decode imm|rs2.
+ if (isImm)
+ MI.addOperand(MCOperand::createImm(simm13));
+ else {
+ status = DecodeIntRegsRegisterClass(MI, rs2, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+
+ if (hasAsi)
+ MI.addOperand(MCOperand::createImm(asi));
+
+ if (!isLoad) {
+ status = DecodeRD(MI, rd, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+ return MCDisassembler::Success;
+}
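+
+// Worked example with an illustrative instruction word: 0xD0062008 encodes
+// "ld [%i0+8], %o0". DecodeMem extracts rd == 8 (%o0), rs1 == 24 (%i0),
+// i == 1, simm13 == 8, and hasAsi == 0, so the resulting MCInst carries the
+// operands (%o0, %i0, 8) in load order.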
+
+static DecodeStatus DecodeLoadInt(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, true,
+ DecodeIntRegsRegisterClass);
+}
+
+static DecodeStatus DecodeLoadIntPair(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, true,
+ DecodeIntPairRegisterClass);
+}
+
+static DecodeStatus DecodeLoadFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, true,
+ DecodeFPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeLoadDFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, true,
+ DecodeDFPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeLoadQFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, true,
+ DecodeQFPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeLoadCP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, true,
+ DecodeCPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeLoadCPPair(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, true,
+ DecodeCPPairRegisterClass);
+}
+
+static DecodeStatus DecodeStoreInt(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, false,
+ DecodeIntRegsRegisterClass);
+}
+
+static DecodeStatus DecodeStoreIntPair(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, false,
+ DecodeIntPairRegisterClass);
+}
+
+static DecodeStatus DecodeStoreFP(MCInst &Inst, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, false,
+ DecodeFPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeStoreDFP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, false,
+ DecodeDFPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeStoreQFP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, false,
+ DecodeQFPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeStoreCP(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, false,
+ DecodeCPRegsRegisterClass);
+}
+
+static DecodeStatus DecodeStoreCPPair(MCInst &Inst, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ return DecodeMem(Inst, insn, Address, Decoder, false,
+ DecodeCPPairRegisterClass);
+}
+
+static bool tryAddingSymbolicOperand(int64_t Value, bool isBranch,
+ uint64_t Address, uint64_t Offset,
+ uint64_t Width, MCInst &MI,
+ const void *Decoder) {
+ const MCDisassembler *Dis = static_cast<const MCDisassembler*>(Decoder);
+ return Dis->tryAddingSymbolicOperand(MI, Value, Address, isBranch,
+ Offset, Width);
+}
+
+static DecodeStatus DecodeCall(MCInst &MI, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ unsigned tgt = fieldFromInstruction(insn, 0, 30);
+ tgt <<= 2;
+ if (!tryAddingSymbolicOperand(tgt+Address, false, Address,
+ 0, 30, MI, Decoder))
+ MI.addOperand(MCOperand::createImm(tgt));
+ return MCDisassembler::Success;
+}
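+
+// For example, with disp30 == 0x10 and Address == 0x1000 the call target is
+// (0x10 << 2) + 0x1000 == 0x1040; if no symbol can be attached, the raw byte
+// offset 0x40 is emitted as an immediate operand instead.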
+
+static DecodeStatus DecodeSIMM13(MCInst &MI, unsigned insn,
+ uint64_t Address, const void *Decoder) {
+ unsigned tgt = SignExtend32<13>(fieldFromInstruction(insn, 0, 13));
+ MI.addOperand(MCOperand::createImm(tgt));
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeJMPL(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+
+ unsigned rd = fieldFromInstruction(insn, 25, 5);
+ unsigned rs1 = fieldFromInstruction(insn, 14, 5);
+ unsigned isImm = fieldFromInstruction(insn, 13, 1);
+ unsigned rs2 = 0;
+ unsigned simm13 = 0;
+ if (isImm)
+ simm13 = SignExtend32<13>(fieldFromInstruction(insn, 0, 13));
+ else
+ rs2 = fieldFromInstruction(insn, 0, 5);
+
+ // Decode RD.
+ DecodeStatus status = DecodeIntRegsRegisterClass(MI, rd, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+ // Decode RS1.
+ status = DecodeIntRegsRegisterClass(MI, rs1, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+  // Decode RS2 | SIMM13.
+ if (isImm)
+ MI.addOperand(MCOperand::createImm(simm13));
+ else {
+ status = DecodeIntRegsRegisterClass(MI, rs2, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeReturn(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+
+ unsigned rs1 = fieldFromInstruction(insn, 14, 5);
+ unsigned isImm = fieldFromInstruction(insn, 13, 1);
+ unsigned rs2 = 0;
+ unsigned simm13 = 0;
+ if (isImm)
+ simm13 = SignExtend32<13>(fieldFromInstruction(insn, 0, 13));
+ else
+ rs2 = fieldFromInstruction(insn, 0, 5);
+
+ // Decode RS1.
+ DecodeStatus status = DecodeIntRegsRegisterClass(MI, rs1, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+ // Decode RS2 | SIMM13.
+ if (isImm)
+ MI.addOperand(MCOperand::createImm(simm13));
+ else {
+ status = DecodeIntRegsRegisterClass(MI, rs2, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeSWAP(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+
+ unsigned rd = fieldFromInstruction(insn, 25, 5);
+ unsigned rs1 = fieldFromInstruction(insn, 14, 5);
+ unsigned isImm = fieldFromInstruction(insn, 13, 1);
+ bool hasAsi = fieldFromInstruction(insn, 23, 1); // (in op3 field)
+ unsigned asi = fieldFromInstruction(insn, 5, 8);
+ unsigned rs2 = 0;
+ unsigned simm13 = 0;
+ if (isImm)
+ simm13 = SignExtend32<13>(fieldFromInstruction(insn, 0, 13));
+ else
+ rs2 = fieldFromInstruction(insn, 0, 5);
+
+ // Decode RD.
+ DecodeStatus status = DecodeIntRegsRegisterClass(MI, rd, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+ // Decode RS1.
+ status = DecodeIntRegsRegisterClass(MI, rs1, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+  // Decode RS2 | SIMM13.
+ if (isImm)
+ MI.addOperand(MCOperand::createImm(simm13));
+ else {
+ status = DecodeIntRegsRegisterClass(MI, rs2, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+
+ if (hasAsi)
+ MI.addOperand(MCOperand::createImm(asi));
+
+ return MCDisassembler::Success;
+}
+
+static DecodeStatus DecodeTRAP(MCInst &MI, unsigned insn, uint64_t Address,
+ const void *Decoder) {
+
+ unsigned rs1 = fieldFromInstruction(insn, 14, 5);
+ unsigned isImm = fieldFromInstruction(insn, 13, 1);
+  unsigned cc = fieldFromInstruction(insn, 25, 4);
+ unsigned rs2 = 0;
+ unsigned imm7 = 0;
+ if (isImm)
+ imm7 = fieldFromInstruction(insn, 0, 7);
+ else
+ rs2 = fieldFromInstruction(insn, 0, 5);
+
+ // Decode RS1.
+ DecodeStatus status = DecodeIntRegsRegisterClass(MI, rs1, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+
+  // Decode RS2 | IMM7.
+ if (isImm)
+ MI.addOperand(MCOperand::createImm(imm7));
+ else {
+ status = DecodeIntRegsRegisterClass(MI, rs2, Address, Decoder);
+ if (status != MCDisassembler::Success)
+ return status;
+ }
+
+ // Decode CC
+ MI.addOperand(MCOperand::createImm(cc));
+
+ return MCDisassembler::Success;
+}
diff --git a/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp b/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp
new file mode 100644
index 000000000000..4981deae6af6
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.cpp
@@ -0,0 +1,197 @@
+//===-- SparcInstPrinter.cpp - Convert Sparc MCInst to assembly syntax ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints a Sparc MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcInstPrinter.h"
+#include "Sparc.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-printer"
+
+// The generated AsmWriter (SparcGenAsmWriter) uses "Sparc" as the target
+// namespace, but the SPARC backend uses "SP" as its namespace.
+namespace llvm {
+namespace Sparc {
+ using namespace SP;
+}
+}
+
+#define GET_INSTRUCTION_NAME
+#define PRINT_ALIAS_INSTR
+#include "SparcGenAsmWriter.inc"
+
+bool SparcInstPrinter::isV9(const MCSubtargetInfo &STI) const {
+ return (STI.getFeatureBits()[Sparc::FeatureV9]) != 0;
+}
+
+void SparcInstPrinter::printRegName(raw_ostream &OS, unsigned RegNo) const
+{
+ OS << '%' << StringRef(getRegisterName(RegNo)).lower();
+}
+
+void SparcInstPrinter::printInst(const MCInst *MI, raw_ostream &O,
+ StringRef Annot, const MCSubtargetInfo &STI) {
+ if (!printAliasInstr(MI, STI, O) && !printSparcAliasInstr(MI, STI, O))
+ printInstruction(MI, STI, O);
+ printAnnotation(O, Annot);
+}
+
+bool SparcInstPrinter::printSparcAliasInstr(const MCInst *MI,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ switch (MI->getOpcode()) {
+ default: return false;
+ case SP::JMPLrr:
+ case SP::JMPLri: {
+ if (MI->getNumOperands() != 3)
+ return false;
+ if (!MI->getOperand(0).isReg())
+ return false;
+ switch (MI->getOperand(0).getReg()) {
+ default: return false;
+ case SP::G0: // jmp $addr | ret | retl
+ if (MI->getOperand(2).isImm() &&
+ MI->getOperand(2).getImm() == 8) {
+ switch(MI->getOperand(1).getReg()) {
+ default: break;
+ case SP::I7: O << "\tret"; return true;
+ case SP::O7: O << "\tretl"; return true;
+ }
+ }
+ O << "\tjmp "; printMemOperand(MI, 1, STI, O);
+ return true;
+ case SP::O7: // call $addr
+ O << "\tcall "; printMemOperand(MI, 1, STI, O);
+ return true;
+ }
+ }
+ case SP::V9FCMPS: case SP::V9FCMPD: case SP::V9FCMPQ:
+ case SP::V9FCMPES: case SP::V9FCMPED: case SP::V9FCMPEQ: {
+ if (isV9(STI)
+ || (MI->getNumOperands() != 3)
+ || (!MI->getOperand(0).isReg())
+ || (MI->getOperand(0).getReg() != SP::FCC0))
+ return false;
+    // If this is V8, skip printing %fcc0.
+ switch(MI->getOpcode()) {
+ default:
+ case SP::V9FCMPS: O << "\tfcmps "; break;
+ case SP::V9FCMPD: O << "\tfcmpd "; break;
+ case SP::V9FCMPQ: O << "\tfcmpq "; break;
+ case SP::V9FCMPES: O << "\tfcmpes "; break;
+ case SP::V9FCMPED: O << "\tfcmped "; break;
+ case SP::V9FCMPEQ: O << "\tfcmpeq "; break;
+ }
+ printOperand(MI, 1, STI, O);
+ O << ", ";
+ printOperand(MI, 2, STI, O);
+ return true;
+ }
+ }
+}
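+
+// Examples of the aliases handled above: "jmpl %i7+8, %g0" prints as "ret",
+// "jmpl %o7+8, %g0" as "retl", "jmpl %g1+%g0, %o7" as "call %g1", and on V8
+// "fcmps %fcc0, %f0, %f1" drops the implicit %fcc0 to print
+// "fcmps %f0, %f1".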
+
+void SparcInstPrinter::printOperand(const MCInst *MI, int opNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+  const MCOperand &MO = MI->getOperand(opNum);
+
+  if (MO.isReg()) {
+    printRegName(O, MO.getReg());
+    return;
+  }
+
+ if (MO.isImm()) {
+ switch (MI->getOpcode()) {
+ default:
+ O << (int)MO.getImm();
+ return;
+
+ case SP::TICCri: // Fall through
+ case SP::TICCrr: // Fall through
+ case SP::TRAPri: // Fall through
+ case SP::TRAPrr: // Fall through
+ case SP::TXCCri: // Fall through
+ case SP::TXCCrr: // Fall through
+      // Print only the low seven bits (values up to 127).
+ O << ((int) MO.getImm() & 0x7f);
+ return;
+ }
+ }
+
+ assert(MO.isExpr() && "Unknown operand kind in printOperand");
+ MO.getExpr()->print(O, &MAI);
+}
+
+void SparcInstPrinter::printMemOperand(const MCInst *MI, int opNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O, const char *Modifier) {
+ printOperand(MI, opNum, STI, O);
+
+ // If this is an ADD operand, emit it like normal operands.
+ if (Modifier && !strcmp(Modifier, "arith")) {
+ O << ", ";
+ printOperand(MI, opNum+1, STI, O);
+ return;
+ }
+ const MCOperand &MO = MI->getOperand(opNum+1);
+
+ if (MO.isReg() && MO.getReg() == SP::G0)
+ return; // don't print "+%g0"
+ if (MO.isImm() && MO.getImm() == 0)
+ return; // don't print "+0"
+
+ O << "+";
+
+ printOperand(MI, opNum+1, STI, O);
+}
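+
+// For example, a register+immediate address prints as "%i0+8", while
+// "%i0+%g0" and "%i0+0" are both shortened to just "%i0".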
+
+void SparcInstPrinter::printCCOperand(const MCInst *MI, int opNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ int CC = (int)MI->getOperand(opNum).getImm();
+ switch (MI->getOpcode()) {
+ default: break;
+ case SP::FBCOND:
+ case SP::FBCONDA:
+ case SP::BPFCC:
+ case SP::BPFCCA:
+ case SP::BPFCCNT:
+ case SP::BPFCCANT:
+ case SP::MOVFCCrr: case SP::V9MOVFCCrr:
+ case SP::MOVFCCri: case SP::V9MOVFCCri:
+ case SP::FMOVS_FCC: case SP::V9FMOVS_FCC:
+ case SP::FMOVD_FCC: case SP::V9FMOVD_FCC:
+ case SP::FMOVQ_FCC: case SP::V9FMOVQ_FCC:
+    // Make sure CC is an FP conditional flag.
+ CC = (CC < 16) ? (CC + 16) : CC;
+ break;
+ case SP::CBCOND:
+ case SP::CBCONDA:
+    // Make sure CC is a coprocessor (CP) conditional flag.
+ CC = (CC < 32) ? (CC + 32) : CC;
+ break;
+ }
+ O << SPARCCondCodeToString((SPCC::CondCodes)CC);
+}
+
+bool SparcInstPrinter::printGetPCX(const MCInst *MI, unsigned opNum,
+ const MCSubtargetInfo &STI,
+ raw_ostream &O) {
+ llvm_unreachable("FIXME: Implement SparcInstPrinter::printGetPCX.");
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.h b/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.h
new file mode 100644
index 000000000000..6f06d1ddae32
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/InstPrinter/SparcInstPrinter.h
@@ -0,0 +1,55 @@
+//===-- SparcInstPrinter.h - Convert Sparc MCInst to assembly syntax ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class prints a Sparc MCInst to a .s file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_INSTPRINTER_SPARCINSTPRINTER_H
+#define LLVM_LIB_TARGET_SPARC_INSTPRINTER_SPARCINSTPRINTER_H
+
+#include "llvm/MC/MCInstPrinter.h"
+
+namespace llvm {
+
+class SparcInstPrinter : public MCInstPrinter {
+public:
+ SparcInstPrinter(const MCAsmInfo &MAI, const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI)
+ : MCInstPrinter(MAI, MII, MRI) {}
+
+ void printRegName(raw_ostream &OS, unsigned RegNo) const override;
+ void printInst(const MCInst *MI, raw_ostream &O, StringRef Annot,
+ const MCSubtargetInfo &STI) override;
+ bool printSparcAliasInstr(const MCInst *MI, const MCSubtargetInfo &STI,
+ raw_ostream &OS);
+ bool isV9(const MCSubtargetInfo &STI) const;
+
+ // Autogenerated by tblgen.
+ void printInstruction(const MCInst *MI, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ bool printAliasInstr(const MCInst *MI, const MCSubtargetInfo &STI,
+ raw_ostream &O);
+ void printCustomAliasOperand(const MCInst *MI, unsigned OpIdx,
+ unsigned PrintMethodIdx,
+ const MCSubtargetInfo &STI, raw_ostream &O);
+ static const char *getRegisterName(unsigned RegNo);
+
+ void printOperand(const MCInst *MI, int opNum, const MCSubtargetInfo &STI,
+ raw_ostream &OS);
+ void printMemOperand(const MCInst *MI, int opNum, const MCSubtargetInfo &STI,
+ raw_ostream &OS, const char *Modifier = nullptr);
+ void printCCOperand(const MCInst *MI, int opNum, const MCSubtargetInfo &STI,
+ raw_ostream &OS);
+ bool printGetPCX(const MCInst *MI, unsigned OpNo, const MCSubtargetInfo &STI,
+ raw_ostream &OS);
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Sparc/LeonFeatures.td b/contrib/llvm/lib/Target/Sparc/LeonFeatures.td
new file mode 100755
index 000000000000..d9efe094d078
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/LeonFeatures.td
@@ -0,0 +1,68 @@
+//===-- LeonFeatures.td - Describe the Leon Features -------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// UMAC and SMAC support for LEON3 and LEON4 processors.
+//===----------------------------------------------------------------------===//
+
+// UMAC/SMAC multiply-accumulate support; LEON3 and LEON4 subtargets only.
+def UMACSMACSupport : SubtargetFeature<
+ "hasumacsmac",
+ "HasUmacSmac",
+ "true",
+ "Enable UMAC and SMAC for LEON3 and LEON4 processors"
+>;
+
+
+//===----------------------------------------------------------------------===//
+// CASA Support differs between LEON3-FT GR712RC and LEON3-FT UT699
+// We need to have the option to switch this on and off.
+//===----------------------------------------------------------------------===//
+
+// Support for the CASA (compare-and-swap) instruction; LEON3 and LEON4
+// subtargets only.
+def LeonCASA : SubtargetFeature<
+ "hasleoncasa",
+ "HasLeonCasa",
+ "true",
+ "Enable CASA instruction for LEON3 and LEON4 processors"
+>;
+
+
+def ReplaceSDIV : SubtargetFeature<
+ "replacesdiv",
+ "PerformSDIVReplace",
+ "true",
+ "AT697E erratum fix: Do not emit SDIV, emit SDIVCC instead"
+>;
+
+def InsertNOPLoad: SubtargetFeature<
+ "insertnopload",
+ "InsertNOPLoad",
+ "true",
+ "LEON3 erratum fix: Insert a NOP instruction after every single-cycle load instruction when the next instruction is another load/store instruction"
+>;
+
+def DetectRoundChange : SubtargetFeature<
+ "detectroundchange",
+ "DetectRoundChange",
+ "true",
+ "LEON3 erratum detection: Detects any rounding mode change "
+ "request: use only the round-to-nearest rounding mode"
+>;
+
+def FixAllFDIVSQRT : SubtargetFeature<
+ "fixallfdivsqrt",
+ "FixAllFDIVSQRT",
+ "true",
+ "LEON erratum fix: Fix FDIVS/FDIVD/FSQRTS/FSQRTD instructions with NOPs and floating-point store"
+>;
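+
+// These features are enabled through -mattr using the names defined above;
+// a hypothetical invocation might look like:
+//
+//   llc -march=sparc -mattr=+insertnopload,+fixallfdivsqrt foo.ll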
diff --git a/contrib/llvm/lib/Target/Sparc/LeonPasses.cpp b/contrib/llvm/lib/Target/Sparc/LeonPasses.cpp
new file mode 100755
index 000000000000..5ce00db365ab
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/LeonPasses.cpp
@@ -0,0 +1,158 @@
+//===------ LeonPasses.cpp - Define passes specific to LEON ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#include "LeonPasses.h"
+#include "llvm/CodeGen/ISDOpcodes.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+LEONMachineFunctionPass::LEONMachineFunctionPass(char &ID)
+ : MachineFunctionPass(ID) {}
+
+//*****************************************************************************
+//**** InsertNOPLoad pass
+//*****************************************************************************
+// This pass fixes the incorrectly working load instructions that exist in
+// some earlier versions of the LEON processor line. NOP instructions must
+// be inserted after the load instruction to ensure that it behaves as
+// expected on these processors.
+//
+// This pass inserts a NOP after any LD or LDF instruction.
+//
+char InsertNOPLoad::ID = 0;
+
+InsertNOPLoad::InsertNOPLoad() : LEONMachineFunctionPass(ID) {}
+
+bool InsertNOPLoad::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ DebugLoc DL = DebugLoc();
+
+ bool Modified = false;
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode >= SP::LDDArr && Opcode <= SP::LDrr) {
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+ BuildMI(MBB, NMBBI, DL, TII.get(SP::NOP));
+ Modified = true;
+ }
+ }
+ }
+
+ return Modified;
+}
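+
+// Effect of the pass, sketched in assembly (illustrative):
+//
+//   ld [%o0], %o1            ld [%o0], %o1
+//   st %o1, [%o2]    -->     nop
+//                            st %o1, [%o2]
+//
+// Note that the guard keys on the opcode range [LDDArr, LDrr] of the
+// generated opcode enum, so as written the NOP is inserted after every load,
+// regardless of what the next instruction is.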
+
+
+
+//*****************************************************************************
+//**** DetectRoundChange pass
+//*****************************************************************************
+// To prevent any explicit change of the default rounding mode, this pass
+// detects any call to the fesetround function.
+// A diagnostic is emitted so that the user knows this has happened.
+//
+// Works around an erratum in the UT699 LEON 3 processor.
+
+char DetectRoundChange::ID = 0;
+
+DetectRoundChange::DetectRoundChange() : LEONMachineFunctionPass(ID) {}
+
+bool DetectRoundChange::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+
+ bool Modified = false;
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+ if (Opcode == SP::CALL && MI.getNumOperands() > 0) {
+ MachineOperand &MO = MI.getOperand(0);
+
+ if (MO.isGlobal()) {
+ StringRef FuncName = MO.getGlobal()->getName();
+ if (FuncName.compare_lower("fesetround") == 0) {
+ errs() << "Error: You are using the detectroundchange "
+ "option to detect rounding changes that will "
+ "cause LEON errata. The only way to fix this "
+ "is to remove the call to fesetround from "
+ "the source code.\n";
+ }
+ }
+ }
+ }
+ }
+
+ return Modified;
+}
+
+//*****************************************************************************
+//**** FixAllFDIVSQRT pass
+//*****************************************************************************
+// This pass fixes the incorrectly working FDIVx and FSQRTx instructions that
+// exist for some earlier versions of the LEON processor line. NOP
+// instructions need to be inserted around these instructions (five before
+// and twenty-eight after) to ensure the correct result is placed in the
+// destination registers before they are used.
+//
+// This pass implements two fixes:
+//  1) fixing the FSQRTS and FSQRTD instructions.
+//  2) fixing the FDIVS and FDIVD instructions.
+//
+// FSQRTS and FDIVS are converted to FSQRTD and FDIVD respectively earlier in
+// the pipeline when this option is enabled, so this pass only needs to deal
+// with the changes still required for the "double" versions of these
+// instructions.
+//
+char FixAllFDIVSQRT::ID = 0;
+
+FixAllFDIVSQRT::FixAllFDIVSQRT() : LEONMachineFunctionPass(ID) {}
+
+bool FixAllFDIVSQRT::runOnMachineFunction(MachineFunction &MF) {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ DebugLoc DL = DebugLoc();
+
+ bool Modified = false;
+ for (auto MFI = MF.begin(), E = MF.end(); MFI != E; ++MFI) {
+ MachineBasicBlock &MBB = *MFI;
+ for (auto MBBI = MBB.begin(), E = MBB.end(); MBBI != E; ++MBBI) {
+ MachineInstr &MI = *MBBI;
+ unsigned Opcode = MI.getOpcode();
+
+ // Note: FDIVS and FSQRTS cannot be generated when this erratum fix is
+ // switched on so we don't need to check for them here. They will
+ // already have been converted to FSQRTD or FDIVD earlier in the
+ // pipeline.
+ if (Opcode == SP::FSQRTD || Opcode == SP::FDIVD) {
+ for (int InsertedCount = 0; InsertedCount < 5; InsertedCount++)
+ BuildMI(MBB, MBBI, DL, TII.get(SP::NOP));
+
+ MachineBasicBlock::iterator NMBBI = std::next(MBBI);
+ for (int InsertedCount = 0; InsertedCount < 28; InsertedCount++)
+ BuildMI(MBB, NMBBI, DL, TII.get(SP::NOP));
+
+ Modified = true;
+ }
+ }
+ }
+
+ return Modified;
+}
diff --git a/contrib/llvm/lib/Target/Sparc/LeonPasses.h b/contrib/llvm/lib/Target/Sparc/LeonPasses.h
new file mode 100755
index 000000000000..1b3d9a7a32f9
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/LeonPasses.h
@@ -0,0 +1,88 @@
+//===------- LeonPasses.h - Define passes specific to LEON ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_LEON_PASSES_H
+#define LLVM_LIB_TARGET_SPARC_LEON_PASSES_H
+
+#include "llvm/CodeGen/MachineBasicBlock.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/Passes.h"
+
+#include "Sparc.h"
+#include "SparcSubtarget.h"
+
+namespace llvm {
+class LLVM_LIBRARY_VISIBILITY LEONMachineFunctionPass
+ : public MachineFunctionPass {
+protected:
+ const SparcSubtarget *Subtarget;
+ const int LAST_OPERAND = -1;
+
+  // This vector holds the free registers that we allocate in groups for some
+  // of the LEON passes.
+ std::vector<int> UsedRegisters;
+
+protected:
+ LEONMachineFunctionPass(char &ID);
+
+ int GetRegIndexForOperand(MachineInstr &MI, int OperandIndex);
+ void clearUsedRegisterList() { UsedRegisters.clear(); }
+
+ void markRegisterUsed(int registerIndex) {
+ UsedRegisters.push_back(registerIndex);
+ }
+ int getUnusedFPRegister(MachineRegisterInfo &MRI);
+};
+
+class LLVM_LIBRARY_VISIBILITY InsertNOPLoad : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ InsertNOPLoad();
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ StringRef getPassName() const override {
+ return "InsertNOPLoad: Erratum Fix LBR35: insert a NOP instruction after "
+ "every single-cycle load instruction when the next instruction is "
+ "another load/store instruction";
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY DetectRoundChange
+ : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ DetectRoundChange();
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ StringRef getPassName() const override {
+ return "DetectRoundChange: Leon erratum detection: detect any rounding "
+ "mode change request: use only the round-to-nearest rounding mode";
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY FixAllFDIVSQRT : public LEONMachineFunctionPass {
+public:
+ static char ID;
+
+ FixAllFDIVSQRT();
+ bool runOnMachineFunction(MachineFunction &MF) override;
+
+ StringRef getPassName() const override {
+ return "FixAllFDIVSQRT: Erratum Fix LBR34: fix FDIVS/FDIVD/FSQRTS/FSQRTD "
+ "instructions with NOPs and floating-point store";
+ }
+};
+} // namespace llvm
+
+#endif // LLVM_LIB_TARGET_SPARC_LEON_PASSES_H
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
new file mode 100644
index 000000000000..f2438ee43075
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
@@ -0,0 +1,309 @@
+//===-- SparcAsmBackend.cpp - Sparc Assembler Backend ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/SparcFixupKinds.h"
+#include "MCTargetDesc/SparcMCTargetDesc.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixupKindInfo.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+static unsigned adjustFixupValue(unsigned Kind, uint64_t Value) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown fixup kind!");
+ case FK_Data_1:
+ case FK_Data_2:
+ case FK_Data_4:
+ case FK_Data_8:
+ return Value;
+
+ case Sparc::fixup_sparc_wplt30:
+ case Sparc::fixup_sparc_call30:
+ return (Value >> 2) & 0x3fffffff;
+
+ case Sparc::fixup_sparc_br22:
+ return (Value >> 2) & 0x3fffff;
+
+ case Sparc::fixup_sparc_br19:
+ return (Value >> 2) & 0x7ffff;
+
+ case Sparc::fixup_sparc_br16_2:
+ return (Value >> 2) & 0xc000;
+
+ case Sparc::fixup_sparc_br16_14:
+ return (Value >> 2) & 0x3fff;
+
+ case Sparc::fixup_sparc_pc22:
+ case Sparc::fixup_sparc_got22:
+ case Sparc::fixup_sparc_tls_gd_hi22:
+ case Sparc::fixup_sparc_tls_ldm_hi22:
+ case Sparc::fixup_sparc_tls_ie_hi22:
+ case Sparc::fixup_sparc_hi22:
+ return (Value >> 10) & 0x3fffff;
+
+ case Sparc::fixup_sparc_pc10:
+ case Sparc::fixup_sparc_got10:
+ case Sparc::fixup_sparc_tls_gd_lo10:
+ case Sparc::fixup_sparc_tls_ldm_lo10:
+ case Sparc::fixup_sparc_tls_ie_lo10:
+ case Sparc::fixup_sparc_lo10:
+ return Value & 0x3ff;
+
+ case Sparc::fixup_sparc_h44:
+ return (Value >> 22) & 0x3fffff;
+
+ case Sparc::fixup_sparc_m44:
+ return (Value >> 12) & 0x3ff;
+
+ case Sparc::fixup_sparc_l44:
+ return Value & 0xfff;
+
+ case Sparc::fixup_sparc_hh:
+ return (Value >> 42) & 0x3fffff;
+
+ case Sparc::fixup_sparc_hm:
+ return (Value >> 32) & 0x3ff;
+
+ case Sparc::fixup_sparc_tls_ldo_hix22:
+ case Sparc::fixup_sparc_tls_le_hix22:
+ case Sparc::fixup_sparc_tls_ldo_lox10:
+ case Sparc::fixup_sparc_tls_le_lox10:
+ assert(Value == 0 && "Sparc TLS relocs expect zero Value");
+ return 0;
+
+ case Sparc::fixup_sparc_tls_gd_add:
+ case Sparc::fixup_sparc_tls_gd_call:
+ case Sparc::fixup_sparc_tls_ldm_add:
+ case Sparc::fixup_sparc_tls_ldm_call:
+ case Sparc::fixup_sparc_tls_ldo_add:
+ case Sparc::fixup_sparc_tls_ie_ld:
+ case Sparc::fixup_sparc_tls_ie_ldx:
+ case Sparc::fixup_sparc_tls_ie_add:
+ return 0;
+ }
+}
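+
+// Worked example: for Value == 0x12345678, fixup_sparc_hi22 yields
+// (Value >> 10) & 0x3fffff == 0x48d15 and fixup_sparc_lo10 yields
+// Value & 0x3ff == 0x278, matching the %hi/%lo split produced by
+// "sethi %hi(x), %rd; or %rd, %lo(x), %rd".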
+
+namespace {
+ class SparcAsmBackend : public MCAsmBackend {
+ protected:
+ const Target &TheTarget;
+ bool IsLittleEndian;
+ bool Is64Bit;
+
+ public:
+ SparcAsmBackend(const Target &T)
+ : MCAsmBackend(), TheTarget(T),
+ IsLittleEndian(StringRef(TheTarget.getName()) == "sparcel"),
+ Is64Bit(StringRef(TheTarget.getName()) == "sparcv9") {}
+
+ unsigned getNumFixupKinds() const override {
+ return Sparc::NumTargetFixupKinds;
+ }
+
+ const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
+ const static MCFixupKindInfo InfosBE[Sparc::NumTargetFixupKinds] = {
+ // name offset bits flags
+ { "fixup_sparc_call30", 2, 30, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_br22", 10, 22, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_br19", 13, 19, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_br16_2", 10, 2, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_br16_14", 18, 14, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_hi22", 10, 22, 0 },
+ { "fixup_sparc_lo10", 22, 10, 0 },
+ { "fixup_sparc_h44", 10, 22, 0 },
+ { "fixup_sparc_m44", 22, 10, 0 },
+ { "fixup_sparc_l44", 20, 12, 0 },
+ { "fixup_sparc_hh", 10, 22, 0 },
+ { "fixup_sparc_hm", 22, 10, 0 },
+ { "fixup_sparc_pc22", 10, 22, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_pc10", 22, 10, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_got22", 10, 22, 0 },
+ { "fixup_sparc_got10", 22, 10, 0 },
+ { "fixup_sparc_wplt30", 2, 30, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_tls_gd_hi22", 10, 22, 0 },
+ { "fixup_sparc_tls_gd_lo10", 22, 10, 0 },
+ { "fixup_sparc_tls_gd_add", 0, 0, 0 },
+ { "fixup_sparc_tls_gd_call", 0, 0, 0 },
+ { "fixup_sparc_tls_ldm_hi22", 10, 22, 0 },
+ { "fixup_sparc_tls_ldm_lo10", 22, 10, 0 },
+ { "fixup_sparc_tls_ldm_add", 0, 0, 0 },
+ { "fixup_sparc_tls_ldm_call", 0, 0, 0 },
+ { "fixup_sparc_tls_ldo_hix22", 10, 22, 0 },
+ { "fixup_sparc_tls_ldo_lox10", 22, 10, 0 },
+ { "fixup_sparc_tls_ldo_add", 0, 0, 0 },
+ { "fixup_sparc_tls_ie_hi22", 10, 22, 0 },
+ { "fixup_sparc_tls_ie_lo10", 22, 10, 0 },
+ { "fixup_sparc_tls_ie_ld", 0, 0, 0 },
+ { "fixup_sparc_tls_ie_ldx", 0, 0, 0 },
+ { "fixup_sparc_tls_ie_add", 0, 0, 0 },
+ { "fixup_sparc_tls_le_hix22", 0, 0, 0 },
+ { "fixup_sparc_tls_le_lox10", 0, 0, 0 }
+ };
+
+ const static MCFixupKindInfo InfosLE[Sparc::NumTargetFixupKinds] = {
+ // name offset bits flags
+ { "fixup_sparc_call30", 0, 30, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_br22", 0, 22, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_br19", 0, 19, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_br16_2", 20, 2, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_br16_14", 0, 14, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_hi22", 0, 22, 0 },
+ { "fixup_sparc_lo10", 0, 10, 0 },
+ { "fixup_sparc_h44", 0, 22, 0 },
+ { "fixup_sparc_m44", 0, 10, 0 },
+ { "fixup_sparc_l44", 0, 12, 0 },
+ { "fixup_sparc_hh", 0, 22, 0 },
+ { "fixup_sparc_hm", 0, 10, 0 },
+ { "fixup_sparc_pc22", 0, 22, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_pc10", 0, 10, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_got22", 0, 22, 0 },
+ { "fixup_sparc_got10", 0, 10, 0 },
+ { "fixup_sparc_wplt30", 0, 30, MCFixupKindInfo::FKF_IsPCRel },
+ { "fixup_sparc_tls_gd_hi22", 0, 22, 0 },
+ { "fixup_sparc_tls_gd_lo10", 0, 10, 0 },
+ { "fixup_sparc_tls_gd_add", 0, 0, 0 },
+ { "fixup_sparc_tls_gd_call", 0, 0, 0 },
+ { "fixup_sparc_tls_ldm_hi22", 0, 22, 0 },
+ { "fixup_sparc_tls_ldm_lo10", 0, 10, 0 },
+ { "fixup_sparc_tls_ldm_add", 0, 0, 0 },
+ { "fixup_sparc_tls_ldm_call", 0, 0, 0 },
+ { "fixup_sparc_tls_ldo_hix22", 0, 22, 0 },
+ { "fixup_sparc_tls_ldo_lox10", 0, 10, 0 },
+ { "fixup_sparc_tls_ldo_add", 0, 0, 0 },
+ { "fixup_sparc_tls_ie_hi22", 0, 22, 0 },
+ { "fixup_sparc_tls_ie_lo10", 0, 10, 0 },
+ { "fixup_sparc_tls_ie_ld", 0, 0, 0 },
+ { "fixup_sparc_tls_ie_ldx", 0, 0, 0 },
+ { "fixup_sparc_tls_ie_add", 0, 0, 0 },
+ { "fixup_sparc_tls_le_hix22", 0, 0, 0 },
+ { "fixup_sparc_tls_le_lox10", 0, 0, 0 }
+ };
+
+ if (Kind < FirstTargetFixupKind)
+ return MCAsmBackend::getFixupKindInfo(Kind);
+
+ assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
+ "Invalid kind!");
+ if (IsLittleEndian)
+ return InfosLE[Kind - FirstTargetFixupKind];
+
+ return InfosBE[Kind - FirstTargetFixupKind];
+ }
+
+ bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target) override {
+ switch ((Sparc::Fixups)Fixup.getKind()) {
+ default:
+ return false;
+ case Sparc::fixup_sparc_wplt30:
+ if (Target.getSymA()->getSymbol().isTemporary())
+ return false;
+ LLVM_FALLTHROUGH;
+ case Sparc::fixup_sparc_tls_gd_hi22:
+ case Sparc::fixup_sparc_tls_gd_lo10:
+ case Sparc::fixup_sparc_tls_gd_add:
+ case Sparc::fixup_sparc_tls_gd_call:
+ case Sparc::fixup_sparc_tls_ldm_hi22:
+ case Sparc::fixup_sparc_tls_ldm_lo10:
+ case Sparc::fixup_sparc_tls_ldm_add:
+ case Sparc::fixup_sparc_tls_ldm_call:
+ case Sparc::fixup_sparc_tls_ldo_hix22:
+ case Sparc::fixup_sparc_tls_ldo_lox10:
+ case Sparc::fixup_sparc_tls_ldo_add:
+ case Sparc::fixup_sparc_tls_ie_hi22:
+ case Sparc::fixup_sparc_tls_ie_lo10:
+ case Sparc::fixup_sparc_tls_ie_ld:
+ case Sparc::fixup_sparc_tls_ie_ldx:
+ case Sparc::fixup_sparc_tls_ie_add:
+ case Sparc::fixup_sparc_tls_le_hix22:
+ case Sparc::fixup_sparc_tls_le_lox10:
+ return true;
+ }
+ }
+
+ bool mayNeedRelaxation(const MCInst &Inst) const override {
+ // FIXME.
+ return false;
+ }
+
+ /// fixupNeedsRelaxation - Target specific predicate for whether a given
+ /// fixup requires the associated instruction to be relaxed.
+ bool fixupNeedsRelaxation(const MCFixup &Fixup,
+ uint64_t Value,
+ const MCRelaxableFragment *DF,
+ const MCAsmLayout &Layout) const override {
+ // FIXME.
+ llvm_unreachable("fixupNeedsRelaxation() unimplemented");
+ return false;
+ }
+ void relaxInstruction(const MCInst &Inst, const MCSubtargetInfo &STI,
+ MCInst &Res) const override {
+ // FIXME.
+ llvm_unreachable("relaxInstruction() unimplemented");
+ }
+
+ bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override {
+      // Cannot emit NOP padding whose size is not a multiple of 32 bits.
+ if (Count % 4 != 0)
+ return false;
+
+ uint64_t NumNops = Count / 4;
+ for (uint64_t i = 0; i != NumNops; ++i)
+ OW->write32(0x01000000);
+
+ return true;
+ }
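+
+    // For example, a request to pad 12 bytes emits three 0x01000000 words
+    // (the SPARC NOP encoding), while a 6-byte request is rejected.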
+ };
+
+ class ELFSparcAsmBackend : public SparcAsmBackend {
+ Triple::OSType OSType;
+ public:
+ ELFSparcAsmBackend(const Target &T, Triple::OSType OSType) :
+ SparcAsmBackend(T), OSType(OSType) { }
+
+ void applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
+ const MCValue &Target, MutableArrayRef<char> Data,
+ uint64_t Value, bool IsResolved) const override {
+
+ Value = adjustFixupValue(Fixup.getKind(), Value);
+ if (!Value) return; // Doesn't change encoding.
+
+ unsigned Offset = Fixup.getOffset();
+
+ // For each byte of the fragment that the fixup touches, mask in the bits
+ // from the fixup value. The Value has been "split up" into the
+ // appropriate bitfields above.
+ for (unsigned i = 0; i != 4; ++i) {
+ unsigned Idx = IsLittleEndian ? i : 3 - i;
+ Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
+ }
+ }
+
+ std::unique_ptr<MCObjectWriter>
+ createObjectWriter(raw_pwrite_stream &OS) const override {
+ uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(OSType);
+ return createSparcELFObjectWriter(OS, Is64Bit, IsLittleEndian, OSABI);
+ }
+ };
+
+} // end anonymous namespace
+
+MCAsmBackend *llvm::createSparcAsmBackend(const Target &T,
+ const MCSubtargetInfo &STI,
+ const MCRegisterInfo &MRI,
+ const MCTargetOptions &Options) {
+ return new ELFSparcAsmBackend(T, STI.getTargetTriple().getOS());
+}
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
new file mode 100644
index 000000000000..a204036a0975
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcELFObjectWriter.cpp
@@ -0,0 +1,140 @@
+//===-- SparcELFObjectWriter.cpp - Sparc ELF Writer -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/SparcFixupKinds.h"
+#include "MCTargetDesc/SparcMCExpr.h"
+#include "MCTargetDesc/SparcMCTargetDesc.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/MC/MCELFObjectWriter.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCObjectWriter.h"
+#include "llvm/MC/MCValue.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+namespace {
+ class SparcELFObjectWriter : public MCELFObjectTargetWriter {
+ public:
+ SparcELFObjectWriter(bool Is64Bit, uint8_t OSABI)
+ : MCELFObjectTargetWriter(Is64Bit, OSABI,
+ Is64Bit ? ELF::EM_SPARCV9 : ELF::EM_SPARC,
+ /*HasRelocationAddend*/ true) {}
+
+ ~SparcELFObjectWriter() override {}
+
+ protected:
+ unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
+ const MCFixup &Fixup, bool IsPCRel) const override;
+
+ bool needsRelocateWithSymbol(const MCSymbol &Sym,
+ unsigned Type) const override;
+
+ };
+}
+
+unsigned SparcELFObjectWriter::getRelocType(MCContext &Ctx,
+ const MCValue &Target,
+ const MCFixup &Fixup,
+ bool IsPCRel) const {
+
+ if (const SparcMCExpr *SExpr = dyn_cast<SparcMCExpr>(Fixup.getValue())) {
+ if (SExpr->getKind() == SparcMCExpr::VK_Sparc_R_DISP32)
+ return ELF::R_SPARC_DISP32;
+ }
+
+ if (IsPCRel) {
+ switch((unsigned)Fixup.getKind()) {
+ default:
+ llvm_unreachable("Unimplemented fixup -> relocation");
+ case FK_Data_1: return ELF::R_SPARC_DISP8;
+ case FK_Data_2: return ELF::R_SPARC_DISP16;
+ case FK_Data_4: return ELF::R_SPARC_DISP32;
+ case FK_Data_8: return ELF::R_SPARC_DISP64;
+ case Sparc::fixup_sparc_call30: return ELF::R_SPARC_WDISP30;
+ case Sparc::fixup_sparc_br22: return ELF::R_SPARC_WDISP22;
+ case Sparc::fixup_sparc_br19: return ELF::R_SPARC_WDISP19;
+ case Sparc::fixup_sparc_pc22: return ELF::R_SPARC_PC22;
+ case Sparc::fixup_sparc_pc10: return ELF::R_SPARC_PC10;
+ case Sparc::fixup_sparc_wplt30: return ELF::R_SPARC_WPLT30;
+ }
+ }
+
+ switch((unsigned)Fixup.getKind()) {
+ default:
+ llvm_unreachable("Unimplemented fixup -> relocation");
+ case FK_Data_1: return ELF::R_SPARC_8;
+ case FK_Data_2: return ((Fixup.getOffset() % 2)
+ ? ELF::R_SPARC_UA16
+ : ELF::R_SPARC_16);
+ case FK_Data_4: return ((Fixup.getOffset() % 4)
+ ? ELF::R_SPARC_UA32
+ : ELF::R_SPARC_32);
+ case FK_Data_8: return ((Fixup.getOffset() % 8)
+ ? ELF::R_SPARC_UA64
+ : ELF::R_SPARC_64);
+ case Sparc::fixup_sparc_hi22: return ELF::R_SPARC_HI22;
+ case Sparc::fixup_sparc_lo10: return ELF::R_SPARC_LO10;
+ case Sparc::fixup_sparc_h44: return ELF::R_SPARC_H44;
+ case Sparc::fixup_sparc_m44: return ELF::R_SPARC_M44;
+ case Sparc::fixup_sparc_l44: return ELF::R_SPARC_L44;
+ case Sparc::fixup_sparc_hh: return ELF::R_SPARC_HH22;
+ case Sparc::fixup_sparc_hm: return ELF::R_SPARC_HM10;
+ case Sparc::fixup_sparc_got22: return ELF::R_SPARC_GOT22;
+ case Sparc::fixup_sparc_got10: return ELF::R_SPARC_GOT10;
+ case Sparc::fixup_sparc_tls_gd_hi22: return ELF::R_SPARC_TLS_GD_HI22;
+ case Sparc::fixup_sparc_tls_gd_lo10: return ELF::R_SPARC_TLS_GD_LO10;
+ case Sparc::fixup_sparc_tls_gd_add: return ELF::R_SPARC_TLS_GD_ADD;
+ case Sparc::fixup_sparc_tls_gd_call: return ELF::R_SPARC_TLS_GD_CALL;
+ case Sparc::fixup_sparc_tls_ldm_hi22: return ELF::R_SPARC_TLS_LDM_HI22;
+ case Sparc::fixup_sparc_tls_ldm_lo10: return ELF::R_SPARC_TLS_LDM_LO10;
+ case Sparc::fixup_sparc_tls_ldm_add: return ELF::R_SPARC_TLS_LDM_ADD;
+ case Sparc::fixup_sparc_tls_ldm_call: return ELF::R_SPARC_TLS_LDM_CALL;
+ case Sparc::fixup_sparc_tls_ldo_hix22: return ELF::R_SPARC_TLS_LDO_HIX22;
+ case Sparc::fixup_sparc_tls_ldo_lox10: return ELF::R_SPARC_TLS_LDO_LOX10;
+ case Sparc::fixup_sparc_tls_ldo_add: return ELF::R_SPARC_TLS_LDO_ADD;
+ case Sparc::fixup_sparc_tls_ie_hi22: return ELF::R_SPARC_TLS_IE_HI22;
+ case Sparc::fixup_sparc_tls_ie_lo10: return ELF::R_SPARC_TLS_IE_LO10;
+ case Sparc::fixup_sparc_tls_ie_ld: return ELF::R_SPARC_TLS_IE_LD;
+ case Sparc::fixup_sparc_tls_ie_ldx: return ELF::R_SPARC_TLS_IE_LDX;
+ case Sparc::fixup_sparc_tls_ie_add: return ELF::R_SPARC_TLS_IE_ADD;
+ case Sparc::fixup_sparc_tls_le_hix22: return ELF::R_SPARC_TLS_LE_HIX22;
+ case Sparc::fixup_sparc_tls_le_lox10: return ELF::R_SPARC_TLS_LE_LOX10;
+ }
+
+ return ELF::R_SPARC_NONE;
+}
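+
+// For example, a 4-byte data fixup at a section offset that is not 4-byte
+// aligned becomes R_SPARC_UA32 rather than R_SPARC_32, via the unaligned
+// relocation variants selected above.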
+
+bool SparcELFObjectWriter::needsRelocateWithSymbol(const MCSymbol &Sym,
+ unsigned Type) const {
+ switch (Type) {
+ default:
+ return false;
+
+ // All relocations that use a GOT need a symbol, not an offset, as
+ // the offset of the symbol within the section is irrelevant to
+ // where the GOT entry is. Don't need to list all the TLS entries,
+ // as they're all marked as requiring a symbol anyways.
+ case ELF::R_SPARC_GOT10:
+ case ELF::R_SPARC_GOT13:
+ case ELF::R_SPARC_GOT22:
+ case ELF::R_SPARC_GOTDATA_HIX22:
+ case ELF::R_SPARC_GOTDATA_LOX10:
+ case ELF::R_SPARC_GOTDATA_OP_HIX22:
+ case ELF::R_SPARC_GOTDATA_OP_LOX10:
+ return true;
+ }
+}
+
+std::unique_ptr<MCObjectWriter>
+llvm::createSparcELFObjectWriter(raw_pwrite_stream &OS, bool Is64Bit,
+ bool IsLittleEndian, uint8_t OSABI) {
+ auto MOTW = llvm::make_unique<SparcELFObjectWriter>(Is64Bit, OSABI);
+ return createELFObjectWriter(std::move(MOTW), OS, IsLittleEndian);
+}
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h
new file mode 100644
index 000000000000..8d79396d936e
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcFixupKinds.h
@@ -0,0 +1,97 @@
+//===-- SparcFixupKinds.h - Sparc Specific Fixup Entries --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_MCTARGETDESC_SPARCFIXUPKINDS_H
+#define LLVM_LIB_TARGET_SPARC_MCTARGETDESC_SPARCFIXUPKINDS_H
+
+#include "llvm/MC/MCFixup.h"
+
+namespace llvm {
+ namespace Sparc {
+ enum Fixups {
+      /// fixup_sparc_call30 - 30-bit PC relative relocation for call
+ fixup_sparc_call30 = FirstTargetFixupKind,
+
+ /// fixup_sparc_br22 - 22-bit PC relative relocation for
+ /// branches
+ fixup_sparc_br22,
+
+ /// fixup_sparc_br19 - 19-bit PC relative relocation for
+ /// branches on icc/xcc
+ fixup_sparc_br19,
+
+ /// fixup_sparc_bpr - 16-bit fixup for bpr
+ fixup_sparc_br16_2,
+ fixup_sparc_br16_14,
+
+ /// fixup_sparc_hi22 - 22-bit fixup corresponding to %hi(foo)
+ /// for sethi
+ fixup_sparc_hi22,
+
+ /// fixup_sparc_lo10 - 10-bit fixup corresponding to %lo(foo)
+ fixup_sparc_lo10,
+
+ /// fixup_sparc_h44 - 22-bit fixup corresponding to %h44(foo)
+ fixup_sparc_h44,
+
+ /// fixup_sparc_m44 - 10-bit fixup corresponding to %m44(foo)
+ fixup_sparc_m44,
+
+ /// fixup_sparc_l44 - 12-bit fixup corresponding to %l44(foo)
+ fixup_sparc_l44,
+
+ /// fixup_sparc_hh - 22-bit fixup corresponding to %hh(foo)
+ fixup_sparc_hh,
+
+ /// fixup_sparc_hm - 10-bit fixup corresponding to %hm(foo)
+ fixup_sparc_hm,
+
+ /// fixup_sparc_pc22 - 22-bit fixup corresponding to %pc22(foo)
+ fixup_sparc_pc22,
+
+ /// fixup_sparc_pc10 - 10-bit fixup corresponding to %pc10(foo)
+ fixup_sparc_pc10,
+
+ /// fixup_sparc_got22 - 22-bit fixup corresponding to %got22(foo)
+ fixup_sparc_got22,
+
+ /// fixup_sparc_got10 - 10-bit fixup corresponding to %got10(foo)
+ fixup_sparc_got10,
+
+ /// fixup_sparc_wplt30 - 30-bit PC relative relocation for calls
+ /// through the PLT
+ fixup_sparc_wplt30,
+
+ /// fixups for Thread Local Storage
+ fixup_sparc_tls_gd_hi22,
+ fixup_sparc_tls_gd_lo10,
+ fixup_sparc_tls_gd_add,
+ fixup_sparc_tls_gd_call,
+ fixup_sparc_tls_ldm_hi22,
+ fixup_sparc_tls_ldm_lo10,
+ fixup_sparc_tls_ldm_add,
+ fixup_sparc_tls_ldm_call,
+ fixup_sparc_tls_ldo_hix22,
+ fixup_sparc_tls_ldo_lox10,
+ fixup_sparc_tls_ldo_add,
+ fixup_sparc_tls_ie_hi22,
+ fixup_sparc_tls_ie_lo10,
+ fixup_sparc_tls_ie_ld,
+ fixup_sparc_tls_ie_ldx,
+ fixup_sparc_tls_ie_add,
+ fixup_sparc_tls_le_hix22,
+ fixup_sparc_tls_le_lox10,
+
+ // Marker
+ LastTargetFixupKind,
+ NumTargetFixupKinds = LastTargetFixupKind - FirstTargetFixupKind
+ };
+ }
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
new file mode 100644
index 000000000000..50e8825b15e8
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.cpp
@@ -0,0 +1,73 @@
+//===- SparcMCAsmInfo.cpp - Sparc asm properties --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the definitions of the SparcMCAsmInfo properties.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcMCAsmInfo.h"
+#include "SparcMCExpr.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCTargetOptions.h"
+
+using namespace llvm;
+
+void SparcELFMCAsmInfo::anchor() {}
+
+SparcELFMCAsmInfo::SparcELFMCAsmInfo(const Triple &TheTriple) {
+ bool isV9 = (TheTriple.getArch() == Triple::sparcv9);
+ IsLittleEndian = (TheTriple.getArch() == Triple::sparcel);
+
+ if (isV9) {
+ CodePointerSize = CalleeSaveStackSlotSize = 8;
+ }
+
+ Data16bitsDirective = "\t.half\t";
+ Data32bitsDirective = "\t.word\t";
+ // .xword is only supported by V9.
+ Data64bitsDirective = (isV9) ? "\t.xword\t" : nullptr;
+ ZeroDirective = "\t.skip\t";
+ CommentString = "!";
+ SupportsDebugInformation = true;
+
+ ExceptionsType = ExceptionHandling::DwarfCFI;
+
+ SunStyleELFSectionSwitchSyntax = true;
+ UsesELFSectionDirectiveForBSS = true;
+
+ UseIntegratedAssembler = true;
+}
+
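+// For pc-relative encodings, wrap the personality symbol in a %r_disp32
+// expression so a 32-bit PC-relative relocation is emitted for it.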
+const MCExpr*
+SparcELFMCAsmInfo::getExprForPersonalitySymbol(const MCSymbol *Sym,
+ unsigned Encoding,
+ MCStreamer &Streamer) const {
+ if (Encoding & dwarf::DW_EH_PE_pcrel) {
+ MCContext &Ctx = Streamer.getContext();
+ return SparcMCExpr::create(SparcMCExpr::VK_Sparc_R_DISP32,
+ MCSymbolRefExpr::create(Sym, Ctx), Ctx);
+ }
+
+ return MCAsmInfo::getExprForPersonalitySymbol(Sym, Encoding, Streamer);
+}
+
+const MCExpr*
+SparcELFMCAsmInfo::getExprForFDESymbol(const MCSymbol *Sym,
+ unsigned Encoding,
+ MCStreamer &Streamer) const {
+ if (Encoding & dwarf::DW_EH_PE_pcrel) {
+ MCContext &Ctx = Streamer.getContext();
+ return SparcMCExpr::create(SparcMCExpr::VK_Sparc_R_DISP32,
+ MCSymbolRefExpr::create(Sym, Ctx), Ctx);
+ }
+ return MCAsmInfo::getExprForFDESymbol(Sym, Encoding, Streamer);
+}
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
new file mode 100644
index 000000000000..5e8d0cb50312
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCAsmInfo.h
@@ -0,0 +1,40 @@
+//===- SparcMCAsmInfo.h - Sparc asm properties -----------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the declaration of the SparcMCAsmInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_MCTARGETDESC_SPARCMCASMINFO_H
+#define LLVM_LIB_TARGET_SPARC_MCTARGETDESC_SPARCMCASMINFO_H
+
+#include "llvm/MC/MCAsmInfoELF.h"
+
+namespace llvm {
+
+class Triple;
+
+class SparcELFMCAsmInfo : public MCAsmInfoELF {
+ void anchor() override;
+
+public:
+ explicit SparcELFMCAsmInfo(const Triple &TheTriple);
+
+ const MCExpr*
+ getExprForPersonalitySymbol(const MCSymbol *Sym, unsigned Encoding,
+ MCStreamer &Streamer) const override;
+ const MCExpr* getExprForFDESymbol(const MCSymbol *Sym,
+ unsigned Encoding,
+ MCStreamer &Streamer) const override;
+
+};
+
+} // end namespace llvm
+
+#endif // LLVM_LIB_TARGET_SPARC_MCTARGETDESC_SPARCMCASMINFO_H
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
new file mode 100644
index 000000000000..684f66970dbe
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCCodeEmitter.cpp
@@ -0,0 +1,238 @@
+//===-- SparcMCCodeEmitter.cpp - Convert Sparc code to machine code -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SparcMCCodeEmitter class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/SparcFixupKinds.h"
+#include "SparcMCExpr.h"
+#include "SparcMCTargetDesc.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCFixup.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/EndianStream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstdint>
+
+using namespace llvm;
+
+#define DEBUG_TYPE "mccodeemitter"
+
+STATISTIC(MCNumEmitted, "Number of MC instructions emitted");
+
+namespace {
+
+class SparcMCCodeEmitter : public MCCodeEmitter {
+ const MCInstrInfo &MCII;
+ MCContext &Ctx;
+
+public:
+ SparcMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
+ : MCII(mcii), Ctx(ctx) {}
+ SparcMCCodeEmitter(const SparcMCCodeEmitter &) = delete;
+ SparcMCCodeEmitter &operator=(const SparcMCCodeEmitter &) = delete;
+ ~SparcMCCodeEmitter() override = default;
+
+ void encodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const override;
+
+ // getBinaryCodeForInstr - TableGen'erated function for getting the
+ // binary encoding for an instruction.
+ uint64_t getBinaryCodeForInstr(const MCInst &MI,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ /// getMachineOpValue - Return binary encoding of operand. If the machine
+ /// operand requires relocation, record the relocation and return zero.
+ unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+ unsigned getCallTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ unsigned getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ unsigned getBranchPredTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+ unsigned getBranchOnRegTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
+private:
+ uint64_t computeAvailableFeatures(const FeatureBitset &FB) const;
+ void verifyInstructionPredicates(const MCInst &MI,
+ uint64_t AvailableFeatures) const;
+};
+
+} // end anonymous namespace
+
+void SparcMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ verifyInstructionPredicates(MI,
+ computeAvailableFeatures(STI.getFeatureBits()));
+
+ unsigned Bits = getBinaryCodeForInstr(MI, Fixups, STI);
+
+ if (Ctx.getAsmInfo()->isLittleEndian()) {
+ // Output the bits in little-endian byte order.
+ support::endian::Writer<support::little>(OS).write<uint32_t>(Bits);
+ } else {
+ // Output the bits in big-endian byte order.
+ support::endian::Writer<support::big>(OS).write<uint32_t>(Bits);
+ }
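+ // TLS pseudo-instructions carry an extra symbol operand whose encoding
+ // must be zero; getMachineOpValue records the TLS fixup for it, as the
+ // assert below verifies.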
+ unsigned tlsOpNo = 0;
+ switch (MI.getOpcode()) {
+ default: break;
+ case SP::TLS_CALL: tlsOpNo = 1; break;
+ case SP::TLS_ADDrr:
+ case SP::TLS_ADDXrr:
+ case SP::TLS_LDrr:
+ case SP::TLS_LDXrr: tlsOpNo = 3; break;
+ }
+ if (tlsOpNo != 0) {
+ const MCOperand &MO = MI.getOperand(tlsOpNo);
+ uint64_t op = getMachineOpValue(MI, MO, Fixups, STI);
+ assert(op == 0 && "Unexpected operand value!");
+ (void)op; // suppress warning.
+ }
+
+ ++MCNumEmitted; // Keep track of the # of mi's emitted.
+}
+
+unsigned SparcMCCodeEmitter::
+getMachineOpValue(const MCInst &MI, const MCOperand &MO,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ if (MO.isReg())
+ return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());
+
+ if (MO.isImm())
+ return MO.getImm();
+
+ assert(MO.isExpr());
+ const MCExpr *Expr = MO.getExpr();
+ if (const SparcMCExpr *SExpr = dyn_cast<SparcMCExpr>(Expr)) {
+ MCFixupKind Kind = (MCFixupKind)SExpr->getFixupKind();
+ Fixups.push_back(MCFixup::create(0, Expr, Kind));
+ return 0;
+ }
+
+ int64_t Res;
+ if (Expr->evaluateAsAbsolute(Res))
+ return Res;
+
+ llvm_unreachable("Unhandled expression!");
+ return 0;
+}
+
+unsigned SparcMCCodeEmitter::
+getCallTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm())
+ return getMachineOpValue(MI, MO, Fixups, STI);
+
+ if (MI.getOpcode() == SP::TLS_CALL) {
+ // No fixups for __tls_get_addr. Fixups for the TLS symbol are emitted
+ // in encodeInstruction.
+#ifndef NDEBUG
+ // Verify that the callee is actually __tls_get_addr.
+ const SparcMCExpr *SExpr = dyn_cast<SparcMCExpr>(MO.getExpr());
+ assert(SExpr && SExpr->getSubExpr()->getKind() == MCExpr::SymbolRef &&
+ "Unexpected expression in TLS_CALL");
+ const MCSymbolRefExpr *SymExpr = cast<MCSymbolRefExpr>(SExpr->getSubExpr());
+ assert(SymExpr->getSymbol().getName() == "__tls_get_addr" &&
+ "Unexpected function for TLS_CALL");
+#endif
+ return 0;
+ }
+
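+ // Plain calls get the 30-bit call fixup; calls lowered with the
+ // VK_Sparc_WPLT30 kind (PIC calls through the PLT) get the WPLT30
+ // fixup instead.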
+ MCFixupKind fixupKind = (MCFixupKind)Sparc::fixup_sparc_call30;
+
+ if (const SparcMCExpr *SExpr = dyn_cast<SparcMCExpr>(MO.getExpr())) {
+ if (SExpr->getKind() == SparcMCExpr::VK_Sparc_WPLT30)
+ fixupKind = (MCFixupKind)Sparc::fixup_sparc_wplt30;
+ }
+
+ Fixups.push_back(MCFixup::create(0, MO.getExpr(), fixupKind));
+
+ return 0;
+}
+
+unsigned SparcMCCodeEmitter::
+getBranchTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm())
+ return getMachineOpValue(MI, MO, Fixups, STI);
+
+ Fixups.push_back(MCFixup::create(0, MO.getExpr(),
+ (MCFixupKind)Sparc::fixup_sparc_br22));
+ return 0;
+}
+
+unsigned SparcMCCodeEmitter::
+getBranchPredTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm())
+ return getMachineOpValue(MI, MO, Fixups, STI);
+
+ Fixups.push_back(MCFixup::create(0, MO.getExpr(),
+ (MCFixupKind)Sparc::fixup_sparc_br19));
+ return 0;
+}
+
+unsigned SparcMCCodeEmitter::
+getBranchOnRegTargetOpValue(const MCInst &MI, unsigned OpNo,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ const MCOperand &MO = MI.getOperand(OpNo);
+ if (MO.isReg() || MO.isImm())
+ return getMachineOpValue(MI, MO, Fixups, STI);
+
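+ // The 16-bit branch-on-register displacement is split across two
+ // instruction fields, so record two fixups over the same expression.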
+ Fixups.push_back(MCFixup::create(0, MO.getExpr(),
+ (MCFixupKind)Sparc::fixup_sparc_br16_2));
+ Fixups.push_back(MCFixup::create(0, MO.getExpr(),
+ (MCFixupKind)Sparc::fixup_sparc_br16_14));
+
+ return 0;
+}
+
+#define ENABLE_INSTR_PREDICATE_VERIFIER
+#include "SparcGenMCCodeEmitter.inc"
+
+MCCodeEmitter *llvm::createSparcMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ MCContext &Ctx) {
+ return new SparcMCCodeEmitter(MCII, Ctx);
+}
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp
new file mode 100644
index 000000000000..a77f760d9eff
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.cpp
@@ -0,0 +1,220 @@
+//===-- SparcMCExpr.cpp - Sparc specific MC expression classes --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation of the assembly expression modifiers
+// accepted by the Sparc architecture (e.g. "%hi", "%lo", ...).
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcMCExpr.h"
+#include "llvm/MC/MCAssembler.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCObjectStreamer.h"
+#include "llvm/MC/MCSymbolELF.h"
+#include "llvm/Object/ELF.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "sparcmcexpr"
+
+const SparcMCExpr*
+SparcMCExpr::create(VariantKind Kind, const MCExpr *Expr,
+ MCContext &Ctx) {
+ return new (Ctx) SparcMCExpr(Kind, Expr);
+}
+
+void SparcMCExpr::printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const {
+
+ bool closeParen = printVariantKind(OS, Kind);
+
+ const MCExpr *Expr = getSubExpr();
+ Expr->print(OS, MAI);
+
+ if (closeParen)
+ OS << ')';
+}
+
+bool SparcMCExpr::printVariantKind(raw_ostream &OS, VariantKind Kind)
+{
+ bool closeParen = true;
+ switch (Kind) {
+ case VK_Sparc_None: closeParen = false; break;
+ case VK_Sparc_LO: OS << "%lo("; break;
+ case VK_Sparc_HI: OS << "%hi("; break;
+ case VK_Sparc_H44: OS << "%h44("; break;
+ case VK_Sparc_M44: OS << "%m44("; break;
+ case VK_Sparc_L44: OS << "%l44("; break;
+ case VK_Sparc_HH: OS << "%hh("; break;
+ case VK_Sparc_HM: OS << "%hm("; break;
+ // FIXME: use %pc22/%pc10 if the system assembler supports them.
+ case VK_Sparc_PC22: OS << "%hi("; break;
+ case VK_Sparc_PC10: OS << "%lo("; break;
+ // FIXME: use %got22/%got10 if the system assembler supports them.
+ case VK_Sparc_GOT22: OS << "%hi("; break;
+ case VK_Sparc_GOT10: OS << "%lo("; break;
+ case VK_Sparc_WPLT30: closeParen = false; break;
+ case VK_Sparc_R_DISP32: OS << "%r_disp32("; break;
+ case VK_Sparc_TLS_GD_HI22: OS << "%tgd_hi22("; break;
+ case VK_Sparc_TLS_GD_LO10: OS << "%tgd_lo10("; break;
+ case VK_Sparc_TLS_GD_ADD: OS << "%tgd_add("; break;
+ case VK_Sparc_TLS_GD_CALL: OS << "%tgd_call("; break;
+ case VK_Sparc_TLS_LDM_HI22: OS << "%tldm_hi22("; break;
+ case VK_Sparc_TLS_LDM_LO10: OS << "%tldm_lo10("; break;
+ case VK_Sparc_TLS_LDM_ADD: OS << "%tldm_add("; break;
+ case VK_Sparc_TLS_LDM_CALL: OS << "%tldm_call("; break;
+ case VK_Sparc_TLS_LDO_HIX22: OS << "%tldo_hix22("; break;
+ case VK_Sparc_TLS_LDO_LOX10: OS << "%tldo_lox10("; break;
+ case VK_Sparc_TLS_LDO_ADD: OS << "%tldo_add("; break;
+ case VK_Sparc_TLS_IE_HI22: OS << "%tie_hi22("; break;
+ case VK_Sparc_TLS_IE_LO10: OS << "%tie_lo10("; break;
+ case VK_Sparc_TLS_IE_LD: OS << "%tie_ld("; break;
+ case VK_Sparc_TLS_IE_LDX: OS << "%tie_ldx("; break;
+ case VK_Sparc_TLS_IE_ADD: OS << "%tie_add("; break;
+ case VK_Sparc_TLS_LE_HIX22: OS << "%tle_hix22("; break;
+ case VK_Sparc_TLS_LE_LOX10: OS << "%tle_lox10("; break;
+ }
+ return closeParen;
+}
+
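+// Map an assembly modifier name (without the leading '%') to its variant
+// kind; e.g. "tle_hix22", written "%tle_hix22(sym)" in assembly, yields
+// VK_Sparc_TLS_LE_HIX22.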
+SparcMCExpr::VariantKind SparcMCExpr::parseVariantKind(StringRef name)
+{
+ return StringSwitch<SparcMCExpr::VariantKind>(name)
+ .Case("lo", VK_Sparc_LO)
+ .Case("hi", VK_Sparc_HI)
+ .Case("h44", VK_Sparc_H44)
+ .Case("m44", VK_Sparc_M44)
+ .Case("l44", VK_Sparc_L44)
+ .Case("hh", VK_Sparc_HH)
+ .Case("hm", VK_Sparc_HM)
+ .Case("pc22", VK_Sparc_PC22)
+ .Case("pc10", VK_Sparc_PC10)
+ .Case("got22", VK_Sparc_GOT22)
+ .Case("got10", VK_Sparc_GOT10)
+ .Case("r_disp32", VK_Sparc_R_DISP32)
+ .Case("tgd_hi22", VK_Sparc_TLS_GD_HI22)
+ .Case("tgd_lo10", VK_Sparc_TLS_GD_LO10)
+ .Case("tgd_add", VK_Sparc_TLS_GD_ADD)
+ .Case("tgd_call", VK_Sparc_TLS_GD_CALL)
+ .Case("tldm_hi22", VK_Sparc_TLS_LDM_HI22)
+ .Case("tldm_lo10", VK_Sparc_TLS_LDM_LO10)
+ .Case("tldm_add", VK_Sparc_TLS_LDM_ADD)
+ .Case("tldm_call", VK_Sparc_TLS_LDM_CALL)
+ .Case("tldo_hix22", VK_Sparc_TLS_LDO_HIX22)
+ .Case("tldo_lox10", VK_Sparc_TLS_LDO_LOX10)
+ .Case("tldo_add", VK_Sparc_TLS_LDO_ADD)
+ .Case("tie_hi22", VK_Sparc_TLS_IE_HI22)
+ .Case("tie_lo10", VK_Sparc_TLS_IE_LO10)
+ .Case("tie_ld", VK_Sparc_TLS_IE_LD)
+ .Case("tie_ldx", VK_Sparc_TLS_IE_LDX)
+ .Case("tie_add", VK_Sparc_TLS_IE_ADD)
+ .Case("tle_hix22", VK_Sparc_TLS_LE_HIX22)
+ .Case("tle_lox10", VK_Sparc_TLS_LE_LOX10)
+ .Default(VK_Sparc_None);
+}
+
+Sparc::Fixups SparcMCExpr::getFixupKind(SparcMCExpr::VariantKind Kind) {
+ switch (Kind) {
+ default: llvm_unreachable("Unhandled SparcMCExpr::VariantKind");
+ case VK_Sparc_LO: return Sparc::fixup_sparc_lo10;
+ case VK_Sparc_HI: return Sparc::fixup_sparc_hi22;
+ case VK_Sparc_H44: return Sparc::fixup_sparc_h44;
+ case VK_Sparc_M44: return Sparc::fixup_sparc_m44;
+ case VK_Sparc_L44: return Sparc::fixup_sparc_l44;
+ case VK_Sparc_HH: return Sparc::fixup_sparc_hh;
+ case VK_Sparc_HM: return Sparc::fixup_sparc_hm;
+ case VK_Sparc_PC22: return Sparc::fixup_sparc_pc22;
+ case VK_Sparc_PC10: return Sparc::fixup_sparc_pc10;
+ case VK_Sparc_GOT22: return Sparc::fixup_sparc_got22;
+ case VK_Sparc_GOT10: return Sparc::fixup_sparc_got10;
+ case VK_Sparc_WPLT30: return Sparc::fixup_sparc_wplt30;
+ case VK_Sparc_TLS_GD_HI22: return Sparc::fixup_sparc_tls_gd_hi22;
+ case VK_Sparc_TLS_GD_LO10: return Sparc::fixup_sparc_tls_gd_lo10;
+ case VK_Sparc_TLS_GD_ADD: return Sparc::fixup_sparc_tls_gd_add;
+ case VK_Sparc_TLS_GD_CALL: return Sparc::fixup_sparc_tls_gd_call;
+ case VK_Sparc_TLS_LDM_HI22: return Sparc::fixup_sparc_tls_ldm_hi22;
+ case VK_Sparc_TLS_LDM_LO10: return Sparc::fixup_sparc_tls_ldm_lo10;
+ case VK_Sparc_TLS_LDM_ADD: return Sparc::fixup_sparc_tls_ldm_add;
+ case VK_Sparc_TLS_LDM_CALL: return Sparc::fixup_sparc_tls_ldm_call;
+ case VK_Sparc_TLS_LDO_HIX22: return Sparc::fixup_sparc_tls_ldo_hix22;
+ case VK_Sparc_TLS_LDO_LOX10: return Sparc::fixup_sparc_tls_ldo_lox10;
+ case VK_Sparc_TLS_LDO_ADD: return Sparc::fixup_sparc_tls_ldo_add;
+ case VK_Sparc_TLS_IE_HI22: return Sparc::fixup_sparc_tls_ie_hi22;
+ case VK_Sparc_TLS_IE_LO10: return Sparc::fixup_sparc_tls_ie_lo10;
+ case VK_Sparc_TLS_IE_LD: return Sparc::fixup_sparc_tls_ie_ld;
+ case VK_Sparc_TLS_IE_LDX: return Sparc::fixup_sparc_tls_ie_ldx;
+ case VK_Sparc_TLS_IE_ADD: return Sparc::fixup_sparc_tls_ie_add;
+ case VK_Sparc_TLS_LE_HIX22: return Sparc::fixup_sparc_tls_le_hix22;
+ case VK_Sparc_TLS_LE_LOX10: return Sparc::fixup_sparc_tls_le_lox10;
+ }
+}
+
+bool
+SparcMCExpr::evaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const {
+ return getSubExpr()->evaluateAsRelocatable(Res, Layout, Fixup);
+}
+
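+// Recursively walk the expression and mark every referenced symbol as
+// STT_TLS so the ELF writer treats it as thread-local.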
+static void fixELFSymbolsInTLSFixupsImpl(const MCExpr *Expr, MCAssembler &Asm) {
+ switch (Expr->getKind()) {
+ case MCExpr::Target:
+ llvm_unreachable("Can't handle nested target expr!");
+ break;
+
+ case MCExpr::Constant:
+ break;
+
+ case MCExpr::Binary: {
+ const MCBinaryExpr *BE = cast<MCBinaryExpr>(Expr);
+ fixELFSymbolsInTLSFixupsImpl(BE->getLHS(), Asm);
+ fixELFSymbolsInTLSFixupsImpl(BE->getRHS(), Asm);
+ break;
+ }
+
+ case MCExpr::SymbolRef: {
+ const MCSymbolRefExpr &SymRef = *cast<MCSymbolRefExpr>(Expr);
+ cast<MCSymbolELF>(SymRef.getSymbol()).setType(ELF::STT_TLS);
+ break;
+ }
+
+ case MCExpr::Unary:
+ fixELFSymbolsInTLSFixupsImpl(cast<MCUnaryExpr>(Expr)->getSubExpr(), Asm);
+ break;
+ }
+
+}
+
+void SparcMCExpr::fixELFSymbolsInTLSFixups(MCAssembler &Asm) const {
+ switch(getKind()) {
+ default: return;
+ case VK_Sparc_TLS_GD_HI22:
+ case VK_Sparc_TLS_GD_LO10:
+ case VK_Sparc_TLS_GD_ADD:
+ case VK_Sparc_TLS_GD_CALL:
+ case VK_Sparc_TLS_LDM_HI22:
+ case VK_Sparc_TLS_LDM_LO10:
+ case VK_Sparc_TLS_LDM_ADD:
+ case VK_Sparc_TLS_LDM_CALL:
+ case VK_Sparc_TLS_LDO_HIX22:
+ case VK_Sparc_TLS_LDO_LOX10:
+ case VK_Sparc_TLS_LDO_ADD:
+ case VK_Sparc_TLS_IE_HI22:
+ case VK_Sparc_TLS_IE_LO10:
+ case VK_Sparc_TLS_IE_LD:
+ case VK_Sparc_TLS_IE_LDX:
+ case VK_Sparc_TLS_IE_ADD:
+ case VK_Sparc_TLS_LE_HIX22:
+ case VK_Sparc_TLS_LE_LOX10: break;
+ }
+ fixELFSymbolsInTLSFixupsImpl(getSubExpr(), Asm);
+}
+
+void SparcMCExpr::visitUsedExpr(MCStreamer &Streamer) const {
+ Streamer.visitUsedExpr(*getSubExpr());
+}
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h
new file mode 100644
index 000000000000..13f08195c764
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCExpr.h
@@ -0,0 +1,112 @@
+//===-- SparcMCExpr.h - Sparc specific MC expression classes ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes Sparc-specific MCExprs, used for modifiers like
+// "%hi" or "%lo" etc.,
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_MCTARGETDESC_SPARCMCEXPR_H
+#define LLVM_LIB_TARGET_SPARC_MCTARGETDESC_SPARCMCEXPR_H
+
+#include "SparcFixupKinds.h"
+#include "llvm/MC/MCExpr.h"
+
+namespace llvm {
+
+class StringRef;
+class SparcMCExpr : public MCTargetExpr {
+public:
+ enum VariantKind {
+ VK_Sparc_None,
+ VK_Sparc_LO,
+ VK_Sparc_HI,
+ VK_Sparc_H44,
+ VK_Sparc_M44,
+ VK_Sparc_L44,
+ VK_Sparc_HH,
+ VK_Sparc_HM,
+ VK_Sparc_PC22,
+ VK_Sparc_PC10,
+ VK_Sparc_GOT22,
+ VK_Sparc_GOT10,
+ VK_Sparc_WPLT30,
+ VK_Sparc_R_DISP32,
+ VK_Sparc_TLS_GD_HI22,
+ VK_Sparc_TLS_GD_LO10,
+ VK_Sparc_TLS_GD_ADD,
+ VK_Sparc_TLS_GD_CALL,
+ VK_Sparc_TLS_LDM_HI22,
+ VK_Sparc_TLS_LDM_LO10,
+ VK_Sparc_TLS_LDM_ADD,
+ VK_Sparc_TLS_LDM_CALL,
+ VK_Sparc_TLS_LDO_HIX22,
+ VK_Sparc_TLS_LDO_LOX10,
+ VK_Sparc_TLS_LDO_ADD,
+ VK_Sparc_TLS_IE_HI22,
+ VK_Sparc_TLS_IE_LO10,
+ VK_Sparc_TLS_IE_LD,
+ VK_Sparc_TLS_IE_LDX,
+ VK_Sparc_TLS_IE_ADD,
+ VK_Sparc_TLS_LE_HIX22,
+ VK_Sparc_TLS_LE_LOX10
+ };
+
+private:
+ const VariantKind Kind;
+ const MCExpr *Expr;
+
+ explicit SparcMCExpr(VariantKind Kind, const MCExpr *Expr)
+ : Kind(Kind), Expr(Expr) {}
+
+public:
+ /// @name Construction
+ /// @{
+
+ static const SparcMCExpr *create(VariantKind Kind, const MCExpr *Expr,
+ MCContext &Ctx);
+ /// @}
+ /// @name Accessors
+ /// @{
+
+ /// getKind - Get the kind of this expression.
+ VariantKind getKind() const { return Kind; }
+
+ /// getSubExpr - Get the child of this expression.
+ const MCExpr *getSubExpr() const { return Expr; }
+
+ /// getFixupKind - Get the fixup kind of this expression.
+ Sparc::Fixups getFixupKind() const { return getFixupKind(Kind); }
+
+ /// @}
+ void printImpl(raw_ostream &OS, const MCAsmInfo *MAI) const override;
+ bool evaluateAsRelocatableImpl(MCValue &Res,
+ const MCAsmLayout *Layout,
+ const MCFixup *Fixup) const override;
+ void visitUsedExpr(MCStreamer &Streamer) const override;
+ MCFragment *findAssociatedFragment() const override {
+ return getSubExpr()->findAssociatedFragment();
+ }
+
+ void fixELFSymbolsInTLSFixups(MCAssembler &Asm) const override;
+
+ static bool classof(const MCExpr *E) {
+ return E->getKind() == MCExpr::Target;
+ }
+
+ static bool classof(const SparcMCExpr *) { return true; }
+
+ static VariantKind parseVariantKind(StringRef name);
+ static bool printVariantKind(raw_ostream &OS, VariantKind Kind);
+ static Sparc::Fixups getFixupKind(VariantKind Kind);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
new file mode 100644
index 000000000000..bd6596faee5d
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.cpp
@@ -0,0 +1,125 @@
+//===-- SparcMCTargetDesc.cpp - Sparc Target Descriptions -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Sparc specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcMCTargetDesc.h"
+#include "InstPrinter/SparcInstPrinter.h"
+#include "SparcMCAsmInfo.h"
+#include "SparcTargetStreamer.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+#define GET_INSTRINFO_MC_DESC
+#include "SparcGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_MC_DESC
+#include "SparcGenSubtargetInfo.inc"
+
+#define GET_REGINFO_MC_DESC
+#include "SparcGenRegisterInfo.inc"
+
+static MCAsmInfo *createSparcMCAsmInfo(const MCRegisterInfo &MRI,
+ const Triple &TT) {
+ MCAsmInfo *MAI = new SparcELFMCAsmInfo(TT);
+ unsigned Reg = MRI.getDwarfRegNum(SP::O6, true);
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 0);
+ MAI->addInitialFrameState(Inst);
+ return MAI;
+}
+
+static MCAsmInfo *createSparcV9MCAsmInfo(const MCRegisterInfo &MRI,
+ const Triple &TT) {
+ MCAsmInfo *MAI = new SparcELFMCAsmInfo(TT);
+ unsigned Reg = MRI.getDwarfRegNum(SP::O6, true);
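+ // The V9 ABI biases the stack pointer by 2047 bytes, so the initial CFA
+ // is %o6 + 2047 rather than %o6 + 0.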
+ MCCFIInstruction Inst = MCCFIInstruction::createDefCfa(nullptr, Reg, 2047);
+ MAI->addInitialFrameState(Inst);
+ return MAI;
+}
+
+static MCInstrInfo *createSparcMCInstrInfo() {
+ MCInstrInfo *X = new MCInstrInfo();
+ InitSparcMCInstrInfo(X);
+ return X;
+}
+
+static MCRegisterInfo *createSparcMCRegisterInfo(const Triple &TT) {
+ MCRegisterInfo *X = new MCRegisterInfo();
+ InitSparcMCRegisterInfo(X, SP::O7);
+ return X;
+}
+
+static MCSubtargetInfo *
+createSparcMCSubtargetInfo(const Triple &TT, StringRef CPU, StringRef FS) {
+ if (CPU.empty())
+ CPU = (TT.getArch() == Triple::sparcv9) ? "v9" : "v8";
+ return createSparcMCSubtargetInfoImpl(TT, CPU, FS);
+}
+
+static MCTargetStreamer *
+createObjectTargetStreamer(MCStreamer &S, const MCSubtargetInfo &STI) {
+ return new SparcTargetELFStreamer(S);
+}
+
+static MCTargetStreamer *createTargetAsmStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS,
+ MCInstPrinter *InstPrint,
+ bool isVerboseAsm) {
+ return new SparcTargetAsmStreamer(S, OS);
+}
+
+static MCInstPrinter *createSparcMCInstPrinter(const Triple &T,
+ unsigned SyntaxVariant,
+ const MCAsmInfo &MAI,
+ const MCInstrInfo &MII,
+ const MCRegisterInfo &MRI) {
+ return new SparcInstPrinter(MAI, MII, MRI);
+}
+
+extern "C" void LLVMInitializeSparcTargetMC() {
+ // Register the MC asm info.
+ RegisterMCAsmInfoFn X(getTheSparcTarget(), createSparcMCAsmInfo);
+ RegisterMCAsmInfoFn Y(getTheSparcV9Target(), createSparcV9MCAsmInfo);
+ RegisterMCAsmInfoFn Z(getTheSparcelTarget(), createSparcMCAsmInfo);
+
+ for (Target *T :
+ {&getTheSparcTarget(), &getTheSparcV9Target(), &getTheSparcelTarget()}) {
+ // Register the MC instruction info.
+ TargetRegistry::RegisterMCInstrInfo(*T, createSparcMCInstrInfo);
+
+ // Register the MC register info.
+ TargetRegistry::RegisterMCRegInfo(*T, createSparcMCRegisterInfo);
+
+ // Register the MC subtarget info.
+ TargetRegistry::RegisterMCSubtargetInfo(*T, createSparcMCSubtargetInfo);
+
+ // Register the MC Code Emitter.
+ TargetRegistry::RegisterMCCodeEmitter(*T, createSparcMCCodeEmitter);
+
+ // Register the asm backend.
+ TargetRegistry::RegisterMCAsmBackend(*T, createSparcAsmBackend);
+
+ // Register the object target streamer.
+ TargetRegistry::RegisterObjectTargetStreamer(*T,
+ createObjectTargetStreamer);
+
+ // Register the asm streamer.
+ TargetRegistry::RegisterAsmTargetStreamer(*T, createTargetAsmStreamer);
+
+ // Register the MCInstPrinter
+ TargetRegistry::RegisterMCInstPrinter(*T, createSparcMCInstPrinter);
+ }
+}
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h
new file mode 100644
index 000000000000..8390198479ba
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcMCTargetDesc.h
@@ -0,0 +1,65 @@
+//===-- SparcMCTargetDesc.h - Sparc Target Descriptions ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Sparc specific target descriptions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_MCTARGETDESC_SPARCMCTARGETDESC_H
+#define LLVM_LIB_TARGET_SPARC_MCTARGETDESC_SPARCMCTARGETDESC_H
+
+#include "llvm/Support/DataTypes.h"
+
+#include <memory>
+
+namespace llvm {
+class MCAsmBackend;
+class MCCodeEmitter;
+class MCContext;
+class MCInstrInfo;
+class MCObjectWriter;
+class MCRegisterInfo;
+class MCSubtargetInfo;
+class MCTargetOptions;
+class Target;
+class Triple;
+class StringRef;
+class raw_pwrite_stream;
+class raw_ostream;
+
+Target &getTheSparcTarget();
+Target &getTheSparcV9Target();
+Target &getTheSparcelTarget();
+
+MCCodeEmitter *createSparcMCCodeEmitter(const MCInstrInfo &MCII,
+ const MCRegisterInfo &MRI,
+ MCContext &Ctx);
+MCAsmBackend *createSparcAsmBackend(const Target &T, const MCSubtargetInfo &STI,
+ const MCRegisterInfo &MRI,
+ const MCTargetOptions &Options);
+std::unique_ptr<MCObjectWriter>
+createSparcELFObjectWriter(raw_pwrite_stream &OS, bool Is64Bit,
+ bool IsLittleEndian, uint8_t OSABI);
+} // End llvm namespace
+
+// Defines symbolic names for Sparc registers. This defines a mapping from
+// register name to register number.
+//
+#define GET_REGINFO_ENUM
+#include "SparcGenRegisterInfo.inc"
+
+// Defines symbolic names for the Sparc instructions.
+//
+#define GET_INSTRINFO_ENUM
+#include "SparcGenInstrInfo.inc"
+
+#define GET_SUBTARGETINFO_ENUM
+#include "SparcGenSubtargetInfo.inc"
+
+#endif
diff --git a/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcTargetStreamer.cpp b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcTargetStreamer.cpp
new file mode 100644
index 000000000000..94af791e0e75
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/MCTargetDesc/SparcTargetStreamer.cpp
@@ -0,0 +1,46 @@
+//===-- SparcTargetStreamer.cpp - Sparc Target Streamer Methods -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Sparc specific target streamer methods.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcTargetStreamer.h"
+#include "InstPrinter/SparcInstPrinter.h"
+#include "llvm/Support/FormattedStream.h"
+
+using namespace llvm;
+
+// Pin the vtable to this file.
+SparcTargetStreamer::SparcTargetStreamer(MCStreamer &S) : MCTargetStreamer(S) {}
+
+void SparcTargetStreamer::anchor() {}
+
+SparcTargetAsmStreamer::SparcTargetAsmStreamer(MCStreamer &S,
+ formatted_raw_ostream &OS)
+ : SparcTargetStreamer(S), OS(OS) {}
+
+void SparcTargetAsmStreamer::emitSparcRegisterIgnore(unsigned reg) {
+ OS << "\t.register "
+ << "%" << StringRef(SparcInstPrinter::getRegisterName(reg)).lower()
+ << ", #ignore\n";
+}
+
+void SparcTargetAsmStreamer::emitSparcRegisterScratch(unsigned reg) {
+ OS << "\t.register "
+ << "%" << StringRef(SparcInstPrinter::getRegisterName(reg)).lower()
+ << ", #scratch\n";
+}
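+// For example, emitSparcRegisterScratch(SP::G2) prints:
+//   .register %g2, #scratch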
+
+SparcTargetELFStreamer::SparcTargetELFStreamer(MCStreamer &S)
+ : SparcTargetStreamer(S) {}
+
+MCELFStreamer &SparcTargetELFStreamer::getStreamer() {
+ return static_cast<MCELFStreamer &>(Streamer);
+}
diff --git a/contrib/llvm/lib/Target/Sparc/Sparc.h b/contrib/llvm/lib/Target/Sparc/Sparc.h
new file mode 100644
index 000000000000..4135e4e1b61d
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/Sparc.h
@@ -0,0 +1,167 @@
+//===-- Sparc.h - Top-level interface for Sparc representation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the entry points for global functions defined in the LLVM
+// Sparc back-end.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_SPARC_H
+#define LLVM_LIB_TARGET_SPARC_SPARC_H
+
+#include "MCTargetDesc/SparcMCTargetDesc.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+ class FunctionPass;
+ class SparcTargetMachine;
+ class formatted_raw_ostream;
+ class AsmPrinter;
+ class MCInst;
+ class MachineInstr;
+
+ FunctionPass *createSparcISelDag(SparcTargetMachine &TM);
+ FunctionPass *createSparcDelaySlotFillerPass();
+
+ void LowerSparcMachineInstrToMCInst(const MachineInstr *MI,
+ MCInst &OutMI,
+ AsmPrinter &AP);
+} // end namespace llvm
+
+namespace llvm {
+ // Enums corresponding to Sparc condition codes, both icc's and fcc's. These
+ // values must be kept in sync with the ones in the .td file.
+ namespace SPCC {
+ enum CondCodes {
+ ICC_A = 8 , // Always
+ ICC_N = 0 , // Never
+ ICC_NE = 9 , // Not Equal
+ ICC_E = 1 , // Equal
+ ICC_G = 10 , // Greater
+ ICC_LE = 2 , // Less or Equal
+ ICC_GE = 11 , // Greater or Equal
+ ICC_L = 3 , // Less
+ ICC_GU = 12 , // Greater Unsigned
+ ICC_LEU = 4 , // Less or Equal Unsigned
+ ICC_CC = 13 , // Carry Clear/Greater or Equal Unsigned
+ ICC_CS = 5 , // Carry Set/Less Unsigned
+ ICC_POS = 14 , // Positive
+ ICC_NEG = 6 , // Negative
+ ICC_VC = 15 , // Overflow Clear
+ ICC_VS = 7 , // Overflow Set
+
+ FCC_A = 8+16, // Always
+ FCC_N = 0+16, // Never
+ FCC_U = 7+16, // Unordered
+ FCC_G = 6+16, // Greater
+ FCC_UG = 5+16, // Unordered or Greater
+ FCC_L = 4+16, // Less
+ FCC_UL = 3+16, // Unordered or Less
+ FCC_LG = 2+16, // Less or Greater
+ FCC_NE = 1+16, // Not Equal
+ FCC_E = 9+16, // Equal
+ FCC_UE = 10+16, // Unordered or Equal
+ FCC_GE = 11+16, // Greater or Equal
+ FCC_UGE = 12+16, // Unordered or Greater or Equal
+ FCC_LE = 13+16, // Less or Equal
+ FCC_ULE = 14+16, // Unordered or Less or Equal
+ FCC_O = 15+16, // Ordered
+
+ CPCC_A = 8+32, // Always
+ CPCC_N = 0+32, // Never
+ CPCC_3 = 7+32,
+ CPCC_2 = 6+32,
+ CPCC_23 = 5+32,
+ CPCC_1 = 4+32,
+ CPCC_13 = 3+32,
+ CPCC_12 = 2+32,
+ CPCC_123 = 1+32,
+ CPCC_0 = 9+32,
+ CPCC_03 = 10+32,
+ CPCC_02 = 11+32,
+ CPCC_023 = 12+32,
+ CPCC_01 = 13+32,
+ CPCC_013 = 14+32,
+ CPCC_012 = 15+32
+ };
+ }
+
+ inline static const char *SPARCCondCodeToString(SPCC::CondCodes CC) {
+ switch (CC) {
+ case SPCC::ICC_A: return "a";
+ case SPCC::ICC_N: return "n";
+ case SPCC::ICC_NE: return "ne";
+ case SPCC::ICC_E: return "e";
+ case SPCC::ICC_G: return "g";
+ case SPCC::ICC_LE: return "le";
+ case SPCC::ICC_GE: return "ge";
+ case SPCC::ICC_L: return "l";
+ case SPCC::ICC_GU: return "gu";
+ case SPCC::ICC_LEU: return "leu";
+ case SPCC::ICC_CC: return "cc";
+ case SPCC::ICC_CS: return "cs";
+ case SPCC::ICC_POS: return "pos";
+ case SPCC::ICC_NEG: return "neg";
+ case SPCC::ICC_VC: return "vc";
+ case SPCC::ICC_VS: return "vs";
+ case SPCC::FCC_A: return "a";
+ case SPCC::FCC_N: return "n";
+ case SPCC::FCC_U: return "u";
+ case SPCC::FCC_G: return "g";
+ case SPCC::FCC_UG: return "ug";
+ case SPCC::FCC_L: return "l";
+ case SPCC::FCC_UL: return "ul";
+ case SPCC::FCC_LG: return "lg";
+ case SPCC::FCC_NE: return "ne";
+ case SPCC::FCC_E: return "e";
+ case SPCC::FCC_UE: return "ue";
+ case SPCC::FCC_GE: return "ge";
+ case SPCC::FCC_UGE: return "uge";
+ case SPCC::FCC_LE: return "le";
+ case SPCC::FCC_ULE: return "ule";
+ case SPCC::FCC_O: return "o";
+ case SPCC::CPCC_A: return "a";
+ case SPCC::CPCC_N: return "n";
+ case SPCC::CPCC_3: return "3";
+ case SPCC::CPCC_2: return "2";
+ case SPCC::CPCC_23: return "23";
+ case SPCC::CPCC_1: return "1";
+ case SPCC::CPCC_13: return "13";
+ case SPCC::CPCC_12: return "12";
+ case SPCC::CPCC_123: return "123";
+ case SPCC::CPCC_0: return "0";
+ case SPCC::CPCC_03: return "03";
+ case SPCC::CPCC_02: return "02";
+ case SPCC::CPCC_023: return "023";
+ case SPCC::CPCC_01: return "01";
+ case SPCC::CPCC_013: return "013";
+ case SPCC::CPCC_012: return "012";
+ }
+ llvm_unreachable("Invalid cond code");
+ }
+
+ inline static unsigned HI22(int64_t imm) {
+ return (unsigned)((imm >> 10) & ((1 << 22)-1));
+ }
+
+ inline static unsigned LO10(int64_t imm) {
+ return (unsigned)(imm & 0x3FF);
+ }
+
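+ // HIX22/LOX10 form the complemented pair used by %hix/%lox: for a
+ // negative 32-bit immediate, "sethi %hix(imm), reg; xor reg, %lox(imm),
+ // reg" reconstructs imm. For example, imm = -4 gives HIX22(-4) = 0 and
+ // LOX10(-4) sign-extends to -4, i.e. a sethi of 0 followed by an xor
+ // with -4.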
+ inline static unsigned HIX22(int64_t imm) {
+ return HI22(~imm);
+ }
+
+ inline static unsigned LOX10(int64_t imm) {
+ return ~LO10(~imm);
+ }
+
+} // end namespace llvm
+#endif
diff --git a/contrib/llvm/lib/Target/Sparc/Sparc.td b/contrib/llvm/lib/Target/Sparc/Sparc.td
new file mode 100644
index 000000000000..9e0a297c8812
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/Sparc.td
@@ -0,0 +1,179 @@
+//===-- Sparc.td - Describe the Sparc Target Machine -------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Target-independent interfaces which we are implementing
+//===----------------------------------------------------------------------===//
+
+include "llvm/Target/Target.td"
+
+//===----------------------------------------------------------------------===//
+// SPARC Subtarget features.
+//
+
+def FeatureSoftMulDiv
+ : SubtargetFeature<"soft-mul-div", "UseSoftMulDiv", "true",
+ "Use software emulation for integer multiply and divide">;
+
+def FeatureNoFSMULD
+ : SubtargetFeature<"no-fsmuld", "HasNoFSMULD", "true",
+ "Disable the fsmuld instruction.">;
+def FeatureNoFMULS
+ : SubtargetFeature<"no-fmuls", "HasNoFMULS", "true",
+ "Disable the fmuls instruction.">;
+
+def FeatureV9
+ : SubtargetFeature<"v9", "IsV9", "true",
+ "Enable SPARC-V9 instructions">;
+def FeatureV8Deprecated
+ : SubtargetFeature<"deprecated-v8", "V8DeprecatedInsts", "true",
+ "Enable deprecated V8 instructions in V9 mode">;
+def FeatureVIS
+ : SubtargetFeature<"vis", "IsVIS", "true",
+ "Enable UltraSPARC Visual Instruction Set extensions">;
+def FeatureVIS2
+ : SubtargetFeature<"vis2", "IsVIS2", "true",
+ "Enable Visual Instruction Set extensions II">;
+def FeatureVIS3
+ : SubtargetFeature<"vis3", "IsVIS3", "true",
+ "Enable Visual Instruction Set extensions III">;
+def FeatureLeon
+ : SubtargetFeature<"leon", "IsLeon", "true",
+ "Enable LEON extensions">;
+
+def FeatureHardQuad
+ : SubtargetFeature<"hard-quad-float", "HasHardQuad", "true",
+ "Enable quad-word floating point instructions">;
+
+def UsePopc : SubtargetFeature<"popc", "UsePopc", "true",
+ "Use the popc (population count) instruction">;
+
+def FeatureSoftFloat : SubtargetFeature<"soft-float", "UseSoftFloat", "true",
+ "Use software emulation for floating point">;
+
+//==== Features added predominantly for LEON subtarget support
+include "LeonFeatures.td"
+
+//===----------------------------------------------------------------------===//
+// Register File, Calling Conv, Instruction Descriptions
+//===----------------------------------------------------------------------===//
+
+include "SparcRegisterInfo.td"
+include "SparcCallingConv.td"
+include "SparcSchedule.td"
+include "SparcInstrInfo.td"
+
+def SparcInstrInfo : InstrInfo;
+
+def SparcAsmParser : AsmParser {
+ bit ShouldEmitMatchRegisterName = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// SPARC processors supported.
+//===----------------------------------------------------------------------===//
+
+class Proc<string Name, list<SubtargetFeature> Features>
+ : Processor<Name, NoItineraries, Features>;
+
+def : Proc<"generic", []>;
+def : Proc<"v7", [FeatureSoftMulDiv, FeatureNoFSMULD]>;
+def : Proc<"v8", []>;
+def : Proc<"supersparc", []>;
+def : Proc<"sparclite", []>;
+def : Proc<"f934", []>;
+def : Proc<"hypersparc", []>;
+def : Proc<"sparclite86x", []>;
+def : Proc<"sparclet", []>;
+def : Proc<"tsc701", []>;
+def : Proc<"myriad2", [FeatureLeon, LeonCASA]>;
+def : Proc<"myriad2.1", [FeatureLeon, LeonCASA]>;
+def : Proc<"myriad2.2", [FeatureLeon, LeonCASA]>;
+def : Proc<"myriad2.3", [FeatureLeon, LeonCASA]>;
+def : Proc<"ma2100", [FeatureLeon, LeonCASA]>;
+def : Proc<"ma2150", [FeatureLeon, LeonCASA]>;
+def : Proc<"ma2155", [FeatureLeon, LeonCASA]>;
+def : Proc<"ma2450", [FeatureLeon, LeonCASA]>;
+def : Proc<"ma2455", [FeatureLeon, LeonCASA]>;
+def : Proc<"ma2x5x", [FeatureLeon, LeonCASA]>;
+def : Proc<"ma2080", [FeatureLeon, LeonCASA]>;
+def : Proc<"ma2085", [FeatureLeon, LeonCASA]>;
+def : Proc<"ma2480", [FeatureLeon, LeonCASA]>;
+def : Proc<"ma2485", [FeatureLeon, LeonCASA]>;
+def : Proc<"ma2x8x", [FeatureLeon, LeonCASA]>;
+def : Proc<"v9", [FeatureV9]>;
+def : Proc<"ultrasparc", [FeatureV9, FeatureV8Deprecated, FeatureVIS]>;
+def : Proc<"ultrasparc3", [FeatureV9, FeatureV8Deprecated, FeatureVIS,
+ FeatureVIS2]>;
+def : Proc<"niagara", [FeatureV9, FeatureV8Deprecated, FeatureVIS,
+ FeatureVIS2]>;
+def : Proc<"niagara2", [FeatureV9, FeatureV8Deprecated, UsePopc,
+ FeatureVIS, FeatureVIS2]>;
+def : Proc<"niagara3", [FeatureV9, FeatureV8Deprecated, UsePopc,
+ FeatureVIS, FeatureVIS2]>;
+def : Proc<"niagara4", [FeatureV9, FeatureV8Deprecated, UsePopc,
+ FeatureVIS, FeatureVIS2, FeatureVIS3]>;
+
+// LEON 2 FT generic
+def : Processor<"leon2", LEON2Itineraries,
+ [FeatureLeon]>;
+
+// LEON 2 FT (AT697E)
+// TODO: Placeholder; processor-specific features will be added here.
+def : Processor<"at697e", LEON2Itineraries,
+ [FeatureLeon, ReplaceSDIV, InsertNOPLoad]>;
+
+// LEON 2 FT (AT697F)
+// TODO: Placeholder; processor-specific features will be added here.
+def : Processor<"at697f", LEON2Itineraries,
+ [FeatureLeon, InsertNOPLoad]>;
+
+
+// LEON 3 FT generic
+def : Processor<"leon3", LEON3Itineraries,
+ [FeatureLeon, UMACSMACSupport]>;
+
+// LEON 3 FT (UT699). Provides features for the UT699 processor:
+// - covers all the errata fixes for LEON3, but does not support the CASA instruction.
+def : Processor<"ut699", LEON3Itineraries,
+ [FeatureLeon, InsertNOPLoad, FeatureNoFSMULD, FeatureNoFMULS, FixAllFDIVSQRT]>;
+
+// LEON 3 FT (GR712RC). Provides features for the GR712RC processor:
+// - covers all the errata fixes for LEON3 and supports the CASA instruction.
+def : Processor<"gr712rc", LEON3Itineraries,
+ [FeatureLeon, LeonCASA]>;
+
+// LEON 4 FT generic
+def : Processor<"leon4", LEON4Itineraries,
+ [FeatureLeon, UMACSMACSupport, LeonCASA]>;
+
+// LEON 4 FT (GR740)
+// TODO: Placeholder; processor-specific features will be added here.
+def : Processor<"gr740", LEON4Itineraries,
+ [FeatureLeon, UMACSMACSupport, LeonCASA]>;
+
+//===----------------------------------------------------------------------===//
+// Declare the target which we are implementing
+//===----------------------------------------------------------------------===//
+
+def SparcAsmWriter : AsmWriter {
+ string AsmWriterClassName = "InstPrinter";
+ int PassSubtarget = 1;
+ int Variant = 0;
+}
+
+def Sparc : Target {
+ // Pull in Instruction Info:
+ let InstructionSet = SparcInstrInfo;
+ let AssemblyParsers = [SparcAsmParser];
+ let AssemblyWriters = [SparcAsmWriter];
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp b/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
new file mode 100644
index 000000000000..19fb94534b25
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcAsmPrinter.cpp
@@ -0,0 +1,449 @@
+//===-- SparcAsmPrinter.cpp - Sparc LLVM assembly writer ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains a printer that converts from our internal representation
+// of machine-dependent LLVM code to GAS-format SPARC assembly language.
+//
+//===----------------------------------------------------------------------===//
+
+#include "InstPrinter/SparcInstPrinter.h"
+#include "MCTargetDesc/SparcMCExpr.h"
+#include "Sparc.h"
+#include "SparcInstrInfo.h"
+#include "SparcTargetMachine.h"
+#include "SparcTargetStreamer.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSymbol.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "asm-printer"
+
+namespace {
+ class SparcAsmPrinter : public AsmPrinter {
+ SparcTargetStreamer &getTargetStreamer() {
+ return static_cast<SparcTargetStreamer &>(
+ *OutStreamer->getTargetStreamer());
+ }
+ public:
+ explicit SparcAsmPrinter(TargetMachine &TM,
+ std::unique_ptr<MCStreamer> Streamer)
+ : AsmPrinter(TM, std::move(Streamer)) {}
+
+ StringRef getPassName() const override { return "Sparc Assembly Printer"; }
+
+ void printOperand(const MachineInstr *MI, int opNum, raw_ostream &OS);
+ void printMemOperand(const MachineInstr *MI, int opNum, raw_ostream &OS,
+ const char *Modifier = nullptr);
+
+ void EmitFunctionBodyStart() override;
+ void EmitInstruction(const MachineInstr *MI) override;
+
+ static const char *getRegisterName(unsigned RegNo) {
+ return SparcInstPrinter::getRegisterName(RegNo);
+ }
+
+ bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O) override;
+ bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant, const char *ExtraCode,
+ raw_ostream &O) override;
+
+ void LowerGETPCXAndEmitMCInsts(const MachineInstr *MI,
+ const MCSubtargetInfo &STI);
+
+ };
+} // end of anonymous namespace
+
+static MCOperand createSparcMCOperand(SparcMCExpr::VariantKind Kind,
+ MCSymbol *Sym, MCContext &OutContext) {
+ const MCSymbolRefExpr *MCSym = MCSymbolRefExpr::create(Sym,
+ OutContext);
+ const SparcMCExpr *expr = SparcMCExpr::create(Kind, MCSym, OutContext);
+ return MCOperand::createExpr(expr);
+
+}
+static MCOperand createPCXCallOP(MCSymbol *Label,
+ MCContext &OutContext) {
+ return createSparcMCOperand(SparcMCExpr::VK_Sparc_None, Label, OutContext);
+}
+
+static MCOperand createPCXRelExprOp(SparcMCExpr::VariantKind Kind,
+ MCSymbol *GOTLabel, MCSymbol *StartLabel,
+ MCSymbol *CurLabel,
+ MCContext &OutContext)
+{
+ const MCSymbolRefExpr *GOT = MCSymbolRefExpr::create(GOTLabel, OutContext);
+ const MCSymbolRefExpr *Start = MCSymbolRefExpr::create(StartLabel,
+ OutContext);
+ const MCSymbolRefExpr *Cur = MCSymbolRefExpr::create(CurLabel,
+ OutContext);
+
+ const MCBinaryExpr *Sub = MCBinaryExpr::createSub(Cur, Start, OutContext);
+ const MCBinaryExpr *Add = MCBinaryExpr::createAdd(GOT, Sub, OutContext);
+ const SparcMCExpr *expr = SparcMCExpr::create(Kind,
+ Add, OutContext);
+ return MCOperand::createExpr(expr);
+}
+
+static void EmitCall(MCStreamer &OutStreamer,
+ MCOperand &Callee,
+ const MCSubtargetInfo &STI)
+{
+ MCInst CallInst;
+ CallInst.setOpcode(SP::CALL);
+ CallInst.addOperand(Callee);
+ OutStreamer.EmitInstruction(CallInst, STI);
+}
+
+static void EmitSETHI(MCStreamer &OutStreamer,
+ MCOperand &Imm, MCOperand &RD,
+ const MCSubtargetInfo &STI)
+{
+ MCInst SETHIInst;
+ SETHIInst.setOpcode(SP::SETHIi);
+ SETHIInst.addOperand(RD);
+ SETHIInst.addOperand(Imm);
+ OutStreamer.EmitInstruction(SETHIInst, STI);
+}
+
+static void EmitBinary(MCStreamer &OutStreamer, unsigned Opcode,
+ MCOperand &RS1, MCOperand &Src2, MCOperand &RD,
+ const MCSubtargetInfo &STI)
+{
+ MCInst Inst;
+ Inst.setOpcode(Opcode);
+ Inst.addOperand(RD);
+ Inst.addOperand(RS1);
+ Inst.addOperand(Src2);
+ OutStreamer.EmitInstruction(Inst, STI);
+}
+
+static void EmitOR(MCStreamer &OutStreamer,
+ MCOperand &RS1, MCOperand &Imm, MCOperand &RD,
+ const MCSubtargetInfo &STI) {
+ EmitBinary(OutStreamer, SP::ORri, RS1, Imm, RD, STI);
+}
+
+static void EmitADD(MCStreamer &OutStreamer,
+ MCOperand &RS1, MCOperand &RS2, MCOperand &RD,
+ const MCSubtargetInfo &STI) {
+ EmitBinary(OutStreamer, SP::ADDrr, RS1, RS2, RD, STI);
+}
+
+static void EmitSHL(MCStreamer &OutStreamer,
+ MCOperand &RS1, MCOperand &Imm, MCOperand &RD,
+ const MCSubtargetInfo &STI) {
+ EmitBinary(OutStreamer, SP::SLLri, RS1, Imm, RD, STI);
+}
+
+static void EmitHiLo(MCStreamer &OutStreamer, MCSymbol *GOTSym,
+ SparcMCExpr::VariantKind HiKind,
+ SparcMCExpr::VariantKind LoKind,
+ MCOperand &RD,
+ MCContext &OutContext,
+ const MCSubtargetInfo &STI) {
+
+ MCOperand hi = createSparcMCOperand(HiKind, GOTSym, OutContext);
+ MCOperand lo = createSparcMCOperand(LoKind, GOTSym, OutContext);
+ EmitSETHI(OutStreamer, hi, RD, STI);
+ EmitOR(OutStreamer, RD, lo, RD, STI);
+}
+
+void SparcAsmPrinter::LowerGETPCXAndEmitMCInsts(const MachineInstr *MI,
+ const MCSubtargetInfo &STI)
+{
+ MCSymbol *GOTLabel =
+ OutContext.getOrCreateSymbol(Twine("_GLOBAL_OFFSET_TABLE_"));
+
+ const MachineOperand &MO = MI->getOperand(0);
+ assert(MO.getReg() != SP::O7 &&
+ "%o7 is assigned as destination for getpcx!");
+
+ MCOperand MCRegOP = MCOperand::createReg(MO.getReg());
+
+ if (!isPositionIndependent()) {
+ // Just load the address of GOT to MCRegOP.
+ switch(TM.getCodeModel()) {
+ default:
+ llvm_unreachable("Unsupported absolute code model");
+ case CodeModel::Small:
+ EmitHiLo(*OutStreamer, GOTLabel,
+ SparcMCExpr::VK_Sparc_HI, SparcMCExpr::VK_Sparc_LO,
+ MCRegOP, OutContext, STI);
+ break;
+ case CodeModel::Medium: {
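+ // Build a 44-bit absolute address: sethi %h44 / or %m44 give bits
+ // 43:12, then shift left by 12 and or in %l44 for bits 11:0.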
+ EmitHiLo(*OutStreamer, GOTLabel,
+ SparcMCExpr::VK_Sparc_H44, SparcMCExpr::VK_Sparc_M44,
+ MCRegOP, OutContext, STI);
+ MCOperand imm = MCOperand::createExpr(MCConstantExpr::create(12,
+ OutContext));
+ EmitSHL(*OutStreamer, MCRegOP, imm, MCRegOP, STI);
+ MCOperand lo = createSparcMCOperand(SparcMCExpr::VK_Sparc_L44,
+ GOTLabel, OutContext);
+ EmitOR(*OutStreamer, MCRegOP, lo, MCRegOP, STI);
+ break;
+ }
+ case CodeModel::Large: {
+ EmitHiLo(*OutStreamer, GOTLabel,
+ SparcMCExpr::VK_Sparc_HH, SparcMCExpr::VK_Sparc_HM,
+ MCRegOP, OutContext, STI);
+ MCOperand imm = MCOperand::createExpr(MCConstantExpr::create(32,
+ OutContext));
+ EmitSHL(*OutStreamer, MCRegOP, imm, MCRegOP, STI);
+ // Use register %o7 to load the lower 32 bits.
+ MCOperand RegO7 = MCOperand::createReg(SP::O7);
+ EmitHiLo(*OutStreamer, GOTLabel,
+ SparcMCExpr::VK_Sparc_HI, SparcMCExpr::VK_Sparc_LO,
+ RegO7, OutContext, STI);
+ EmitADD(*OutStreamer, MCRegOP, RegO7, MCRegOP, STI);
+ }
+ }
+ return;
+ }
+
+ MCSymbol *StartLabel = OutContext.createTempSymbol();
+ MCSymbol *EndLabel = OutContext.createTempSymbol();
+ MCSymbol *SethiLabel = OutContext.createTempSymbol();
+
+ MCOperand RegO7 = MCOperand::createReg(SP::O7);
+
+ // <StartLabel>:
+ // call <EndLabel>
+ // <SethiLabel>:
+ // sethi %hi(_GLOBAL_OFFSET_TABLE_+(<SethiLabel>-<StartLabel>)), <MO>
+ // <EndLabel>:
+ // or <MO>, %lo(_GLOBAL_OFFSET_TABLE_+(<EndLabel>-<StartLabel>)), <MO>
+ // add <MO>, %o7, <MO>
+
+ OutStreamer->EmitLabel(StartLabel);
+ MCOperand Callee = createPCXCallOP(EndLabel, OutContext);
+ EmitCall(*OutStreamer, Callee, STI);
+ OutStreamer->EmitLabel(SethiLabel);
+ MCOperand hiImm = createPCXRelExprOp(SparcMCExpr::VK_Sparc_PC22,
+ GOTLabel, StartLabel, SethiLabel,
+ OutContext);
+ EmitSETHI(*OutStreamer, hiImm, MCRegOP, STI);
+ OutStreamer->EmitLabel(EndLabel);
+ MCOperand loImm = createPCXRelExprOp(SparcMCExpr::VK_Sparc_PC10,
+ GOTLabel, StartLabel, EndLabel,
+ OutContext);
+ EmitOR(*OutStreamer, MCRegOP, loImm, MCRegOP, STI);
+ EmitADD(*OutStreamer, MCRegOP, RegO7, MCRegOP, STI);
+}
+
+void SparcAsmPrinter::EmitInstruction(const MachineInstr *MI)
+{
+
+ switch (MI->getOpcode()) {
+ default: break;
+ case TargetOpcode::DBG_VALUE:
+ // FIXME: Debug Value.
+ return;
+ case SP::GETPCX:
+ LowerGETPCXAndEmitMCInsts(MI, getSubtargetInfo());
+ return;
+ }
+ MachineBasicBlock::const_instr_iterator I = MI->getIterator();
+ MachineBasicBlock::const_instr_iterator E = MI->getParent()->instr_end();
+ do {
+ MCInst TmpInst;
+ LowerSparcMachineInstrToMCInst(&*I, TmpInst, *this);
+ EmitToStreamer(*OutStreamer, TmpInst);
+ } while ((++I != E) && I->isInsideBundle()); // Delay slot check.
+}
+
+void SparcAsmPrinter::EmitFunctionBodyStart() {
+ if (!MF->getSubtarget<SparcSubtarget>().is64Bit())
+ return;
+
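+ // The 64-bit ABI reserves %g2/%g3 as application registers and %g6/%g7
+ // for the system, so declare each one the function actually uses with a
+ // .register directive.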
+ const MachineRegisterInfo &MRI = MF->getRegInfo();
+ const unsigned globalRegs[] = { SP::G2, SP::G3, SP::G6, SP::G7, 0 };
+ for (unsigned i = 0; globalRegs[i] != 0; ++i) {
+ unsigned reg = globalRegs[i];
+ if (MRI.use_empty(reg))
+ continue;
+
+ if (reg == SP::G6 || reg == SP::G7)
+ getTargetStreamer().emitSparcRegisterIgnore(reg);
+ else
+ getTargetStreamer().emitSparcRegisterScratch(reg);
+ }
+}
+
+void SparcAsmPrinter::printOperand(const MachineInstr *MI, int opNum,
+ raw_ostream &O) {
+ const DataLayout &DL = getDataLayout();
+ const MachineOperand &MO = MI->getOperand (opNum);
+ SparcMCExpr::VariantKind TF = (SparcMCExpr::VariantKind) MO.getTargetFlags();
+
+#ifndef NDEBUG
+ // Verify the target flags.
+ if (MO.isGlobal() || MO.isSymbol() || MO.isCPI()) {
+ if (MI->getOpcode() == SP::CALL)
+ assert(TF == SparcMCExpr::VK_Sparc_None &&
+ "Cannot handle target flags on call address");
+ else if (MI->getOpcode() == SP::SETHIi || MI->getOpcode() == SP::SETHIXi)
+ assert((TF == SparcMCExpr::VK_Sparc_HI
+ || TF == SparcMCExpr::VK_Sparc_H44
+ || TF == SparcMCExpr::VK_Sparc_HH
+ || TF == SparcMCExpr::VK_Sparc_TLS_GD_HI22
+ || TF == SparcMCExpr::VK_Sparc_TLS_LDM_HI22
+ || TF == SparcMCExpr::VK_Sparc_TLS_LDO_HIX22
+ || TF == SparcMCExpr::VK_Sparc_TLS_IE_HI22
+ || TF == SparcMCExpr::VK_Sparc_TLS_LE_HIX22) &&
+ "Invalid target flags for address operand on sethi");
+ else if (MI->getOpcode() == SP::TLS_CALL)
+ assert((TF == SparcMCExpr::VK_Sparc_None
+ || TF == SparcMCExpr::VK_Sparc_TLS_GD_CALL
+ || TF == SparcMCExpr::VK_Sparc_TLS_LDM_CALL) &&
+ "Cannot handle target flags on tls call address");
+ else if (MI->getOpcode() == SP::TLS_ADDrr)
+ assert((TF == SparcMCExpr::VK_Sparc_TLS_GD_ADD
+ || TF == SparcMCExpr::VK_Sparc_TLS_LDM_ADD
+ || TF == SparcMCExpr::VK_Sparc_TLS_LDO_ADD
+ || TF == SparcMCExpr::VK_Sparc_TLS_IE_ADD) &&
+ "Cannot handle target flags on add for TLS");
+ else if (MI->getOpcode() == SP::TLS_LDrr)
+ assert(TF == SparcMCExpr::VK_Sparc_TLS_IE_LD &&
+ "Cannot handle target flags on ld for TLS");
+ else if (MI->getOpcode() == SP::TLS_LDXrr)
+ assert(TF == SparcMCExpr::VK_Sparc_TLS_IE_LDX &&
+ "Cannot handle target flags on ldx for TLS");
+ else if (MI->getOpcode() == SP::XORri || MI->getOpcode() == SP::XORXri)
+ assert((TF == SparcMCExpr::VK_Sparc_TLS_LDO_LOX10
+ || TF == SparcMCExpr::VK_Sparc_TLS_LE_LOX10) &&
+ "Cannot handle target flags on xor for TLS");
+ else
+ assert((TF == SparcMCExpr::VK_Sparc_LO
+ || TF == SparcMCExpr::VK_Sparc_M44
+ || TF == SparcMCExpr::VK_Sparc_L44
+ || TF == SparcMCExpr::VK_Sparc_HM
+ || TF == SparcMCExpr::VK_Sparc_TLS_GD_LO10
+ || TF == SparcMCExpr::VK_Sparc_TLS_LDM_LO10
+ || TF == SparcMCExpr::VK_Sparc_TLS_IE_LO10 ) &&
+ "Invalid target flags for small address operand");
+ }
+#endif
+
+ bool CloseParen = SparcMCExpr::printVariantKind(O, TF);
+
+ switch (MO.getType()) {
+ case MachineOperand::MO_Register:
+ O << "%" << StringRef(getRegisterName(MO.getReg())).lower();
+ break;
+
+ case MachineOperand::MO_Immediate:
+ O << (int)MO.getImm();
+ break;
+ case MachineOperand::MO_MachineBasicBlock:
+ MO.getMBB()->getSymbol()->print(O, MAI);
+ return;
+ case MachineOperand::MO_GlobalAddress:
+ getSymbol(MO.getGlobal())->print(O, MAI);
+ break;
+ case MachineOperand::MO_BlockAddress:
+ O << GetBlockAddressSymbol(MO.getBlockAddress())->getName();
+ break;
+ case MachineOperand::MO_ExternalSymbol:
+ O << MO.getSymbolName();
+ break;
+ case MachineOperand::MO_ConstantPoolIndex:
+ O << DL.getPrivateGlobalPrefix() << "CPI" << getFunctionNumber() << "_"
+ << MO.getIndex();
+ break;
+ case MachineOperand::MO_Metadata:
+ MO.getMetadata()->printAsOperand(O, MMI->getModule());
+ break;
+ default:
+ llvm_unreachable("<unknown operand type>");
+ }
+ if (CloseParen) O << ")";
+}
+
+void SparcAsmPrinter::printMemOperand(const MachineInstr *MI, int opNum,
+ raw_ostream &O, const char *Modifier) {
+ printOperand(MI, opNum, O);
+
+ // If this is an ADD operand, emit it like normal operands.
+ if (Modifier && !strcmp(Modifier, "arith")) {
+ O << ", ";
+ printOperand(MI, opNum+1, O);
+ return;
+ }
+
+ if (MI->getOperand(opNum+1).isReg() &&
+ MI->getOperand(opNum+1).getReg() == SP::G0)
+ return; // don't print "+%g0"
+ if (MI->getOperand(opNum+1).isImm() &&
+ MI->getOperand(opNum+1).getImm() == 0)
+ return; // don't print "+0"
+
+ O << "+";
+ printOperand(MI, opNum+1, O);
+}
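+
+// Illustrative output of printMemOperand (inferred from the code above, not
+// an authoritative spec): operands (%i0, 8) print as "%i0+8"; a %g0 index
+// register or a 0 immediate is suppressed, so those cases print just "%i0",
+// and the caller can bracket the result as "[%i0]".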
+
+/// PrintAsmOperand - Print out an operand for an inline asm expression.
+///
+bool SparcAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
+ unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &O) {
+ if (ExtraCode && ExtraCode[0]) {
+ if (ExtraCode[1] != 0) return true; // Unknown modifier.
+
+ switch (ExtraCode[0]) {
+ default:
+ // See if this is a generic print operand
+ return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
+ case 'f':
+ case 'r':
+ break;
+ }
+ }
+
+ printOperand(MI, OpNo, O);
+
+ return false;
+}
+
+bool SparcAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
+ unsigned OpNo, unsigned AsmVariant,
+ const char *ExtraCode,
+ raw_ostream &O) {
+ if (ExtraCode && ExtraCode[0])
+ return true; // Unknown modifier
+
+ O << '[';
+ printMemOperand(MI, OpNo, O);
+ O << ']';
+
+ return false;
+}
+
+// Force static initialization.
+extern "C" void LLVMInitializeSparcAsmPrinter() {
+ RegisterAsmPrinter<SparcAsmPrinter> X(getTheSparcTarget());
+ RegisterAsmPrinter<SparcAsmPrinter> Y(getTheSparcV9Target());
+ RegisterAsmPrinter<SparcAsmPrinter> Z(getTheSparcelTarget());
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcCallingConv.td b/contrib/llvm/lib/Target/Sparc/SparcCallingConv.td
new file mode 100644
index 000000000000..0aa29d186dc1
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcCallingConv.td
@@ -0,0 +1,144 @@
+//===-- SparcCallingConv.td - Calling Conventions Sparc ----*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This describes the calling conventions for the Sparc architectures.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// SPARC v8 32-bit.
+//===----------------------------------------------------------------------===//
+
+def CC_Sparc32 : CallingConv<[
+ // Custom assign SRet to [sp+64].
+ CCIfSRet<CCCustom<"CC_Sparc_Assign_SRet">>,
+  // i32 and f32 arguments get passed in integer registers if there is space.
+ CCIfType<[i32, f32], CCAssignToReg<[I0, I1, I2, I3, I4, I5]>>,
+ // f64 arguments are split and passed through registers or through stack.
+ CCIfType<[f64], CCCustom<"CC_Sparc_Assign_Split_64">>,
+  // As are v2i32 arguments (this would be the default behavior for
+  // v2i32 if it weren't allocated to the IntPair register class).
+ CCIfType<[v2i32], CCCustom<"CC_Sparc_Assign_Split_64">>,
+
+ // Alternatively, they are assigned to the stack in 4-byte aligned units.
+ CCAssignToStack<4, 4>
+]>;
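+
+// A worked example of the rules above (illustrative, not normative): for
+// void f(int a, double b, int c), a is assigned to %i0, b is split across
+// %i1/%i2 by CC_Sparc_Assign_Split_64, and c lands in %i3. Once %i0-%i5 are
+// exhausted, further words go to 4-byte aligned stack slots.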
+
+def RetCC_Sparc32 : CallingConv<[
+ CCIfType<[i32], CCAssignToReg<[I0, I1, I2, I3, I4, I5]>>,
+ CCIfType<[f32], CCAssignToReg<[F0, F1, F2, F3]>>,
+ CCIfType<[f64], CCAssignToReg<[D0, D1]>>,
+ CCIfType<[v2i32], CCCustom<"CC_Sparc_Assign_Ret_Split_64">>
+]>;
+
+//===----------------------------------------------------------------------===//
+// SPARC v9 64-bit.
+//===----------------------------------------------------------------------===//
+//
+// The 64-bit ABI conceptually assigns all function arguments to a parameter
+// array starting at [%fp+BIAS+128] in the callee's stack frame. All arguments
+// occupy a multiple of 8 bytes in the array. Integer arguments are extended to
+// 64 bits by the caller. Floats are right-aligned in their 8-byte slot, the
+// first 4 bytes in the slot are undefined.
+//
+// The integer registers %i0 to %i5 shadow the first 48 bytes of the parameter
+// array at fixed offsets. Integer arguments are promoted to registers when
+// possible.
+//
+// The floating point registers %f0 to %f31 shadow the first 128 bytes of the
+// parameter array at fixed offsets. Float and double parameters are promoted
+// to these registers when possible.
+//
+// Structs up to 16 bytes in size are passed by value. They are right-aligned
+// in one or two 8-byte slots in the parameter array. Struct members are
+// promoted to both floating point and integer registers when possible. A
+// struct containing two floats would thus be passed in %f0 and %f1, while two
+// float function arguments would occupy 8 bytes each, and be passed in %f1 and
+// %f3.
+//
+// When a struct { int, float } is passed by value, the int goes in the high
+// bits of an integer register while the float goes in a floating point
+// register.
+//
+// The difference is encoded in LLVM IR using the inreg attribute on function
+// arguments:
+//
+// C: void f(float, float);
+// IR: declare void f(float %f1, float %f3)
+//
+// C: void f(struct { float f0, f1; });
+// IR: declare void f(float inreg %f0, float inreg %f1)
+//
+// C: void f(int, float);
+// IR: declare void f(int signext %i0, float %f3)
+//
+// C: void f(struct { int i0high; float f1; });
+// IR: declare void f(i32 inreg %i0high, float inreg %f1)
+//
+// Two ints in a struct are simply coerced to i64:
+//
+// C: void f(struct { int i0high, i0low; });
+// IR: declare void f(i64 %i0.coerced)
+//
+// The frontend and backend divide the task of producing ABI compliant code for
+// C functions. The C frontend will:
+//
+// - Annotate integer arguments with zeroext or signext attributes.
+//
+// - Split structs into one or two 64-bit sized chunks, or 32-bit chunks with
+// inreg attributes.
+//
+// - Pass structs larger than 16 bytes indirectly with an explicit pointer
+// argument. The byval attribute is not used.
+//
+// The backend will:
+//
+// - Assign all arguments to 64-bit aligned stack slots, 32-bits for inreg.
+//
+// - Promote to integer or floating point registers depending on type.
+//
+// Function return values are passed exactly like function arguments, except a
+// struct up to 32 bytes in size can be returned in registers.
+
+// Function arguments AND most return values.
+def CC_Sparc64 : CallingConv<[
+ // The frontend uses the inreg flag to indicate i32 and float arguments from
+ // structs. These arguments are not promoted to 64 bits, but they can still
+ // be assigned to integer and float registers.
+ CCIfInReg<CCIfType<[i32, f32], CCCustom<"CC_Sparc64_Half">>>,
+
+ // All integers are promoted to i64 by the caller.
+ CCIfType<[i32], CCPromoteToType<i64>>,
+
+ // Custom assignment is required because stack space is reserved for all
+ // arguments whether they are passed in registers or not.
+ CCCustom<"CC_Sparc64_Full">
+]>;
+
+def RetCC_Sparc64 : CallingConv<[
+ // A single f32 return value always goes in %f0. The ABI doesn't specify what
+ // happens to multiple f32 return values outside a struct.
+ CCIfType<[f32], CCCustom<"CC_Sparc64_Half">>,
+
+ // Otherwise, return values are passed exactly like arguments.
+ CCDelegateTo<CC_Sparc64>
+]>;
+
+// Callee-saved registers are handled by the register window mechanism.
+def CSR : CalleeSavedRegs<(add)> {
+ let OtherPreserved = (add (sequence "I%u", 0, 7),
+ (sequence "L%u", 0, 7));
+}
+
+// Callee-saved registers for calls with ReturnsTwice attribute.
+def RTCSR : CalleeSavedRegs<(add)> {
+ let OtherPreserved = (add I6, I7);
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
new file mode 100644
index 000000000000..9864aa372354
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.cpp
@@ -0,0 +1,367 @@
+//===-- SparcFrameLowering.cpp - Sparc Frame Information ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Sparc implementation of TargetFrameLowering class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcFrameLowering.h"
+#include "SparcInstrInfo.h"
+#include "SparcMachineFunctionInfo.h"
+#include "SparcSubtarget.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineModuleInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Target/TargetOptions.h"
+
+using namespace llvm;
+
+static cl::opt<bool>
+DisableLeafProc("disable-sparc-leaf-proc",
+ cl::init(false),
+ cl::desc("Disable Sparc leaf procedure optimization."),
+ cl::Hidden);
+
+SparcFrameLowering::SparcFrameLowering(const SparcSubtarget &ST)
+ : TargetFrameLowering(TargetFrameLowering::StackGrowsDown,
+ ST.is64Bit() ? 16 : 8, 0, ST.is64Bit() ? 16 : 8) {}
+
+void SparcFrameLowering::emitSPAdjustment(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ int NumBytes,
+ unsigned ADDrr,
+ unsigned ADDri) const {
+
+ DebugLoc dl;
+ const SparcInstrInfo &TII =
+ *static_cast<const SparcInstrInfo *>(MF.getSubtarget().getInstrInfo());
+
+ if (NumBytes >= -4096 && NumBytes < 4096) {
+ BuildMI(MBB, MBBI, dl, TII.get(ADDri), SP::O6)
+ .addReg(SP::O6).addImm(NumBytes);
+ return;
+ }
+
+ // Emit this the hard way. This clobbers G1 which we always know is
+ // available here.
+ if (NumBytes >= 0) {
+ // Emit nonnegative numbers with sethi + or.
+ // sethi %hi(NumBytes), %g1
+ // or %g1, %lo(NumBytes), %g1
+ // add %sp, %g1, %sp
+ BuildMI(MBB, MBBI, dl, TII.get(SP::SETHIi), SP::G1)
+ .addImm(HI22(NumBytes));
+ BuildMI(MBB, MBBI, dl, TII.get(SP::ORri), SP::G1)
+ .addReg(SP::G1).addImm(LO10(NumBytes));
+ BuildMI(MBB, MBBI, dl, TII.get(ADDrr), SP::O6)
+ .addReg(SP::O6).addReg(SP::G1);
+    return;
+ }
+
+ // Emit negative numbers with sethi + xor.
+ // sethi %hix(NumBytes), %g1
+ // xor %g1, %lox(NumBytes), %g1
+ // add %sp, %g1, %sp
+ BuildMI(MBB, MBBI, dl, TII.get(SP::SETHIi), SP::G1)
+ .addImm(HIX22(NumBytes));
+ BuildMI(MBB, MBBI, dl, TII.get(SP::XORri), SP::G1)
+ .addReg(SP::G1).addImm(LOX10(NumBytes));
+ BuildMI(MBB, MBBI, dl, TII.get(ADDrr), SP::O6)
+ .addReg(SP::O6).addReg(SP::G1);
+}
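+
+// A hedged sketch of the immediate split used above, assuming the HI22/LO10
+// helpers from Sparc.h compute (imm >> 10) and (imm & 0x3ff), with
+// HIX22/LOX10 as their one's-complement variants for negative values:
+//
+//   NumBytes = 5000:   sethi 4, %g1         ! 4 << 10 == 4096
+//                      or    %g1, 904, %g1  ! 4096 | 904 == 5000
+//   NumBytes = -5000:  sethi %hix(-5000), %g1
+//                      xor   %g1, %lox(-5000), %g1
+//                      (the xor sign-extends the low bits, producing the
+//                      negative value sethi alone cannot encode)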
+
+void SparcFrameLowering::emitPrologue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
+
+ assert(&MF.front() == &MBB && "Shrink-wrapping not yet supported");
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ const SparcInstrInfo &TII =
+ *static_cast<const SparcInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ const SparcRegisterInfo &RegInfo =
+ *static_cast<const SparcRegisterInfo *>(MF.getSubtarget().getRegisterInfo());
+ MachineBasicBlock::iterator MBBI = MBB.begin();
+ // Debug location must be unknown since the first debug location is used
+ // to determine the end of the prologue.
+ DebugLoc dl;
+ bool NeedsStackRealignment = RegInfo.needsStackRealignment(MF);
+
+ // FIXME: unfortunately, returning false from canRealignStack
+ // actually just causes needsStackRealignment to return false,
+ // rather than reporting an error, as would be sensible. This is
+ // poor, but fixing that bogosity is going to be a large project.
+ // For now, just see if it's lied, and report an error here.
+ if (!NeedsStackRealignment && MFI.getMaxAlignment() > getStackAlignment())
+ report_fatal_error("Function \"" + Twine(MF.getName()) + "\" required "
+ "stack re-alignment, but LLVM couldn't handle it "
+ "(probably because it has a dynamic alloca).");
+
+ // Get the number of bytes to allocate from the FrameInfo
+ int NumBytes = (int) MFI.getStackSize();
+
+ unsigned SAVEri = SP::SAVEri;
+ unsigned SAVErr = SP::SAVErr;
+ if (FuncInfo->isLeafProc()) {
+ if (NumBytes == 0)
+ return;
+ SAVEri = SP::ADDri;
+ SAVErr = SP::ADDrr;
+ }
+
+ // The SPARC ABI is a bit odd in that it requires a reserved 92-byte
+ // (128 in v9) area in the user's stack, starting at %sp. Thus, the
+ // first part of the stack that can actually be used is located at
+ // %sp + 92.
+ //
+  // We therefore need to add that offset to the total stack size after
+  // all the stack objects are placed by PrologEpilogInserter's
+  // calculateFrameObjectOffsets. However, since the stack needs to be
+  // aligned *after* the extra size is added, we need to disable
+  // calculateFrameObjectOffsets's built-in stack alignment, by having
+  // targetHandlesStackFrameRounding return true.
+
+ // Add the extra call frame stack size, if needed. (This is the same
+ // code as in PrologEpilogInserter, but also gets disabled by
+ // targetHandlesStackFrameRounding)
+ if (MFI.adjustsStack() && hasReservedCallFrame(MF))
+ NumBytes += MFI.getMaxCallFrameSize();
+
+ // Adds the SPARC subtarget-specific spill area to the stack
+ // size. Also ensures target-required alignment.
+ NumBytes = MF.getSubtarget<SparcSubtarget>().getAdjustedFrameSize(NumBytes);
+
+ // Finally, ensure that the size is sufficiently aligned for the
+ // data on the stack.
+ if (MFI.getMaxAlignment() > 0) {
+ NumBytes = alignTo(NumBytes, MFI.getMaxAlignment());
+ }
+
+ // Update stack size with corrected value.
+ MFI.setStackSize(NumBytes);
+
+ emitSPAdjustment(MF, MBB, MBBI, -NumBytes, SAVErr, SAVEri);
+
+ unsigned regFP = RegInfo.getDwarfRegNum(SP::I6, true);
+
+ // Emit ".cfi_def_cfa_register 30".
+ unsigned CFIIndex =
+ MF.addFrameInst(MCCFIInstruction::createDefCfaRegister(nullptr, regFP));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ // Emit ".cfi_window_save".
+ CFIIndex = MF.addFrameInst(MCCFIInstruction::createWindowSave(nullptr));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ unsigned regInRA = RegInfo.getDwarfRegNum(SP::I7, true);
+ unsigned regOutRA = RegInfo.getDwarfRegNum(SP::O7, true);
+ // Emit ".cfi_register 15, 31".
+ CFIIndex = MF.addFrameInst(
+ MCCFIInstruction::createRegister(nullptr, regOutRA, regInRA));
+ BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
+ .addCFIIndex(CFIIndex);
+
+ if (NeedsStackRealignment) {
+ // andn %o6, MaxAlign-1, %o6
+ int MaxAlign = MFI.getMaxAlignment();
+ BuildMI(MBB, MBBI, dl, TII.get(SP::ANDNri), SP::O6).addReg(SP::O6).addImm(MaxAlign - 1);
+ }
+}
+
+MachineBasicBlock::iterator SparcFrameLowering::
+eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const {
+ if (!hasReservedCallFrame(MF)) {
+ MachineInstr &MI = *I;
+ int Size = MI.getOperand(0).getImm();
+ if (MI.getOpcode() == SP::ADJCALLSTACKDOWN)
+ Size = -Size;
+
+ if (Size)
+ emitSPAdjustment(MF, MBB, I, Size, SP::ADDrr, SP::ADDri);
+ }
+ return MBB.erase(I);
+}
+
+void SparcFrameLowering::emitEpilogue(MachineFunction &MF,
+ MachineBasicBlock &MBB) const {
+ SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
+ MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
+ const SparcInstrInfo &TII =
+ *static_cast<const SparcInstrInfo *>(MF.getSubtarget().getInstrInfo());
+ DebugLoc dl = MBBI->getDebugLoc();
+ assert(MBBI->getOpcode() == SP::RETL &&
+ "Can only put epilog before 'retl' instruction!");
+ if (!FuncInfo->isLeafProc()) {
+ BuildMI(MBB, MBBI, dl, TII.get(SP::RESTORErr), SP::G0).addReg(SP::G0)
+ .addReg(SP::G0);
+ return;
+ }
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+
+ int NumBytes = (int) MFI.getStackSize();
+ if (NumBytes == 0)
+ return;
+
+ emitSPAdjustment(MF, MBB, MBBI, NumBytes, SP::ADDrr, SP::ADDri);
+}
+
+bool SparcFrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
+ // Reserve call frame if there are no variable sized objects on the stack.
+ return !MF.getFrameInfo().hasVarSizedObjects();
+}
+
+// hasFP - Return true if the specified function should have a dedicated frame
+// pointer register. This is true if the function has variable sized allocas or
+// if frame pointer elimination is disabled.
+bool SparcFrameLowering::hasFP(const MachineFunction &MF) const {
+ const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
+
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ return MF.getTarget().Options.DisableFramePointerElim(MF) ||
+ RegInfo->needsStackRealignment(MF) ||
+ MFI.hasVarSizedObjects() ||
+ MFI.isFrameAddressTaken();
+}
+
+int SparcFrameLowering::getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const {
+ const SparcSubtarget &Subtarget = MF.getSubtarget<SparcSubtarget>();
+ const MachineFrameInfo &MFI = MF.getFrameInfo();
+ const SparcRegisterInfo *RegInfo = Subtarget.getRegisterInfo();
+ const SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
+ bool isFixed = MFI.isFixedObjectIndex(FI);
+
+ // Addressable stack objects are accessed using neg. offsets from
+ // %fp, or positive offsets from %sp.
+ bool UseFP;
+
+  // Sparc uses FP-based references in general, even when "hasFP" is
+  // false. That function is rather a misnomer, because %fp is
+  // actually always available, except in a leaf procedure.
+  if (FuncInfo->isLeafProc()) {
+    // In a leaf procedure, all offsets need to be %sp-based, because
+    // we haven't caused %fp to actually point to our frame.
+ UseFP = false;
+ } else if (isFixed) {
+ // Otherwise, argument access should always use %fp.
+ UseFP = true;
+ } else if (RegInfo->needsStackRealignment(MF)) {
+ // If there is dynamic stack realignment, all local object
+ // references need to be via %sp, to take account of the
+ // re-alignment.
+ UseFP = false;
+ } else {
+ // Finally, default to using %fp.
+ UseFP = true;
+ }
+
+ int64_t FrameOffset = MF.getFrameInfo().getObjectOffset(FI) +
+ Subtarget.getStackPointerBias();
+
+ if (UseFP) {
+ FrameReg = RegInfo->getFrameRegister(MF);
+ return FrameOffset;
+ } else {
+ FrameReg = SP::O6; // %sp
+ return FrameOffset + MF.getFrameInfo().getStackSize();
+ }
+}
+
+static bool LLVM_ATTRIBUTE_UNUSED
+verifyLeafProcRegUse(MachineRegisterInfo *MRI) {
+ for (unsigned reg = SP::I0; reg <= SP::I7; ++reg)
+ if (MRI->isPhysRegUsed(reg))
+ return false;
+
+ for (unsigned reg = SP::L0; reg <= SP::L7; ++reg)
+ if (MRI->isPhysRegUsed(reg))
+ return false;
+
+ return true;
+}
+
+bool SparcFrameLowering::isLeafProc(MachineFunction &MF) const {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+
+ return !(MFI.hasCalls() // has calls
+ || MRI.isPhysRegUsed(SP::L0) // Too many registers needed
+ || MRI.isPhysRegUsed(SP::O6) // %sp is used
+ || hasFP(MF)); // need %fp
+}
+
+void SparcFrameLowering::remapRegsForLeafProc(MachineFunction &MF) const {
+ MachineRegisterInfo &MRI = MF.getRegInfo();
+ // Remap %i[0-7] to %o[0-7].
+ for (unsigned reg = SP::I0; reg <= SP::I7; ++reg) {
+ if (!MRI.isPhysRegUsed(reg))
+ continue;
+
+ unsigned mapped_reg = reg - SP::I0 + SP::O0;
+
+ // Replace I register with O register.
+ MRI.replaceRegWith(reg, mapped_reg);
+
+ // Also replace register pair super-registers.
+ if ((reg - SP::I0) % 2 == 0) {
+ unsigned preg = (reg - SP::I0) / 2 + SP::I0_I1;
+ unsigned mapped_preg = preg - SP::I0_I1 + SP::O0_O1;
+ MRI.replaceRegWith(preg, mapped_preg);
+ }
+ }
+
+ // Rewrite MBB's Live-ins.
+ for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
+ MBB != E; ++MBB) {
+ for (unsigned reg = SP::I0_I1; reg <= SP::I6_I7; ++reg) {
+ if (!MBB->isLiveIn(reg))
+ continue;
+ MBB->removeLiveIn(reg);
+ MBB->addLiveIn(reg - SP::I0_I1 + SP::O0_O1);
+ }
+ for (unsigned reg = SP::I0; reg <= SP::I7; ++reg) {
+ if (!MBB->isLiveIn(reg))
+ continue;
+ MBB->removeLiveIn(reg);
+ MBB->addLiveIn(reg - SP::I0 + SP::O0);
+ }
+ }
+
+ assert(verifyLeafProcRegUse(&MRI));
+#ifdef EXPENSIVE_CHECKS
+ MF.verify(0, "After LeafProc Remapping");
+#endif
+}
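+
+// Illustrative effect (an assumption for exposition): in a leaf procedure
+// whose first argument arrived in %i0, the remapping above rewrites every
+// use of %i0 to %o0 (and the pair %i0_i1 to %o0_o1), letting the function
+// run in the caller's register window without a SAVE/RESTORE.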
+
+void SparcFrameLowering::determineCalleeSaves(MachineFunction &MF,
+ BitVector &SavedRegs,
+ RegScavenger *RS) const {
+ TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
+ if (!DisableLeafProc && isLeafProc(MF)) {
+ SparcMachineFunctionInfo *MFI = MF.getInfo<SparcMachineFunctionInfo>();
+ MFI->setLeafProc(true);
+
+ remapRegsForLeafProc(MF);
+ }
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h
new file mode 100644
index 000000000000..6098afa68985
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcFrameLowering.h
@@ -0,0 +1,68 @@
+//===-- SparcFrameLowering.h - Define frame lowering for Sparc --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Sparc implementation of the TargetFrameLowering
+// class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_SPARCFRAMELOWERING_H
+#define LLVM_LIB_TARGET_SPARC_SPARCFRAMELOWERING_H
+
+#include "Sparc.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+
+namespace llvm {
+
+class SparcSubtarget;
+class SparcFrameLowering : public TargetFrameLowering {
+public:
+ explicit SparcFrameLowering(const SparcSubtarget &ST);
+
+ /// emitProlog/emitEpilog - These methods insert prolog and epilog code into
+ /// the function.
+ void emitPrologue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+ void emitEpilogue(MachineFunction &MF, MachineBasicBlock &MBB) const override;
+
+ MachineBasicBlock::iterator
+ eliminateCallFramePseudoInstr(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I) const override;
+
+ bool hasReservedCallFrame(const MachineFunction &MF) const override;
+ bool hasFP(const MachineFunction &MF) const override;
+ void determineCalleeSaves(MachineFunction &MF, BitVector &SavedRegs,
+ RegScavenger *RS = nullptr) const override;
+
+ int getFrameIndexReference(const MachineFunction &MF, int FI,
+ unsigned &FrameReg) const override;
+
+ /// targetHandlesStackFrameRounding - Returns true if the target is
+ /// responsible for rounding up the stack frame (probably at emitPrologue
+ /// time).
+ bool targetHandlesStackFrameRounding() const override { return true; }
+
+private:
+ // Remap input registers to output registers for leaf procedure.
+ void remapRegsForLeafProc(MachineFunction &MF) const;
+
+ // Returns true if MF is a leaf procedure.
+ bool isLeafProc(MachineFunction &MF) const;
+
+ // Emits code for adjusting SP in function prologue/epilogue.
+ void emitSPAdjustment(MachineFunction &MF,
+ MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ int NumBytes, unsigned ADDrr, unsigned ADDri) const;
+
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
new file mode 100644
index 000000000000..c36e75d1b076
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp
@@ -0,0 +1,405 @@
+//===-- SparcISelDAGToDAG.cpp - A dag to dag inst selector for Sparc ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an instruction selector for the SPARC target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcTargetMachine.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAGISel.h"
+#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Instruction Selector Implementation
+//===----------------------------------------------------------------------===//
+
+//===--------------------------------------------------------------------===//
+/// SparcDAGToDAGISel - SPARC specific code to select SPARC machine
+/// instructions for SelectionDAG operations.
+///
+namespace {
+class SparcDAGToDAGISel : public SelectionDAGISel {
+ /// Subtarget - Keep a pointer to the Sparc Subtarget around so that we can
+ /// make the right decision when generating code for different targets.
+ const SparcSubtarget *Subtarget;
+public:
+ explicit SparcDAGToDAGISel(SparcTargetMachine &tm) : SelectionDAGISel(tm) {}
+
+ bool runOnMachineFunction(MachineFunction &MF) override {
+ Subtarget = &MF.getSubtarget<SparcSubtarget>();
+ return SelectionDAGISel::runOnMachineFunction(MF);
+ }
+
+ void Select(SDNode *N) override;
+
+ // Complex Pattern Selectors.
+ bool SelectADDRrr(SDValue N, SDValue &R1, SDValue &R2);
+ bool SelectADDRri(SDValue N, SDValue &Base, SDValue &Offset);
+
+ /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
+ /// inline asm expressions.
+ bool SelectInlineAsmMemoryOperand(const SDValue &Op,
+ unsigned ConstraintID,
+ std::vector<SDValue> &OutOps) override;
+
+ StringRef getPassName() const override {
+ return "SPARC DAG->DAG Pattern Instruction Selection";
+ }
+
+ // Include the pieces autogenerated from the target description.
+#include "SparcGenDAGISel.inc"
+
+private:
+ SDNode* getGlobalBaseReg();
+ bool tryInlineAsm(SDNode *N);
+};
+} // end anonymous namespace
+
+SDNode* SparcDAGToDAGISel::getGlobalBaseReg() {
+ unsigned GlobalBaseReg = Subtarget->getInstrInfo()->getGlobalBaseReg(MF);
+ return CurDAG->getRegister(GlobalBaseReg,
+ TLI->getPointerTy(CurDAG->getDataLayout()))
+ .getNode();
+}
+
+bool SparcDAGToDAGISel::SelectADDRri(SDValue Addr,
+ SDValue &Base, SDValue &Offset) {
+ if (FrameIndexSDNode *FIN = dyn_cast<FrameIndexSDNode>(Addr)) {
+ Base = CurDAG->getTargetFrameIndex(
+ FIN->getIndex(), TLI->getPointerTy(CurDAG->getDataLayout()));
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ return true;
+ }
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress ||
+ Addr.getOpcode() == ISD::TargetGlobalTLSAddress)
+ return false; // direct calls.
+
+ if (Addr.getOpcode() == ISD::ADD) {
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1))) {
+ if (isInt<13>(CN->getSExtValue())) {
+ if (FrameIndexSDNode *FIN =
+ dyn_cast<FrameIndexSDNode>(Addr.getOperand(0))) {
+ // Constant offset from frame ref.
+ Base = CurDAG->getTargetFrameIndex(
+ FIN->getIndex(), TLI->getPointerTy(CurDAG->getDataLayout()));
+ } else {
+ Base = Addr.getOperand(0);
+ }
+ Offset = CurDAG->getTargetConstant(CN->getZExtValue(), SDLoc(Addr),
+ MVT::i32);
+ return true;
+ }
+ }
+ if (Addr.getOperand(0).getOpcode() == SPISD::Lo) {
+ Base = Addr.getOperand(1);
+ Offset = Addr.getOperand(0).getOperand(0);
+ return true;
+ }
+ if (Addr.getOperand(1).getOpcode() == SPISD::Lo) {
+ Base = Addr.getOperand(0);
+ Offset = Addr.getOperand(1).getOperand(0);
+ return true;
+ }
+ }
+ Base = Addr;
+ Offset = CurDAG->getTargetConstant(0, SDLoc(Addr), MVT::i32);
+ return true;
+}
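+
+// Some illustrative matches for SelectADDRri (inferred from the code above):
+//   FrameIndex #1              -> Base = FI#1,  Offset = 0
+//   add FrameIndex #1, 12      -> Base = FI#1,  Offset = 12
+//   add %vreg, SPISD::Lo(sym)  -> Base = %vreg, Offset = lo(sym)
+//   anything else              -> Base = Addr,  Offset = 0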
+
+bool SparcDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) {
+ if (Addr.getOpcode() == ISD::FrameIndex) return false;
+ if (Addr.getOpcode() == ISD::TargetExternalSymbol ||
+ Addr.getOpcode() == ISD::TargetGlobalAddress ||
+ Addr.getOpcode() == ISD::TargetGlobalTLSAddress)
+ return false; // direct calls.
+
+ if (Addr.getOpcode() == ISD::ADD) {
+ if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))
+ if (isInt<13>(CN->getSExtValue()))
+ return false; // Let the reg+imm pattern catch this!
+ if (Addr.getOperand(0).getOpcode() == SPISD::Lo ||
+ Addr.getOperand(1).getOpcode() == SPISD::Lo)
+ return false; // Let the reg+imm pattern catch this!
+ R1 = Addr.getOperand(0);
+ R2 = Addr.getOperand(1);
+ return true;
+ }
+
+ R1 = Addr;
+ R2 = CurDAG->getRegister(SP::G0, TLI->getPointerTy(CurDAG->getDataLayout()));
+ return true;
+}
+
+// Re-assemble i64 arguments split up in SelectionDAGBuilder's
+// visitInlineAsm / GetRegistersForValue functions.
+//
+// Note: This function was copied from, and is essentially identical
+// to ARMISelDAGToDAG::SelectInlineAsm. It is very unfortunate that
+// such hacking-up is necessary; a rethink of how inline asm operands
+// are handled may be in order to make doing this more sane.
+//
+// TODO: fix inline asm support so I can simply tell it that 'i64'
+// inputs to asm need to be allocated to the IntPair register type,
+// and have that work. Then, delete this function.
+bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){
+ std::vector<SDValue> AsmNodeOperands;
+ unsigned Flag, Kind;
+ bool Changed = false;
+ unsigned NumOps = N->getNumOperands();
+
+  // Normally, i64 data is bound to two arbitrary GPRs for the "r"
+  // constraint. However, some instructions (e.g. ldd/std) require
+  // (even, even+1) GPR pairs.
+
+ // So, here, we check for this case, and mutate the inlineasm to use
+ // a single IntPair register instead, which guarantees such even/odd
+ // placement.
+
+ SDLoc dl(N);
+ SDValue Glue = N->getGluedNode() ? N->getOperand(NumOps-1)
+ : SDValue(nullptr,0);
+
+ SmallVector<bool, 8> OpChanged;
+ // Glue node will be appended late.
+  for (unsigned i = 0, e = N->getGluedNode() ? NumOps - 1 : NumOps; i < e; ++i) {
+ SDValue op = N->getOperand(i);
+ AsmNodeOperands.push_back(op);
+
+ if (i < InlineAsm::Op_FirstOperand)
+ continue;
+
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(i))) {
+ Flag = C->getZExtValue();
+ Kind = InlineAsm::getKind(Flag);
+    } else
+      continue;
+
+ // Immediate operands to inline asm in the SelectionDAG are modeled with
+ // two operands. The first is a constant of value InlineAsm::Kind_Imm, and
+ // the second is a constant with the value of the immediate. If we get here
+ // and we have a Kind_Imm, skip the next operand, and continue.
+ if (Kind == InlineAsm::Kind_Imm) {
+ SDValue op = N->getOperand(++i);
+ AsmNodeOperands.push_back(op);
+ continue;
+ }
+
+ unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag);
+ if (NumRegs)
+ OpChanged.push_back(false);
+
+ unsigned DefIdx = 0;
+ bool IsTiedToChangedOp = false;
+ // If it's a use that is tied with a previous def, it has no
+ // reg class constraint.
+ if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx))
+ IsTiedToChangedOp = OpChanged[DefIdx];
+
+ if (Kind != InlineAsm::Kind_RegUse && Kind != InlineAsm::Kind_RegDef
+ && Kind != InlineAsm::Kind_RegDefEarlyClobber)
+ continue;
+
+ unsigned RC;
+ bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC);
+ if ((!IsTiedToChangedOp && (!HasRC || RC != SP::IntRegsRegClassID))
+ || NumRegs != 2)
+ continue;
+
+ assert((i+2 < NumOps) && "Invalid number of operands in inline asm");
+ SDValue V0 = N->getOperand(i+1);
+ SDValue V1 = N->getOperand(i+2);
+ unsigned Reg0 = cast<RegisterSDNode>(V0)->getReg();
+ unsigned Reg1 = cast<RegisterSDNode>(V1)->getReg();
+ SDValue PairedReg;
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+
+ if (Kind == InlineAsm::Kind_RegDef ||
+ Kind == InlineAsm::Kind_RegDefEarlyClobber) {
+ // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to
+ // the original GPRs.
+
+ unsigned GPVR = MRI.createVirtualRegister(&SP::IntPairRegClass);
+ PairedReg = CurDAG->getRegister(GPVR, MVT::v2i32);
+ SDValue Chain = SDValue(N,0);
+
+ SDNode *GU = N->getGluedUser();
+ SDValue RegCopy = CurDAG->getCopyFromReg(Chain, dl, GPVR, MVT::v2i32,
+ Chain.getValue(1));
+
+ // Extract values from a GPRPair reg and copy to the original GPR reg.
+ SDValue Sub0 = CurDAG->getTargetExtractSubreg(SP::sub_even, dl, MVT::i32,
+ RegCopy);
+ SDValue Sub1 = CurDAG->getTargetExtractSubreg(SP::sub_odd, dl, MVT::i32,
+ RegCopy);
+ SDValue T0 = CurDAG->getCopyToReg(Sub0, dl, Reg0, Sub0,
+ RegCopy.getValue(1));
+ SDValue T1 = CurDAG->getCopyToReg(Sub1, dl, Reg1, Sub1, T0.getValue(1));
+
+ // Update the original glue user.
+ std::vector<SDValue> Ops(GU->op_begin(), GU->op_end()-1);
+ Ops.push_back(T1.getValue(1));
+ CurDAG->UpdateNodeOperands(GU, Ops);
+    } else {
+ // For Kind == InlineAsm::Kind_RegUse, we first copy two GPRs into a
+ // GPRPair and then pass the GPRPair to the inline asm.
+ SDValue Chain = AsmNodeOperands[InlineAsm::Op_InputChain];
+
+ // As REG_SEQ doesn't take RegisterSDNode, we copy them first.
+ SDValue T0 = CurDAG->getCopyFromReg(Chain, dl, Reg0, MVT::i32,
+ Chain.getValue(1));
+ SDValue T1 = CurDAG->getCopyFromReg(Chain, dl, Reg1, MVT::i32,
+ T0.getValue(1));
+ SDValue Pair = SDValue(
+ CurDAG->getMachineNode(
+ TargetOpcode::REG_SEQUENCE, dl, MVT::v2i32,
+ {
+ CurDAG->getTargetConstant(SP::IntPairRegClassID, dl,
+ MVT::i32),
+ T0,
+ CurDAG->getTargetConstant(SP::sub_even, dl, MVT::i32),
+ T1,
+ CurDAG->getTargetConstant(SP::sub_odd, dl, MVT::i32),
+ }),
+ 0);
+
+ // Copy REG_SEQ into a GPRPair-typed VR and replace the original two
+ // i32 VRs of inline asm with it.
+ unsigned GPVR = MRI.createVirtualRegister(&SP::IntPairRegClass);
+ PairedReg = CurDAG->getRegister(GPVR, MVT::v2i32);
+ Chain = CurDAG->getCopyToReg(T1, dl, GPVR, Pair, T1.getValue(1));
+
+ AsmNodeOperands[InlineAsm::Op_InputChain] = Chain;
+ Glue = Chain.getValue(1);
+ }
+
+ Changed = true;
+
+    if (PairedReg.getNode()) {
+      OpChanged[OpChanged.size() - 1] = true;
+      Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum */);
+ if (IsTiedToChangedOp)
+ Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx);
+ else
+ Flag = InlineAsm::getFlagWordForRegClass(Flag, SP::IntPairRegClassID);
+ // Replace the current flag.
+      AsmNodeOperands[AsmNodeOperands.size() - 1] =
+          CurDAG->getTargetConstant(Flag, dl, MVT::i32);
+ // Add the new register node and skip the original two GPRs.
+ AsmNodeOperands.push_back(PairedReg);
+ // Skip the next two GPRs.
+ i += 2;
+ }
+ }
+
+ if (Glue.getNode())
+ AsmNodeOperands.push_back(Glue);
+ if (!Changed)
+ return false;
+
+ SDValue New = CurDAG->getNode(ISD::INLINEASM, SDLoc(N),
+ CurDAG->getVTList(MVT::Other, MVT::Glue), AsmNodeOperands);
+ New->setNodeId(-1);
+ ReplaceNode(N, New.getNode());
+ return true;
+}
+
+void SparcDAGToDAGISel::Select(SDNode *N) {
+ SDLoc dl(N);
+ if (N->isMachineOpcode()) {
+ N->setNodeId(-1);
+ return; // Already selected.
+ }
+
+ switch (N->getOpcode()) {
+ default: break;
+ case ISD::INLINEASM: {
+ if (tryInlineAsm(N))
+ return;
+ break;
+ }
+ case SPISD::GLOBAL_BASE_REG:
+ ReplaceNode(N, getGlobalBaseReg());
+ return;
+
+ case ISD::SDIV:
+ case ISD::UDIV: {
+ // sdivx / udivx handle 64-bit divides.
+ if (N->getValueType(0) == MVT::i64)
+ break;
+ // FIXME: should use a custom expander to expose the SRA to the dag.
+ SDValue DivLHS = N->getOperand(0);
+ SDValue DivRHS = N->getOperand(1);
+
+ // Set the Y register to the high-part.
+ SDValue TopPart;
+ if (N->getOpcode() == ISD::SDIV) {
+ TopPart = SDValue(CurDAG->getMachineNode(SP::SRAri, dl, MVT::i32, DivLHS,
+ CurDAG->getTargetConstant(31, dl, MVT::i32)),
+ 0);
+ } else {
+ TopPart = CurDAG->getRegister(SP::G0, MVT::i32);
+ }
+ TopPart = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SP::Y, TopPart,
+ SDValue())
+ .getValue(1);
+
+ // FIXME: Handle div by immediate.
+ unsigned Opcode = N->getOpcode() == ISD::SDIV ? SP::SDIVrr : SP::UDIVrr;
+      // SDIV triggers a hardware erratum on some LEON2 processors; replace
+      // it with SDIVcc there.
+      if (((SparcTargetMachine &)TM).getSubtargetImpl()->performSDIVReplace() &&
+          Opcode == SP::SDIVrr)
+        Opcode = SP::SDIVCCrr;
+ CurDAG->SelectNodeTo(N, Opcode, MVT::i32, DivLHS, DivRHS, TopPart);
+ return;
+ }
+ }
+
+ SelectCode(N);
+}
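+
+// A rough sketch of the 32-bit division handled above (illustrative
+// assembly, not a guaranteed encoding): an i32 sdiv becomes approximately
+//
+//   sra  %o0, 31, %g1    ! sign bits of the dividend into the high part
+//   wr   %g1, %y         ! the CopyToReg of SP::Y above
+//   sdiv %o0, %o1, %o0   ! or sdivcc on LEON2 parts with the erratum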
+
+/// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
+/// inline asm expressions.
+bool
+SparcDAGToDAGISel::SelectInlineAsmMemoryOperand(const SDValue &Op,
+ unsigned ConstraintID,
+ std::vector<SDValue> &OutOps) {
+ SDValue Op0, Op1;
+ switch (ConstraintID) {
+ default: return true;
+ case InlineAsm::Constraint_i:
+ case InlineAsm::Constraint_o:
+ case InlineAsm::Constraint_m: // memory
+ if (!SelectADDRrr(Op, Op0, Op1))
+ SelectADDRri(Op, Op0, Op1);
+ break;
+ }
+
+ OutOps.push_back(Op0);
+ OutOps.push_back(Op1);
+ return false;
+}
+
+/// createSparcISelDag - This pass converts a legalized DAG into a
+/// SPARC-specific DAG, ready for instruction scheduling.
+///
+FunctionPass *llvm::createSparcISelDag(SparcTargetMachine &TM) {
+ return new SparcDAGToDAGISel(TM);
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
new file mode 100644
index 000000000000..d9548ff90d7f
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -0,0 +1,3603 @@
+//===-- SparcISelLowering.cpp - Sparc DAG Lowering Implementation ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the interfaces that Sparc uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcISelLowering.h"
+#include "MCTargetDesc/SparcMCExpr.h"
+#include "SparcMachineFunctionInfo.h"
+#include "SparcRegisterInfo.h"
+#include "SparcTargetMachine.h"
+#include "SparcTargetObjectFile.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/CodeGen/CallingConvLower.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/SelectionDAG.h"
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+#include "llvm/IR/DerivedTypes.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Calling Convention Implementation
+//===----------------------------------------------------------------------===//
+
+static bool CC_Sparc_Assign_SRet(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State)
+{
+ assert (ArgFlags.isSRet());
+
+ // Assign SRet argument.
+ State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
+ 0,
+ LocVT, LocInfo));
+ return true;
+}
+
+static bool CC_Sparc_Assign_Split_64(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State)
+{
+ static const MCPhysReg RegList[] = {
+ SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
+ };
+ // Try to get first reg.
+ if (unsigned Reg = State.AllocateReg(RegList)) {
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ } else {
+ // Assign whole thing in stack.
+ State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
+                                           State.AllocateStack(8, 4),
+ LocVT, LocInfo));
+ return true;
+ }
+
+ // Try to get second reg.
+ if (unsigned Reg = State.AllocateReg(RegList))
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ else
+ State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
+                                           State.AllocateStack(4, 4),
+ LocVT, LocInfo));
+ return true;
+}
+
+static bool CC_Sparc_Assign_Ret_Split_64(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State)
+{
+ static const MCPhysReg RegList[] = {
+ SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
+ };
+
+ // Try to get first reg.
+ if (unsigned Reg = State.AllocateReg(RegList))
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ else
+ return false;
+
+ // Try to get second reg.
+ if (unsigned Reg = State.AllocateReg(RegList))
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ else
+ return false;
+
+ return true;
+}
+
+// Allocate a full-sized argument for the 64-bit ABI.
+static bool CC_Sparc64_Full(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ assert((LocVT == MVT::f32 || LocVT == MVT::f128
+ || LocVT.getSizeInBits() == 64) &&
+ "Can't handle non-64 bits locations");
+
+ // Stack space is allocated for all arguments starting from [%fp+BIAS+128].
+ unsigned size = (LocVT == MVT::f128) ? 16 : 8;
+ unsigned alignment = (LocVT == MVT::f128) ? 16 : 8;
+ unsigned Offset = State.AllocateStack(size, alignment);
+ unsigned Reg = 0;
+
+ if (LocVT == MVT::i64 && Offset < 6*8)
+ // Promote integers to %i0-%i5.
+ Reg = SP::I0 + Offset/8;
+ else if (LocVT == MVT::f64 && Offset < 16*8)
+ // Promote doubles to %d0-%d30. (Which LLVM calls D0-D15).
+ Reg = SP::D0 + Offset/8;
+ else if (LocVT == MVT::f32 && Offset < 16*8)
+ // Promote floats to %f1, %f3, ...
+ Reg = SP::F1 + Offset/4;
+ else if (LocVT == MVT::f128 && Offset < 16*8)
+ // Promote long doubles to %q0-%q28. (Which LLVM calls Q0-Q7).
+ Reg = SP::Q0 + Offset/16;
+
+ // Promote to register when possible, otherwise use the stack slot.
+ if (Reg) {
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return true;
+ }
+
+ // This argument goes on the stack in an 8-byte slot.
+ // When passing floats, LocVT is smaller than 8 bytes. Adjust the offset to
+ // the right-aligned float. The first 4 bytes of the stack slot are undefined.
+ if (LocVT == MVT::f32)
+ Offset += 4;
+
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return true;
+}
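+
+// Illustrative register assignments under the promotion rules above (offsets
+// are relative to the start of the parameter array at %fp+BIAS+128):
+//   Offset 0:  i64 -> %i0,  f64 -> %d0,  f32 -> %f1
+//   Offset 8:  i64 -> %i1,  f64 -> %d1,  f32 -> %f3
+//   Offset 48 and up: integers go to the stack; FP registers last until 128.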
+
+// Allocate a half-sized argument for the 64-bit ABI.
+//
+// This is used when passing { float, int } structs by value in registers.
+static bool CC_Sparc64_Half(unsigned &ValNo, MVT &ValVT,
+ MVT &LocVT, CCValAssign::LocInfo &LocInfo,
+ ISD::ArgFlagsTy &ArgFlags, CCState &State) {
+ assert(LocVT.getSizeInBits() == 32 && "Can't handle non-32 bits locations");
+ unsigned Offset = State.AllocateStack(4, 4);
+
+ if (LocVT == MVT::f32 && Offset < 16*8) {
+ // Promote floats to %f0-%f31.
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, SP::F0 + Offset/4,
+ LocVT, LocInfo));
+ return true;
+ }
+
+ if (LocVT == MVT::i32 && Offset < 6*8) {
+ // Promote integers to %i0-%i5, using half the register.
+ unsigned Reg = SP::I0 + Offset/8;
+ LocVT = MVT::i64;
+ LocInfo = CCValAssign::AExt;
+
+ // Set the Custom bit if this i32 goes in the high bits of a register.
+ if (Offset % 8 == 0)
+ State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg,
+ LocVT, LocInfo));
+ else
+ State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
+ return true;
+ }
+
+ State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
+ return true;
+}
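+
+// Example (illustrative): for struct { int i; float f; } passed by value,
+// the frontend emits (i32 inreg, float inreg). The i32 at offset 0 then goes
+// into the high half of %i0 (the Custom flag above), and the float at offset
+// 4 goes into %f1.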
+
+#include "SparcGenCallingConv.inc"
+
+// The calling conventions in SparcCallingConv.td are described in terms of the
+// callee's register window. This function translates registers to the
+// corresponding caller window %o register.
+static unsigned toCallerWindow(unsigned Reg) {
+ static_assert(SP::I0 + 7 == SP::I7 && SP::O0 + 7 == SP::O7,
+ "Unexpected enum");
+ if (Reg >= SP::I0 && Reg <= SP::I7)
+ return Reg - SP::I0 + SP::O0;
+ return Reg;
+}
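+
+// E.g. the callee-side %i0 named in RetCC_Sparc32 is seen by the caller as
+// %o0, so toCallerWindow(SP::I0) == SP::O0; registers outside %i0-%i7, such
+// as %f0, pass through unchanged.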
+
+SDValue
+SparcTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &DL, SelectionDAG &DAG) const {
+ if (Subtarget->is64Bit())
+ return LowerReturn_64(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
+ return LowerReturn_32(Chain, CallConv, IsVarArg, Outs, OutVals, DL, DAG);
+}
+
+SDValue
+SparcTargetLowering::LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &DL, SelectionDAG &DAG) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+
+ // CCValAssign - represent the assignment of the return value to locations.
+ SmallVector<CCValAssign, 16> RVLocs;
+
+ // CCState - Info about the registers and stack slot.
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+ *DAG.getContext());
+
+ // Analyze return values.
+ CCInfo.AnalyzeReturn(Outs, RetCC_Sparc32);
+
+ SDValue Flag;
+ SmallVector<SDValue, 4> RetOps(1, Chain);
+ // Make room for the return address offset.
+ RetOps.push_back(SDValue());
+
+ // Copy the result values into the output registers.
+ for (unsigned i = 0, realRVLocIdx = 0;
+ i != RVLocs.size();
+ ++i, ++realRVLocIdx) {
+ CCValAssign &VA = RVLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
+
+ SDValue Arg = OutVals[realRVLocIdx];
+
+ if (VA.needsCustom()) {
+ assert(VA.getLocVT() == MVT::v2i32);
+      // Legalize ret v2i32 -> ret 2 x i32 (basically: do what would
+      // happen by default if this weren't a legal type).
+
+ SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
+ Arg,
+ DAG.getConstant(0, DL, getVectorIdxTy(DAG.getDataLayout())));
+ SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
+ Arg,
+ DAG.getConstant(1, DL, getVectorIdxTy(DAG.getDataLayout())));
+
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part0, Flag);
+ Flag = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+ VA = RVLocs[++i]; // skip ahead to next loc
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Part1,
+ Flag);
+ } else
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
+
+ // Guarantee that all emitted copies are stuck together with flags.
+ Flag = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+ }
+
+ unsigned RetAddrOffset = 8; // Call Inst + Delay Slot
+ // If the function returns a struct, copy the SRetReturnReg to I0
+ if (MF.getFunction().hasStructRetAttr()) {
+ SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
+ unsigned Reg = SFI->getSRetReturnReg();
+ if (!Reg)
+ llvm_unreachable("sret virtual register not created in the entry block");
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+ SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, PtrVT);
+ Chain = DAG.getCopyToReg(Chain, DL, SP::I0, Val, Flag);
+ Flag = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(SP::I0, PtrVT));
+ RetAddrOffset = 12; // CallInst + Delay Slot + Unimp
+ }
+
+ RetOps[0] = Chain; // Update chain.
+ RetOps[1] = DAG.getConstant(RetAddrOffset, DL, MVT::i32);
+
+ // Add the flag if we have it.
+ if (Flag.getNode())
+ RetOps.push_back(Flag);
+
+ return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
+}
+
+// Lower return values for the 64-bit ABI.
+// Return values are passed exactly the same way as function arguments.
+SDValue
+SparcTargetLowering::LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &DL, SelectionDAG &DAG) const {
+ // CCValAssign - represent the assignment of the return value to locations.
+ SmallVector<CCValAssign, 16> RVLocs;
+
+ // CCState - Info about the registers and stack slot.
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
+ *DAG.getContext());
+
+ // Analyze return values.
+ CCInfo.AnalyzeReturn(Outs, RetCC_Sparc64);
+
+ SDValue Flag;
+ SmallVector<SDValue, 4> RetOps(1, Chain);
+
+ // The second operand on the return instruction is the return address offset.
+ // The return address is always %i7+8 with the 64-bit ABI.
+ RetOps.push_back(DAG.getConstant(8, DL, MVT::i32));
+
+ // Copy the result values into the output registers.
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign &VA = RVLocs[i];
+ assert(VA.isRegLoc() && "Can only return in registers!");
+ SDValue OutVal = OutVals[i];
+
+ // Integer return values must be sign or zero extended by the callee.
+ switch (VA.getLocInfo()) {
+ case CCValAssign::Full: break;
+ case CCValAssign::SExt:
+ OutVal = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), OutVal);
+ break;
+ case CCValAssign::ZExt:
+ OutVal = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), OutVal);
+ break;
+ case CCValAssign::AExt:
+ OutVal = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), OutVal);
+ break;
+ default:
+ llvm_unreachable("Unknown loc info!");
+ }
+
+ // The custom bit on an i32 return value indicates that it should be passed
+ // in the high bits of the register.
+ if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
+ OutVal = DAG.getNode(ISD::SHL, DL, MVT::i64, OutVal,
+ DAG.getConstant(32, DL, MVT::i32));
+
+ // The next value may go in the low bits of the same register.
+ // Handle both at once.
+ if (i+1 < RVLocs.size() && RVLocs[i+1].getLocReg() == VA.getLocReg()) {
+ SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, OutVals[i+1]);
+ OutVal = DAG.getNode(ISD::OR, DL, MVT::i64, OutVal, NV);
+ // Skip the next value, it's already done.
+ ++i;
+ }
+ }
+
+ Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), OutVal, Flag);
+
+ // Guarantee that all emitted copies are stuck together with flags.
+ Flag = Chain.getValue(1);
+ RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
+ }
+
+ RetOps[0] = Chain; // Update chain.
+
+ // Add the flag if we have it.
+ if (Flag.getNode())
+ RetOps.push_back(Flag);
+
+ return DAG.getNode(SPISD::RET_FLAG, DL, MVT::Other, RetOps);
+}
+
+SDValue SparcTargetLowering::LowerFormalArguments(
+ SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ if (Subtarget->is64Bit())
+ return LowerFormalArguments_64(Chain, CallConv, IsVarArg, Ins,
+ DL, DAG, InVals);
+ return LowerFormalArguments_32(Chain, CallConv, IsVarArg, Ins,
+ DL, DAG, InVals);
+}
+
+/// LowerFormalArguments_32 - V8 uses a very simple ABI, where all values are
+/// passed in either one or two GPRs, including FP values. TODO: we should
+/// pass FP values in FP registers for fastcc functions.
+SDValue SparcTargetLowering::LowerFormalArguments_32(
+ SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &dl,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineRegisterInfo &RegInfo = MF.getRegInfo();
+ SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
+
+ // Assign locations to all of the incoming arguments.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+ *DAG.getContext());
+ CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc32);
+
+ const unsigned StackOffset = 92;
+ bool IsLittleEndian = DAG.getDataLayout().isLittleEndian();
+
+ unsigned InIdx = 0;
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i, ++InIdx) {
+ CCValAssign &VA = ArgLocs[i];
+
+ if (Ins[InIdx].Flags.isSRet()) {
+ if (InIdx != 0)
+ report_fatal_error("sparc only supports sret on the first parameter");
+ // Get SRet from [%fp+64].
+ int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, 64, true);
+ SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
+ SDValue Arg =
+ DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
+ InVals.push_back(Arg);
+ continue;
+ }
+
+ if (VA.isRegLoc()) {
+ if (VA.needsCustom()) {
+ assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
+
+ unsigned VRegHi = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
+ MF.getRegInfo().addLiveIn(VA.getLocReg(), VRegHi);
+ SDValue HiVal = DAG.getCopyFromReg(Chain, dl, VRegHi, MVT::i32);
+
+ assert(i+1 < e);
+ CCValAssign &NextVA = ArgLocs[++i];
+
+ SDValue LoVal;
+ if (NextVA.isMemLoc()) {
+ int FrameIdx = MF.getFrameInfo().
+ CreateFixedObject(4, StackOffset+NextVA.getLocMemOffset(),true);
+ SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
+ LoVal = DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
+ } else {
+ unsigned loReg = MF.addLiveIn(NextVA.getLocReg(),
+ &SP::IntRegsRegClass);
+ LoVal = DAG.getCopyFromReg(Chain, dl, loReg, MVT::i32);
+ }
+
+ if (IsLittleEndian)
+ std::swap(LoVal, HiVal);
+
+ SDValue WholeValue =
+ DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
+ WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), WholeValue);
+ InVals.push_back(WholeValue);
+ continue;
+ }
+ unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
+ MF.getRegInfo().addLiveIn(VA.getLocReg(), VReg);
+ SDValue Arg = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
+ if (VA.getLocVT() == MVT::f32)
+ Arg = DAG.getNode(ISD::BITCAST, dl, MVT::f32, Arg);
+ else if (VA.getLocVT() != MVT::i32) {
+ Arg = DAG.getNode(ISD::AssertSext, dl, MVT::i32, Arg,
+ DAG.getValueType(VA.getLocVT()));
+ Arg = DAG.getNode(ISD::TRUNCATE, dl, VA.getLocVT(), Arg);
+ }
+ InVals.push_back(Arg);
+ continue;
+ }
+
+ assert(VA.isMemLoc());
+
+ unsigned Offset = VA.getLocMemOffset()+StackOffset;
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+ if (VA.needsCustom()) {
+ assert(VA.getValVT() == MVT::f64 || VA.getValVT() == MVT::v2i32);
+ // If it is double-word aligned, just load.
+ if (Offset % 8 == 0) {
+ int FI = MF.getFrameInfo().CreateFixedObject(8,
+ Offset,
+ true);
+ SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
+ SDValue Load =
+ DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
+ InVals.push_back(Load);
+ continue;
+ }
+
+ int FI = MF.getFrameInfo().CreateFixedObject(4,
+ Offset,
+ true);
+ SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
+ SDValue HiVal =
+ DAG.getLoad(MVT::i32, dl, Chain, FIPtr, MachinePointerInfo());
+ int FI2 = MF.getFrameInfo().CreateFixedObject(4,
+ Offset+4,
+ true);
+ SDValue FIPtr2 = DAG.getFrameIndex(FI2, PtrVT);
+
+ SDValue LoVal =
+ DAG.getLoad(MVT::i32, dl, Chain, FIPtr2, MachinePointerInfo());
+
+ if (IsLittleEndian)
+ std::swap(LoVal, HiVal);
+
+ SDValue WholeValue =
+ DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, LoVal, HiVal);
+ WholeValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), WholeValue);
+ InVals.push_back(WholeValue);
+ continue;
+ }
+
+ int FI = MF.getFrameInfo().CreateFixedObject(4,
+ Offset,
+ true);
+ SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
+    SDValue Load;
+ if (VA.getValVT() == MVT::i32 || VA.getValVT() == MVT::f32) {
+ Load = DAG.getLoad(VA.getValVT(), dl, Chain, FIPtr, MachinePointerInfo());
+ } else if (VA.getValVT() == MVT::f128) {
+ report_fatal_error("SPARCv8 does not handle f128 in calls; "
+ "pass indirectly");
+ } else {
+ // We shouldn't see any other value types here.
+ llvm_unreachable("Unexpected ValVT encountered in frame lowering.");
+ }
+ InVals.push_back(Load);
+ }
+
+ if (MF.getFunction().hasStructRetAttr()) {
+ // Copy the SRet Argument to SRetReturnReg.
+ SparcMachineFunctionInfo *SFI = MF.getInfo<SparcMachineFunctionInfo>();
+ unsigned Reg = SFI->getSRetReturnReg();
+ if (!Reg) {
+ Reg = MF.getRegInfo().createVirtualRegister(&SP::IntRegsRegClass);
+ SFI->setSRetReturnReg(Reg);
+ }
+ SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[0]);
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
+ }
+
+ // Store remaining ArgRegs to the stack if this is a varargs function.
+ if (isVarArg) {
+ static const MCPhysReg ArgRegs[] = {
+ SP::I0, SP::I1, SP::I2, SP::I3, SP::I4, SP::I5
+ };
+ unsigned NumAllocated = CCInfo.getFirstUnallocated(ArgRegs);
+ const MCPhysReg *CurArgReg = ArgRegs+NumAllocated, *ArgRegEnd = ArgRegs+6;
+ unsigned ArgOffset = CCInfo.getNextStackOffset();
+ if (NumAllocated == 6)
+ ArgOffset += StackOffset;
+ else {
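+      // In the 32-bit ABI, the 64-byte register save area is followed by
+      // the 4-byte struct-return slot at %sp+64, so the home slots for the
+      // six argument words of %i0-%i5 start at %sp+68.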
+ assert(!ArgOffset);
+ ArgOffset = 68+4*NumAllocated;
+ }
+
+ // Remember the vararg offset for the va_start implementation.
+ FuncInfo->setVarArgsFrameOffset(ArgOffset);
+
+ std::vector<SDValue> OutChains;
+
+ for (; CurArgReg != ArgRegEnd; ++CurArgReg) {
+ unsigned VReg = RegInfo.createVirtualRegister(&SP::IntRegsRegClass);
+ MF.getRegInfo().addLiveIn(*CurArgReg, VReg);
+ SDValue Arg = DAG.getCopyFromReg(DAG.getRoot(), dl, VReg, MVT::i32);
+
+ int FrameIdx = MF.getFrameInfo().CreateFixedObject(4, ArgOffset,
+ true);
+ SDValue FIPtr = DAG.getFrameIndex(FrameIdx, MVT::i32);
+
+ OutChains.push_back(
+ DAG.getStore(DAG.getRoot(), dl, Arg, FIPtr, MachinePointerInfo()));
+ ArgOffset += 4;
+ }
+
+ if (!OutChains.empty()) {
+ OutChains.push_back(Chain);
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
+ }
+ }
+
+ return Chain;
+}
+
+// Lower formal arguments for the 64 bit ABI.
+SDValue SparcTargetLowering::LowerFormalArguments_64(
+ SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
+ SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
+ MachineFunction &MF = DAG.getMachineFunction();
+
+ // Analyze arguments according to CC_Sparc64.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
+ *DAG.getContext());
+ CCInfo.AnalyzeFormalArguments(Ins, CC_Sparc64);
+
+ // The argument array begins at %fp+BIAS+128, after the register save area.
+ const unsigned ArgArea = 128;
+
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ CCValAssign &VA = ArgLocs[i];
+ if (VA.isRegLoc()) {
+ // This argument is passed in a register.
+ // All integer register arguments are promoted by the caller to i64.
+
+ // Create a virtual register for the promoted live-in value.
+ unsigned VReg = MF.addLiveIn(VA.getLocReg(),
+ getRegClassFor(VA.getLocVT()));
+ SDValue Arg = DAG.getCopyFromReg(Chain, DL, VReg, VA.getLocVT());
+
+ // Get the high bits for i32 struct elements.
+ if (VA.getValVT() == MVT::i32 && VA.needsCustom())
+ Arg = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), Arg,
+ DAG.getConstant(32, DL, MVT::i32));
+
+ // The caller promoted the argument, so insert an Assert?ext SDNode so we
+ // won't promote the value again in this function.
+ switch (VA.getLocInfo()) {
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), Arg,
+ DAG.getValueType(VA.getValVT()));
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), Arg,
+ DAG.getValueType(VA.getValVT()));
+ break;
+ default:
+ break;
+ }
+
+ // Truncate the register down to the argument type.
+ if (VA.isExtInLoc())
+ Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);
+
+ InVals.push_back(Arg);
+ continue;
+ }
+
+ // The registers are exhausted. This argument was passed on the stack.
+ assert(VA.isMemLoc());
+ // The CC_Sparc64_Full/Half functions compute stack offsets relative to the
+ // beginning of the arguments area at %fp+BIAS+128.
+ unsigned Offset = VA.getLocMemOffset() + ArgArea;
+ unsigned ValSize = VA.getValVT().getSizeInBits() / 8;
+ // Adjust offset for extended arguments, SPARC is big-endian.
+ // The caller will have written the full slot with extended bytes, but we
+ // prefer our own extending loads.
+ if (VA.isExtInLoc())
+ Offset += 8 - ValSize;
+ int FI = MF.getFrameInfo().CreateFixedObject(ValSize, Offset, true);
+ InVals.push_back(
+ DAG.getLoad(VA.getValVT(), DL, Chain,
+ DAG.getFrameIndex(FI, getPointerTy(MF.getDataLayout())),
+ MachinePointerInfo::getFixedStack(MF, FI)));
+ }
+
+ if (!IsVarArg)
+ return Chain;
+
+ // This function takes variable arguments, some of which may have been passed
+ // in registers %i0-%i5. Variable floating point arguments are never passed
+  // in floating point registers. They go in %i0-%i5 or on the stack like
+ // integer arguments.
+ //
+ // The va_start intrinsic needs to know the offset to the first variable
+ // argument.
+ unsigned ArgOffset = CCInfo.getNextStackOffset();
+ SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
+ // Skip the 128 bytes of register save area.
+ FuncInfo->setVarArgsFrameOffset(ArgOffset + ArgArea +
+ Subtarget->getStackPointerBias());
+
+ // Save the variable arguments that were passed in registers.
+ // The caller is required to reserve stack space for 6 arguments regardless
+ // of how many arguments were actually passed.
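+  // Note that CC_Sparc64 allocates an 8-byte stack slot for every argument,
+  // including those passed in registers, so ArgOffset/8 below is also the
+  // index of the first %i register not consumed by a fixed argument.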
+ SmallVector<SDValue, 8> OutChains;
+ for (; ArgOffset < 6*8; ArgOffset += 8) {
+ unsigned VReg = MF.addLiveIn(SP::I0 + ArgOffset/8, &SP::I64RegsRegClass);
+ SDValue VArg = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
+ int FI = MF.getFrameInfo().CreateFixedObject(8, ArgOffset + ArgArea, true);
+ auto PtrVT = getPointerTy(MF.getDataLayout());
+ OutChains.push_back(
+ DAG.getStore(Chain, DL, VArg, DAG.getFrameIndex(FI, PtrVT),
+ MachinePointerInfo::getFixedStack(MF, FI)));
+ }
+
+ if (!OutChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
+
+ return Chain;
+}
+
+SDValue
+SparcTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ if (Subtarget->is64Bit())
+ return LowerCall_64(CLI, InVals);
+ return LowerCall_32(CLI, InVals);
+}
+
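+// Determine whether the callee is marked returns_twice (e.g. setjmp).
+// Such calls need a different call-preserved register mask, selected via
+// getRTCallPreservedMask() below.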
+static bool hasReturnsTwiceAttr(SelectionDAG &DAG, SDValue Callee,
+ ImmutableCallSite CS) {
+ if (CS)
+ return CS.hasFnAttr(Attribute::ReturnsTwice);
+
+ const Function *CalleeFn = nullptr;
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ CalleeFn = dyn_cast<Function>(G->getGlobal());
+ } else if (ExternalSymbolSDNode *E =
+ dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ const Function &Fn = DAG.getMachineFunction().getFunction();
+ const Module *M = Fn.getParent();
+ const char *CalleeName = E->getSymbol();
+ CalleeFn = M->getFunction(CalleeName);
+ }
+
+ if (!CalleeFn)
+ return false;
+ return CalleeFn->hasFnAttribute(Attribute::ReturnsTwice);
+}
+
+// Lower a call for the 32-bit ABI.
+SDValue
+SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ SDLoc &dl = CLI.DL;
+ SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
+ SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
+ SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
+ SDValue Chain = CLI.Chain;
+ SDValue Callee = CLI.Callee;
+ bool &isTailCall = CLI.IsTailCall;
+ CallingConv::ID CallConv = CLI.CallConv;
+ bool isVarArg = CLI.IsVarArg;
+
+ // Sparc target does not yet support tail call optimization.
+ isTailCall = false;
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
+ *DAG.getContext());
+ CCInfo.AnalyzeCallOperands(Outs, CC_Sparc32);
+
+ // Get the size of the outgoing arguments stack space requirement.
+ unsigned ArgsSize = CCInfo.getNextStackOffset();
+
+ // Keep stack frames 8-byte aligned.
+ ArgsSize = (ArgsSize+7) & ~7;
+
+ MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
+
+ // Create local copies for byval args.
+ SmallVector<SDValue, 8> ByValArgs;
+ for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
+ ISD::ArgFlagsTy Flags = Outs[i].Flags;
+ if (!Flags.isByVal())
+ continue;
+
+ SDValue Arg = OutVals[i];
+ unsigned Size = Flags.getByValSize();
+ unsigned Align = Flags.getByValAlign();
+
+ if (Size > 0U) {
+ int FI = MFI.CreateStackObject(Size, Align, false);
+ SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+ SDValue SizeNode = DAG.getConstant(Size, dl, MVT::i32);
+
+ Chain = DAG.getMemcpy(Chain, dl, FIPtr, Arg, SizeNode, Align,
+ false, // isVolatile,
+ (Size <= 32), // AlwaysInline if size <= 32,
+ false, // isTailCall
+ MachinePointerInfo(), MachinePointerInfo());
+ ByValArgs.push_back(FIPtr);
+    } else {
+ SDValue nullVal;
+ ByValArgs.push_back(nullVal);
+ }
+ }
+
+ Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, dl);
+
+ SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
+ SmallVector<SDValue, 8> MemOpChains;
+
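+  // Stack-passed arguments start at %sp+92: the 64-byte register save area,
+  // the 4-byte struct-return slot, and the six 4-byte argument-word home
+  // slots come first (64 + 4 + 24 = 92).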
+ const unsigned StackOffset = 92;
+ bool hasStructRetAttr = false;
+ // Walk the register/memloc assignments, inserting copies/loads.
+ for (unsigned i = 0, realArgIdx = 0, byvalArgIdx = 0, e = ArgLocs.size();
+ i != e;
+ ++i, ++realArgIdx) {
+ CCValAssign &VA = ArgLocs[i];
+ SDValue Arg = OutVals[realArgIdx];
+
+ ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;
+
+ // Use local copy if it is a byval arg.
+ if (Flags.isByVal()) {
+ Arg = ByValArgs[byvalArgIdx++];
+      if (!Arg)
+        continue;
+ }
+
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ default: llvm_unreachable("Unknown loc info!");
+ case CCValAssign::Full: break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::AExt:
+ Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::BCvt:
+ Arg = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), Arg);
+ break;
+ }
+
+ if (Flags.isSRet()) {
+ assert(VA.needsCustom());
+ // store SRet argument in %sp+64
+ SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
+ SDValue PtrOff = DAG.getIntPtrConstant(64, dl);
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
+ hasStructRetAttr = true;
+ continue;
+ }
+
+ if (VA.needsCustom()) {
+ assert(VA.getLocVT() == MVT::f64 || VA.getLocVT() == MVT::v2i32);
+
+ if (VA.isMemLoc()) {
+ unsigned Offset = VA.getLocMemOffset() + StackOffset;
+ // if it is double-word aligned, just store.
+ if (Offset % 8 == 0) {
+ SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
+ continue;
+ }
+ }
+
+ if (VA.getLocVT() == MVT::f64) {
+        // Move the float value from the float registers into the
+        // integer registers.
+
+ // TODO: The f64 -> v2i32 conversion is super-inefficient for
+ // constants: it sticks them in the constant pool, then loads
+ // to a fp register, then stores to temp memory, then loads to
+ // integer registers.
+ Arg = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, Arg);
+ }
+
+ SDValue Part0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+ Arg,
+ DAG.getConstant(0, dl, getVectorIdxTy(DAG.getDataLayout())));
+ SDValue Part1 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
+ Arg,
+ DAG.getConstant(1, dl, getVectorIdxTy(DAG.getDataLayout())));
+
+ if (VA.isRegLoc()) {
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Part0));
+ assert(i+1 != e);
+ CCValAssign &NextVA = ArgLocs[++i];
+ if (NextVA.isRegLoc()) {
+ RegsToPass.push_back(std::make_pair(NextVA.getLocReg(), Part1));
+ } else {
+ // Store the second part in stack.
+ unsigned Offset = NextVA.getLocMemOffset() + StackOffset;
+ SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
+ }
+ } else {
+ unsigned Offset = VA.getLocMemOffset() + StackOffset;
+ // Store the first part.
+ SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
+ SDValue PtrOff = DAG.getIntPtrConstant(Offset, dl);
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Part0, PtrOff, MachinePointerInfo()));
+ // Store the second part.
+ PtrOff = DAG.getIntPtrConstant(Offset + 4, dl);
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Part1, PtrOff, MachinePointerInfo()));
+ }
+ continue;
+ }
+
+ // Arguments that can be passed on register must be kept at
+ // RegsToPass vector
+ if (VA.isRegLoc()) {
+ if (VA.getLocVT() != MVT::f32) {
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ continue;
+ }
+ Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i32, Arg);
+ RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
+ continue;
+ }
+
+ assert(VA.isMemLoc());
+
+ // Create a store off the stack pointer for this argument.
+ SDValue StackPtr = DAG.getRegister(SP::O6, MVT::i32);
+ SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() + StackOffset,
+ dl);
+ PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
+ MemOpChains.push_back(
+ DAG.getStore(Chain, dl, Arg, PtrOff, MachinePointerInfo()));
+ }
+
+  // Emit all stores, making sure they occur before any copies into physregs.
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
+
+ // Build a sequence of copy-to-reg nodes chained together with token
+ // chain and flag operands which copy the outgoing args into registers.
+  // The InFlag is necessary since all emitted instructions must be
+  // stuck together.
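+  // toCallerWindow() maps each %i register assigned by the calling
+  // convention to the corresponding %o register, since the register window
+  // rotates at the call and the callee sees our %o registers as its %i's.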
+ SDValue InFlag;
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ unsigned Reg = toCallerWindow(RegsToPass[i].first);
+ Chain = DAG.getCopyToReg(Chain, dl, Reg, RegsToPass[i].second, InFlag);
+ InFlag = Chain.getValue(1);
+ }
+
+  unsigned SRetArgSize = hasStructRetAttr ? getSRetArgSize(DAG, Callee) : 0;
+ bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
+
+ // If the callee is a GlobalAddress node (quite common, every direct call is)
+ // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
+ // Likewise ExternalSymbol -> TargetExternalSymbol.
+ unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30 : 0;
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32, 0, TF);
+ else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
+ Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32, TF);
+
+ // Returns a chain & a flag for retval copy to use
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+ if (hasStructRetAttr)
+ Ops.push_back(DAG.getTargetConstant(SRetArgSize, dl, MVT::i32));
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+ Ops.push_back(DAG.getRegister(toCallerWindow(RegsToPass[i].first),
+ RegsToPass[i].second.getValueType()));
+
+ // Add a register mask operand representing the call-preserved registers.
+ const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const uint32_t *Mask =
+ ((hasReturnsTwice)
+ ? TRI->getRTCallPreservedMask(CallConv)
+ : TRI->getCallPreservedMask(DAG.getMachineFunction(), CallConv));
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+
+ if (InFlag.getNode())
+ Ops.push_back(InFlag);
+
+ Chain = DAG.getNode(SPISD::CALL, dl, NodeTys, Ops);
+ InFlag = Chain.getValue(1);
+
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, dl, true),
+ DAG.getIntPtrConstant(0, dl, true), InFlag, dl);
+ InFlag = Chain.getValue(1);
+
+ // Assign locations to each value returned by this call.
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState RVInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
+ *DAG.getContext());
+
+ RVInfo.AnalyzeCallResult(Ins, RetCC_Sparc32);
+
+ // Copy all of the result registers out of their specified physreg.
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ if (RVLocs[i].getLocVT() == MVT::v2i32) {
+ SDValue Vec = DAG.getNode(ISD::UNDEF, dl, MVT::v2i32);
+ SDValue Lo = DAG.getCopyFromReg(
+ Chain, dl, toCallerWindow(RVLocs[i++].getLocReg()), MVT::i32, InFlag);
+ Chain = Lo.getValue(1);
+ InFlag = Lo.getValue(2);
+ Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Lo,
+ DAG.getConstant(0, dl, MVT::i32));
+ SDValue Hi = DAG.getCopyFromReg(
+ Chain, dl, toCallerWindow(RVLocs[i].getLocReg()), MVT::i32, InFlag);
+ Chain = Hi.getValue(1);
+ InFlag = Hi.getValue(2);
+ Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v2i32, Vec, Hi,
+ DAG.getConstant(1, dl, MVT::i32));
+ InVals.push_back(Vec);
+ } else {
+ Chain =
+ DAG.getCopyFromReg(Chain, dl, toCallerWindow(RVLocs[i].getLocReg()),
+ RVLocs[i].getValVT(), InFlag)
+ .getValue(1);
+ InFlag = Chain.getValue(2);
+ InVals.push_back(Chain.getValue(0));
+ }
+ }
+
+ return Chain;
+}
+
+// FIXME? Maybe this could be a TableGen attribute on some registers and
+// this table could be generated automatically from RegInfo.
+unsigned SparcTargetLowering::getRegisterByName(const char* RegName, EVT VT,
+ SelectionDAG &DAG) const {
+ unsigned Reg = StringSwitch<unsigned>(RegName)
+ .Case("i0", SP::I0).Case("i1", SP::I1).Case("i2", SP::I2).Case("i3", SP::I3)
+ .Case("i4", SP::I4).Case("i5", SP::I5).Case("i6", SP::I6).Case("i7", SP::I7)
+ .Case("o0", SP::O0).Case("o1", SP::O1).Case("o2", SP::O2).Case("o3", SP::O3)
+ .Case("o4", SP::O4).Case("o5", SP::O5).Case("o6", SP::O6).Case("o7", SP::O7)
+ .Case("l0", SP::L0).Case("l1", SP::L1).Case("l2", SP::L2).Case("l3", SP::L3)
+ .Case("l4", SP::L4).Case("l5", SP::L5).Case("l6", SP::L6).Case("l7", SP::L7)
+ .Case("g0", SP::G0).Case("g1", SP::G1).Case("g2", SP::G2).Case("g3", SP::G3)
+ .Case("g4", SP::G4).Case("g5", SP::G5).Case("g6", SP::G6).Case("g7", SP::G7)
+ .Default(0);
+
+ if (Reg)
+ return Reg;
+
+ report_fatal_error("Invalid register name global variable");
+}
+
+// This function returns true if CalleeName is an ABI function that returns
+// a long double (fp128).
+static bool isFP128ABICall(const char *CalleeName)
+{
+ static const char *const ABICalls[] =
+ { "_Q_add", "_Q_sub", "_Q_mul", "_Q_div",
+ "_Q_sqrt", "_Q_neg",
+ "_Q_itoq", "_Q_stoq", "_Q_dtoq", "_Q_utoq",
+ "_Q_lltoq", "_Q_ulltoq",
+ nullptr
+ };
+ for (const char * const *I = ABICalls; *I != nullptr; ++I)
+ if (strcmp(CalleeName, *I) == 0)
+ return true;
+ return false;
+}
+
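+// Compute the size in bytes of the struct pointed to by the callee's sret
+// argument. The 32-bit SPARC ABI follows a call to a struct-returning
+// function with an "unimp <size>" word, which is why LowerCall_32 attaches
+// this size to the call node as an extra operand.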
+unsigned
+SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
+{
+ const Function *CalleeFn = nullptr;
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
+ CalleeFn = dyn_cast<Function>(G->getGlobal());
+ } else if (ExternalSymbolSDNode *E =
+ dyn_cast<ExternalSymbolSDNode>(Callee)) {
+ const Function &F = DAG.getMachineFunction().getFunction();
+ const Module *M = F.getParent();
+ const char *CalleeName = E->getSymbol();
+ CalleeFn = M->getFunction(CalleeName);
+ if (!CalleeFn && isFP128ABICall(CalleeName))
+ return 16; // Return sizeof(fp128)
+ }
+
+ if (!CalleeFn)
+ return 0;
+
+ // It would be nice to check for the sret attribute on CalleeFn here,
+ // but since it is not part of the function type, any check will misfire.
+
+ PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
+ Type *ElementTy = Ty->getElementType();
+ return DAG.getDataLayout().getTypeAllocSize(ElementTy);
+}
+
+// Fixup floating point arguments in the ... part of a varargs call.
+//
+// The SPARC v9 ABI requires that floating point arguments are treated the same
+// as integers when calling a varargs function. This does not apply to the
+// fixed arguments that are part of the function's prototype.
+//
+// This function post-processes a CCValAssign array created by
+// AnalyzeCallOperands().
+static void fixupVariableFloatArgs(SmallVectorImpl<CCValAssign> &ArgLocs,
+ ArrayRef<ISD::OutputArg> Outs) {
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ const CCValAssign &VA = ArgLocs[i];
+ MVT ValTy = VA.getLocVT();
+ // FIXME: What about f32 arguments? C promotes them to f64 when calling
+ // varargs functions.
+ if (!VA.isRegLoc() || (ValTy != MVT::f64 && ValTy != MVT::f128))
+ continue;
+ // The fixed arguments to a varargs function still go in FP registers.
+ if (Outs[VA.getValNo()].IsFixed)
+ continue;
+
+ // This floating point argument should be reassigned.
+ CCValAssign NewVA;
+
+ // Determine the offset into the argument array.
+ unsigned firstReg = (ValTy == MVT::f64) ? SP::D0 : SP::Q0;
+ unsigned argSize = (ValTy == MVT::f64) ? 8 : 16;
+ unsigned Offset = argSize * (VA.getLocReg() - firstReg);
+ assert(Offset < 16*8 && "Offset out of range, bad register enum?");
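+    // For example, an f64 vararg assigned to %d2 gets Offset 16 and is
+    // reassigned to %i2, while one assigned to %d8 gets Offset 64 and is
+    // moved to memory because only offsets below 6*8 map onto %i0-%i5.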
+
+ if (Offset < 6*8) {
+ // This argument should go in %i0-%i5.
+ unsigned IReg = SP::I0 + Offset/8;
+ if (ValTy == MVT::f64)
+ // Full register, just bitconvert into i64.
+ NewVA = CCValAssign::getReg(VA.getValNo(), VA.getValVT(),
+ IReg, MVT::i64, CCValAssign::BCvt);
+ else {
+ assert(ValTy == MVT::f128 && "Unexpected type!");
+ // Full register, just bitconvert into i128 -- We will lower this into
+ // two i64s in LowerCall_64.
+ NewVA = CCValAssign::getCustomReg(VA.getValNo(), VA.getValVT(),
+ IReg, MVT::i128, CCValAssign::BCvt);
+ }
+ } else {
+ // This needs to go to memory, we're out of integer registers.
+ NewVA = CCValAssign::getMem(VA.getValNo(), VA.getValVT(),
+ Offset, VA.getLocVT(), VA.getLocInfo());
+ }
+ ArgLocs[i] = NewVA;
+ }
+}
+
+// Lower a call for the 64-bit ABI.
+SDValue
+SparcTargetLowering::LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const {
+ SelectionDAG &DAG = CLI.DAG;
+ SDLoc DL = CLI.DL;
+ SDValue Chain = CLI.Chain;
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+ // Sparc target does not yet support tail call optimization.
+ CLI.IsTailCall = false;
+
+ // Analyze operands of the call, assigning locations to each operand.
+ SmallVector<CCValAssign, 16> ArgLocs;
+ CCState CCInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), ArgLocs,
+ *DAG.getContext());
+ CCInfo.AnalyzeCallOperands(CLI.Outs, CC_Sparc64);
+
+ // Get the size of the outgoing arguments stack space requirement.
+ // The stack offset computed by CC_Sparc64 includes all arguments.
+ // Called functions expect 6 argument words to exist in the stack frame, used
+ // or not.
+ unsigned ArgsSize = std::max(6*8u, CCInfo.getNextStackOffset());
+
+ // Keep stack frames 16-byte aligned.
+ ArgsSize = alignTo(ArgsSize, 16);
+
+ // Varargs calls require special treatment.
+ if (CLI.IsVarArg)
+ fixupVariableFloatArgs(ArgLocs, CLI.Outs);
+
+ // Adjust the stack pointer to make room for the arguments.
+ // FIXME: Use hasReservedCallFrame to avoid %sp adjustments around all calls
+ // with more than 6 arguments.
+ Chain = DAG.getCALLSEQ_START(Chain, ArgsSize, 0, DL);
+
+ // Collect the set of registers to pass to the function and their values.
+ // This will be emitted as a sequence of CopyToReg nodes glued to the call
+ // instruction.
+ SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
+
+  // Collect chains from all the memory operations that copy arguments to the
+ // stack. They must follow the stack pointer adjustment above and precede the
+ // call instruction itself.
+ SmallVector<SDValue, 8> MemOpChains;
+
+ for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
+ const CCValAssign &VA = ArgLocs[i];
+ SDValue Arg = CLI.OutVals[i];
+
+ // Promote the value if needed.
+ switch (VA.getLocInfo()) {
+ default:
+ llvm_unreachable("Unknown location info!");
+ case CCValAssign::Full:
+ break;
+ case CCValAssign::SExt:
+ Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::ZExt:
+ Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::AExt:
+ Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
+ break;
+ case CCValAssign::BCvt:
+      // fixupVariableFloatArgs() may create bitcasts from f128 to i128. But
+      // SPARC does not support i128 natively, so lower it into two i64s; see
+      // below.
+ if (!VA.needsCustom() || VA.getValVT() != MVT::f128
+ || VA.getLocVT() != MVT::i128)
+ Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
+ break;
+ }
+
+ if (VA.isRegLoc()) {
+ if (VA.needsCustom() && VA.getValVT() == MVT::f128
+ && VA.getLocVT() == MVT::i128) {
+ // Store and reload into the integer register reg and reg+1.
+ unsigned Offset = 8 * (VA.getLocReg() - SP::I0);
+ unsigned StackOffset = Offset + Subtarget->getStackPointerBias() + 128;
+ SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
+ SDValue HiPtrOff = DAG.getIntPtrConstant(StackOffset, DL);
+ HiPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, HiPtrOff);
+ SDValue LoPtrOff = DAG.getIntPtrConstant(StackOffset + 8, DL);
+ LoPtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, LoPtrOff);
+
+ // Store to %sp+BIAS+128+Offset
+ SDValue Store =
+ DAG.getStore(Chain, DL, Arg, HiPtrOff, MachinePointerInfo());
+ // Load into Reg and Reg+1
+ SDValue Hi64 =
+ DAG.getLoad(MVT::i64, DL, Store, HiPtrOff, MachinePointerInfo());
+ SDValue Lo64 =
+ DAG.getLoad(MVT::i64, DL, Store, LoPtrOff, MachinePointerInfo());
+ RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()),
+ Hi64));
+ RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()+1),
+ Lo64));
+ continue;
+ }
+
+      // The custom bit on an i32 argument indicates that it should be
+      // passed in the high bits of the register.
+ if (VA.getValVT() == MVT::i32 && VA.needsCustom()) {
+ Arg = DAG.getNode(ISD::SHL, DL, MVT::i64, Arg,
+ DAG.getConstant(32, DL, MVT::i32));
+
+ // The next value may go in the low bits of the same register.
+ // Handle both at once.
+ if (i+1 < ArgLocs.size() && ArgLocs[i+1].isRegLoc() &&
+ ArgLocs[i+1].getLocReg() == VA.getLocReg()) {
+ SDValue NV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64,
+ CLI.OutVals[i+1]);
+ Arg = DAG.getNode(ISD::OR, DL, MVT::i64, Arg, NV);
+ // Skip the next value, it's already done.
+ ++i;
+ }
+ }
+ RegsToPass.push_back(std::make_pair(toCallerWindow(VA.getLocReg()), Arg));
+ continue;
+ }
+
+ assert(VA.isMemLoc());
+
+ // Create a store off the stack pointer for this argument.
+ SDValue StackPtr = DAG.getRegister(SP::O6, PtrVT);
+ // The argument area starts at %fp+BIAS+128 in the callee frame,
+ // %sp+BIAS+128 in ours.
+ SDValue PtrOff = DAG.getIntPtrConstant(VA.getLocMemOffset() +
+ Subtarget->getStackPointerBias() +
+ 128, DL);
+ PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
+ MemOpChains.push_back(
+ DAG.getStore(Chain, DL, Arg, PtrOff, MachinePointerInfo()));
+ }
+
+ // Emit all stores, make sure they occur before the call.
+ if (!MemOpChains.empty())
+ Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
+
+ // Build a sequence of CopyToReg nodes glued together with token chain and
+ // glue operands which copy the outgoing args into registers. The InGlue is
+ // necessary since all emitted instructions must be stuck together in order
+ // to pass the live physical registers.
+ SDValue InGlue;
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
+ Chain = DAG.getCopyToReg(Chain, DL,
+ RegsToPass[i].first, RegsToPass[i].second, InGlue);
+ InGlue = Chain.getValue(1);
+ }
+
+ // If the callee is a GlobalAddress node (quite common, every direct call is)
+ // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
+ // Likewise ExternalSymbol -> TargetExternalSymbol.
+ SDValue Callee = CLI.Callee;
+ bool hasReturnsTwice = hasReturnsTwiceAttr(DAG, Callee, CLI.CS);
+ unsigned TF = isPositionIndependent() ? SparcMCExpr::VK_Sparc_WPLT30 : 0;
+ if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
+ Callee = DAG.getTargetGlobalAddress(G->getGlobal(), DL, PtrVT, 0, TF);
+ else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
+ Callee = DAG.getTargetExternalSymbol(E->getSymbol(), PtrVT, TF);
+
+ // Build the operands for the call instruction itself.
+ SmallVector<SDValue, 8> Ops;
+ Ops.push_back(Chain);
+ Ops.push_back(Callee);
+ for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
+ Ops.push_back(DAG.getRegister(RegsToPass[i].first,
+ RegsToPass[i].second.getValueType()));
+
+ // Add a register mask operand representing the call-preserved registers.
+ const SparcRegisterInfo *TRI = Subtarget->getRegisterInfo();
+ const uint32_t *Mask =
+ ((hasReturnsTwice) ? TRI->getRTCallPreservedMask(CLI.CallConv)
+ : TRI->getCallPreservedMask(DAG.getMachineFunction(),
+ CLI.CallConv));
+ assert(Mask && "Missing call preserved mask for calling convention");
+ Ops.push_back(DAG.getRegisterMask(Mask));
+
+ // Make sure the CopyToReg nodes are glued to the call instruction which
+ // consumes the registers.
+ if (InGlue.getNode())
+ Ops.push_back(InGlue);
+
+ // Now the call itself.
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ Chain = DAG.getNode(SPISD::CALL, DL, NodeTys, Ops);
+ InGlue = Chain.getValue(1);
+
+ // Revert the stack pointer immediately after the call.
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(ArgsSize, DL, true),
+ DAG.getIntPtrConstant(0, DL, true), InGlue, DL);
+ InGlue = Chain.getValue(1);
+
+ // Now extract the return values. This is more or less the same as
+ // LowerFormalArguments_64.
+
+ // Assign locations to each value returned by this call.
+ SmallVector<CCValAssign, 16> RVLocs;
+ CCState RVInfo(CLI.CallConv, CLI.IsVarArg, DAG.getMachineFunction(), RVLocs,
+ *DAG.getContext());
+
+  // Set the inreg flag manually for compiler-generated library calls that
+  // return float.
+ if (CLI.Ins.size() == 1 && CLI.Ins[0].VT == MVT::f32 && !CLI.CS)
+ CLI.Ins[0].Flags.setInReg();
+
+ RVInfo.AnalyzeCallResult(CLI.Ins, RetCC_Sparc64);
+
+ // Copy all of the result registers out of their specified physreg.
+ for (unsigned i = 0; i != RVLocs.size(); ++i) {
+ CCValAssign &VA = RVLocs[i];
+ unsigned Reg = toCallerWindow(VA.getLocReg());
+
+ // When returning 'inreg {i32, i32 }', two consecutive i32 arguments can
+ // reside in the same register in the high and low bits. Reuse the
+ // CopyFromReg previous node to avoid duplicate copies.
+ SDValue RV;
+ if (RegisterSDNode *SrcReg = dyn_cast<RegisterSDNode>(Chain.getOperand(1)))
+ if (SrcReg->getReg() == Reg && Chain->getOpcode() == ISD::CopyFromReg)
+ RV = Chain.getValue(0);
+
+ // But usually we'll create a new CopyFromReg for a different register.
+ if (!RV.getNode()) {
+ RV = DAG.getCopyFromReg(Chain, DL, Reg, RVLocs[i].getLocVT(), InGlue);
+ Chain = RV.getValue(1);
+ InGlue = Chain.getValue(2);
+ }
+
+ // Get the high bits for i32 struct elements.
+ if (VA.getValVT() == MVT::i32 && VA.needsCustom())
+ RV = DAG.getNode(ISD::SRL, DL, VA.getLocVT(), RV,
+ DAG.getConstant(32, DL, MVT::i32));
+
+ // The callee promoted the return value, so insert an Assert?ext SDNode so
+ // we won't promote the value again in this function.
+ switch (VA.getLocInfo()) {
+ case CCValAssign::SExt:
+ RV = DAG.getNode(ISD::AssertSext, DL, VA.getLocVT(), RV,
+ DAG.getValueType(VA.getValVT()));
+ break;
+ case CCValAssign::ZExt:
+ RV = DAG.getNode(ISD::AssertZext, DL, VA.getLocVT(), RV,
+ DAG.getValueType(VA.getValVT()));
+ break;
+ default:
+ break;
+ }
+
+ // Truncate the register down to the return value type.
+ if (VA.isExtInLoc())
+ RV = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), RV);
+
+ InVals.push_back(RV);
+ }
+
+ return Chain;
+}
+
+//===----------------------------------------------------------------------===//
+// TargetLowering Implementation
+//===----------------------------------------------------------------------===//
+
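+// A 32-bit atomic exchange maps directly onto the SPARC SWAP instruction;
+// all other atomic RMW operations are expanded into compare-and-swap loops
+// by AtomicExpandPass.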
+TargetLowering::AtomicExpansionKind
+SparcTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
+ if (AI->getOperation() == AtomicRMWInst::Xchg &&
+ AI->getType()->getPrimitiveSizeInBits() == 32)
+    return AtomicExpansionKind::None; // Uses the SWAP instruction.
+
+ return AtomicExpansionKind::CmpXChg;
+}
+
+/// IntCondCCodeToICC - Convert a DAG integer condition code to a SPARC ICC
+/// condition.
+static SPCC::CondCodes IntCondCCodeToICC(ISD::CondCode CC) {
+ switch (CC) {
+ default: llvm_unreachable("Unknown integer condition code!");
+ case ISD::SETEQ: return SPCC::ICC_E;
+ case ISD::SETNE: return SPCC::ICC_NE;
+ case ISD::SETLT: return SPCC::ICC_L;
+ case ISD::SETGT: return SPCC::ICC_G;
+ case ISD::SETLE: return SPCC::ICC_LE;
+ case ISD::SETGE: return SPCC::ICC_GE;
+ case ISD::SETULT: return SPCC::ICC_CS;
+ case ISD::SETULE: return SPCC::ICC_LEU;
+ case ISD::SETUGT: return SPCC::ICC_GU;
+ case ISD::SETUGE: return SPCC::ICC_CC;
+ }
+}
+
+/// FPCondCCodeToFCC - Convert a DAG floating point condition code to a SPARC
+/// FCC condition.
+static SPCC::CondCodes FPCondCCodeToFCC(ISD::CondCode CC) {
+ switch (CC) {
+ default: llvm_unreachable("Unknown fp condition code!");
+ case ISD::SETEQ:
+ case ISD::SETOEQ: return SPCC::FCC_E;
+ case ISD::SETNE:
+ case ISD::SETUNE: return SPCC::FCC_NE;
+ case ISD::SETLT:
+ case ISD::SETOLT: return SPCC::FCC_L;
+ case ISD::SETGT:
+ case ISD::SETOGT: return SPCC::FCC_G;
+ case ISD::SETLE:
+ case ISD::SETOLE: return SPCC::FCC_LE;
+ case ISD::SETGE:
+ case ISD::SETOGE: return SPCC::FCC_GE;
+ case ISD::SETULT: return SPCC::FCC_UL;
+ case ISD::SETULE: return SPCC::FCC_ULE;
+ case ISD::SETUGT: return SPCC::FCC_UG;
+ case ISD::SETUGE: return SPCC::FCC_UGE;
+ case ISD::SETUO: return SPCC::FCC_U;
+ case ISD::SETO: return SPCC::FCC_O;
+ case ISD::SETONE: return SPCC::FCC_LG;
+ case ISD::SETUEQ: return SPCC::FCC_UE;
+ }
+}
+
+SparcTargetLowering::SparcTargetLowering(const TargetMachine &TM,
+ const SparcSubtarget &STI)
+ : TargetLowering(TM), Subtarget(&STI) {
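+  // The pointer type is i32 for 32-bit SPARC and i64 for 64-bit SPARC.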
+ MVT PtrVT = MVT::getIntegerVT(8 * TM.getPointerSize());
+
+ // Instructions which use registers as conditionals examine all the
+ // bits (as does the pseudo SELECT_CC expansion). I don't think it
+ // matters much whether it's ZeroOrOneBooleanContent, or
+ // ZeroOrNegativeOneBooleanContent, so, arbitrarily choose the
+ // former.
+ setBooleanContents(ZeroOrOneBooleanContent);
+ setBooleanVectorContents(ZeroOrOneBooleanContent);
+
+ // Set up the register classes.
+ addRegisterClass(MVT::i32, &SP::IntRegsRegClass);
+ if (!Subtarget->useSoftFloat()) {
+ addRegisterClass(MVT::f32, &SP::FPRegsRegClass);
+ addRegisterClass(MVT::f64, &SP::DFPRegsRegClass);
+ addRegisterClass(MVT::f128, &SP::QFPRegsRegClass);
+ }
+ if (Subtarget->is64Bit()) {
+ addRegisterClass(MVT::i64, &SP::I64RegsRegClass);
+ } else {
+    // On 32-bit SPARC, we also define a register class for pairs of 32-bit
+    // registers. This is modeled in LLVM as a 2-vector of i32.
+ addRegisterClass(MVT::v2i32, &SP::IntPairRegClass);
+
+ // ...but almost all operations must be expanded, so set that as
+ // the default.
+ for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op) {
+ setOperationAction(Op, MVT::v2i32, Expand);
+ }
+ // Truncating/extending stores/loads are also not supported.
+ for (MVT VT : MVT::integer_vector_valuetypes()) {
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, VT, MVT::v2i32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Expand);
+
+ setLoadExtAction(ISD::SEXTLOAD, MVT::v2i32, VT, Expand);
+ setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i32, VT, Expand);
+ setLoadExtAction(ISD::EXTLOAD, MVT::v2i32, VT, Expand);
+
+ setTruncStoreAction(VT, MVT::v2i32, Expand);
+ setTruncStoreAction(MVT::v2i32, VT, Expand);
+ }
+ // However, load and store *are* legal.
+ setOperationAction(ISD::LOAD, MVT::v2i32, Legal);
+ setOperationAction(ISD::STORE, MVT::v2i32, Legal);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i32, Legal);
+ setOperationAction(ISD::BUILD_VECTOR, MVT::v2i32, Legal);
+
+ // And we need to promote i64 loads/stores into vector load/store
+ setOperationAction(ISD::LOAD, MVT::i64, Custom);
+ setOperationAction(ISD::STORE, MVT::i64, Custom);
+
+ // Sadly, this doesn't work:
+ // AddPromotedToType(ISD::LOAD, MVT::i64, MVT::v2i32);
+ // AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
+ }
+
+ // Turn FP extload into load/fpextend
+ for (MVT VT : MVT::fp_valuetypes()) {
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
+ setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
+ }
+
+ // Sparc doesn't have i1 sign extending load
+ for (MVT VT : MVT::integer_valuetypes())
+ setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);
+
+ // Turn FP truncstore into trunc + store.
+ setTruncStoreAction(MVT::f64, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f128, MVT::f32, Expand);
+ setTruncStoreAction(MVT::f128, MVT::f64, Expand);
+
+ // Custom legalize GlobalAddress nodes into LO/HI parts.
+ setOperationAction(ISD::GlobalAddress, PtrVT, Custom);
+ setOperationAction(ISD::GlobalTLSAddress, PtrVT, Custom);
+ setOperationAction(ISD::ConstantPool, PtrVT, Custom);
+ setOperationAction(ISD::BlockAddress, PtrVT, Custom);
+
+ // Sparc doesn't have sext_inreg, replace them with shl/sra
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Expand);
+ setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
+
+ // Sparc has no REM or DIVREM operations.
+ setOperationAction(ISD::UREM, MVT::i32, Expand);
+ setOperationAction(ISD::SREM, MVT::i32, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
+
+ // ... nor does SparcV9.
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::UREM, MVT::i64, Expand);
+ setOperationAction(ISD::SREM, MVT::i64, Expand);
+ setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
+ setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
+ }
+
+ // Custom expand fp<->sint
+ setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
+ setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
+ setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
+
+ // Custom Expand fp<->uint
+ setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
+ setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
+ setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
+
+ setOperationAction(ISD::BITCAST, MVT::f32, Expand);
+ setOperationAction(ISD::BITCAST, MVT::i32, Expand);
+
+ // Sparc has no select or setcc: expand to SELECT_CC.
+ setOperationAction(ISD::SELECT, MVT::i32, Expand);
+ setOperationAction(ISD::SELECT, MVT::f32, Expand);
+ setOperationAction(ISD::SELECT, MVT::f64, Expand);
+ setOperationAction(ISD::SELECT, MVT::f128, Expand);
+
+ setOperationAction(ISD::SETCC, MVT::i32, Expand);
+ setOperationAction(ISD::SETCC, MVT::f32, Expand);
+ setOperationAction(ISD::SETCC, MVT::f64, Expand);
+ setOperationAction(ISD::SETCC, MVT::f128, Expand);
+
+ // Sparc doesn't have BRCOND either, it has BR_CC.
+ setOperationAction(ISD::BRCOND, MVT::Other, Expand);
+ setOperationAction(ISD::BRIND, MVT::Other, Expand);
+ setOperationAction(ISD::BR_JT, MVT::Other, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i32, Custom);
+ setOperationAction(ISD::BR_CC, MVT::f32, Custom);
+ setOperationAction(ISD::BR_CC, MVT::f64, Custom);
+ setOperationAction(ISD::BR_CC, MVT::f128, Custom);
+
+ setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
+
+ setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
+ setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);
+
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::ADDC, MVT::i64, Custom);
+ setOperationAction(ISD::ADDE, MVT::i64, Custom);
+ setOperationAction(ISD::SUBC, MVT::i64, Custom);
+ setOperationAction(ISD::SUBE, MVT::i64, Custom);
+ setOperationAction(ISD::BITCAST, MVT::f64, Expand);
+ setOperationAction(ISD::BITCAST, MVT::i64, Expand);
+ setOperationAction(ISD::SELECT, MVT::i64, Expand);
+ setOperationAction(ISD::SETCC, MVT::i64, Expand);
+ setOperationAction(ISD::BR_CC, MVT::i64, Custom);
+ setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
+
+ setOperationAction(ISD::CTPOP, MVT::i64,
+ Subtarget->usePopc() ? Legal : Expand);
+ setOperationAction(ISD::CTTZ , MVT::i64, Expand);
+ setOperationAction(ISD::CTLZ , MVT::i64, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i64, Expand);
+ setOperationAction(ISD::ROTL , MVT::i64, Expand);
+ setOperationAction(ISD::ROTR , MVT::i64, Expand);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
+ }
+
+ // ATOMICs.
+ // Atomics are supported on SparcV9. 32-bit atomics are also
+ // supported by some Leon SparcV8 variants. Otherwise, atomics
+ // are unsupported.
+ if (Subtarget->isV9())
+ setMaxAtomicSizeInBitsSupported(64);
+ else if (Subtarget->hasLeonCasa())
+ setMaxAtomicSizeInBitsSupported(32);
+ else
+ setMaxAtomicSizeInBitsSupported(0);
+
+ setMinCmpXchgSizeInBits(32);
+
+ setOperationAction(ISD::ATOMIC_SWAP, MVT::i32, Legal);
+
+ setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Legal);
+
+ // Custom Lower Atomic LOAD/STORE
+ setOperationAction(ISD::ATOMIC_LOAD, MVT::i32, Custom);
+ setOperationAction(ISD::ATOMIC_STORE, MVT::i32, Custom);
+
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i64, Legal);
+ setOperationAction(ISD::ATOMIC_SWAP, MVT::i64, Legal);
+ setOperationAction(ISD::ATOMIC_LOAD, MVT::i64, Custom);
+ setOperationAction(ISD::ATOMIC_STORE, MVT::i64, Custom);
+ }
+
+ if (!Subtarget->is64Bit()) {
+ // These libcalls are not available in 32-bit.
+ setLibcallName(RTLIB::SHL_I128, nullptr);
+ setLibcallName(RTLIB::SRL_I128, nullptr);
+ setLibcallName(RTLIB::SRA_I128, nullptr);
+ }
+
+ if (!Subtarget->isV9()) {
+ // SparcV8 does not have FNEGD and FABSD.
+ setOperationAction(ISD::FNEG, MVT::f64, Custom);
+ setOperationAction(ISD::FABS, MVT::f64, Custom);
+ }
+
+ setOperationAction(ISD::FSIN , MVT::f128, Expand);
+ setOperationAction(ISD::FCOS , MVT::f128, Expand);
+ setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
+ setOperationAction(ISD::FREM , MVT::f128, Expand);
+ setOperationAction(ISD::FMA , MVT::f128, Expand);
+ setOperationAction(ISD::FSIN , MVT::f64, Expand);
+ setOperationAction(ISD::FCOS , MVT::f64, Expand);
+ setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
+ setOperationAction(ISD::FREM , MVT::f64, Expand);
+ setOperationAction(ISD::FMA , MVT::f64, Expand);
+ setOperationAction(ISD::FSIN , MVT::f32, Expand);
+ setOperationAction(ISD::FCOS , MVT::f32, Expand);
+ setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
+ setOperationAction(ISD::FREM , MVT::f32, Expand);
+ setOperationAction(ISD::FMA , MVT::f32, Expand);
+ setOperationAction(ISD::CTTZ , MVT::i32, Expand);
+ setOperationAction(ISD::CTLZ , MVT::i32, Expand);
+ setOperationAction(ISD::ROTL , MVT::i32, Expand);
+ setOperationAction(ISD::ROTR , MVT::i32, Expand);
+ setOperationAction(ISD::BSWAP, MVT::i32, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
+ setOperationAction(ISD::FPOW , MVT::f128, Expand);
+ setOperationAction(ISD::FPOW , MVT::f64, Expand);
+ setOperationAction(ISD::FPOW , MVT::f32, Expand);
+
+ setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
+ setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
+ setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
+
+ // Expands to [SU]MUL_LOHI.
+ setOperationAction(ISD::MULHU, MVT::i32, Expand);
+ setOperationAction(ISD::MULHS, MVT::i32, Expand);
+ setOperationAction(ISD::MUL, MVT::i32, Expand);
+
+ if (Subtarget->useSoftMulDiv()) {
+ // .umul works for both signed and unsigned
+ setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
+ setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
+ setLibcallName(RTLIB::MUL_I32, ".umul");
+
+ setOperationAction(ISD::SDIV, MVT::i32, Expand);
+ setLibcallName(RTLIB::SDIV_I32, ".div");
+
+ setOperationAction(ISD::UDIV, MVT::i32, Expand);
+ setLibcallName(RTLIB::UDIV_I32, ".udiv");
+ }
+
+ if (Subtarget->is64Bit()) {
+ setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
+ setOperationAction(ISD::MULHU, MVT::i64, Expand);
+ setOperationAction(ISD::MULHS, MVT::i64, Expand);
+
+ setOperationAction(ISD::UMULO, MVT::i64, Custom);
+ setOperationAction(ISD::SMULO, MVT::i64, Custom);
+
+ setOperationAction(ISD::SHL_PARTS, MVT::i64, Expand);
+ setOperationAction(ISD::SRA_PARTS, MVT::i64, Expand);
+ setOperationAction(ISD::SRL_PARTS, MVT::i64, Expand);
+ }
+
+ // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
+ setOperationAction(ISD::VASTART , MVT::Other, Custom);
+  // VAARG needs custom lowering so that it does not perform unaligned
+  // accesses for doubles.
+ setOperationAction(ISD::VAARG , MVT::Other, Custom);
+
+ setOperationAction(ISD::TRAP , MVT::Other, Legal);
+
+ // Use the default implementation.
+ setOperationAction(ISD::VACOPY , MVT::Other, Expand);
+ setOperationAction(ISD::VAEND , MVT::Other, Expand);
+ setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
+ setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
+ setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
+
+ setStackPointerRegisterToSaveRestore(SP::O6);
+
+ setOperationAction(ISD::CTPOP, MVT::i32,
+ Subtarget->usePopc() ? Legal : Expand);
+
+ if (Subtarget->isV9() && Subtarget->hasHardQuad()) {
+ setOperationAction(ISD::LOAD, MVT::f128, Legal);
+ setOperationAction(ISD::STORE, MVT::f128, Legal);
+ } else {
+ setOperationAction(ISD::LOAD, MVT::f128, Custom);
+ setOperationAction(ISD::STORE, MVT::f128, Custom);
+ }
+
+ if (Subtarget->hasHardQuad()) {
+ setOperationAction(ISD::FADD, MVT::f128, Legal);
+ setOperationAction(ISD::FSUB, MVT::f128, Legal);
+ setOperationAction(ISD::FMUL, MVT::f128, Legal);
+ setOperationAction(ISD::FDIV, MVT::f128, Legal);
+ setOperationAction(ISD::FSQRT, MVT::f128, Legal);
+ setOperationAction(ISD::FP_EXTEND, MVT::f128, Legal);
+ setOperationAction(ISD::FP_ROUND, MVT::f64, Legal);
+ if (Subtarget->isV9()) {
+ setOperationAction(ISD::FNEG, MVT::f128, Legal);
+ setOperationAction(ISD::FABS, MVT::f128, Legal);
+ } else {
+ setOperationAction(ISD::FNEG, MVT::f128, Custom);
+ setOperationAction(ISD::FABS, MVT::f128, Custom);
+ }
+
+ if (!Subtarget->is64Bit()) {
+ setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
+ setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
+ setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
+ setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
+ }
+
+ } else {
+ // Custom legalize f128 operations.
+
+ setOperationAction(ISD::FADD, MVT::f128, Custom);
+ setOperationAction(ISD::FSUB, MVT::f128, Custom);
+ setOperationAction(ISD::FMUL, MVT::f128, Custom);
+ setOperationAction(ISD::FDIV, MVT::f128, Custom);
+ setOperationAction(ISD::FSQRT, MVT::f128, Custom);
+ setOperationAction(ISD::FNEG, MVT::f128, Custom);
+ setOperationAction(ISD::FABS, MVT::f128, Custom);
+
+ setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);
+ setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);
+ setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
+
+ // Setup Runtime library names.
+ if (Subtarget->is64Bit() && !Subtarget->useSoftFloat()) {
+ setLibcallName(RTLIB::ADD_F128, "_Qp_add");
+ setLibcallName(RTLIB::SUB_F128, "_Qp_sub");
+ setLibcallName(RTLIB::MUL_F128, "_Qp_mul");
+ setLibcallName(RTLIB::DIV_F128, "_Qp_div");
+ setLibcallName(RTLIB::SQRT_F128, "_Qp_sqrt");
+ setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Qp_qtoi");
+ setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Qp_qtoui");
+ setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Qp_itoq");
+ setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Qp_uitoq");
+ setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Qp_qtox");
+ setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Qp_qtoux");
+ setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Qp_xtoq");
+ setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Qp_uxtoq");
+ setLibcallName(RTLIB::FPEXT_F32_F128, "_Qp_stoq");
+ setLibcallName(RTLIB::FPEXT_F64_F128, "_Qp_dtoq");
+ setLibcallName(RTLIB::FPROUND_F128_F32, "_Qp_qtos");
+ setLibcallName(RTLIB::FPROUND_F128_F64, "_Qp_qtod");
+ } else if (!Subtarget->useSoftFloat()) {
+ setLibcallName(RTLIB::ADD_F128, "_Q_add");
+ setLibcallName(RTLIB::SUB_F128, "_Q_sub");
+ setLibcallName(RTLIB::MUL_F128, "_Q_mul");
+ setLibcallName(RTLIB::DIV_F128, "_Q_div");
+ setLibcallName(RTLIB::SQRT_F128, "_Q_sqrt");
+ setLibcallName(RTLIB::FPTOSINT_F128_I32, "_Q_qtoi");
+ setLibcallName(RTLIB::FPTOUINT_F128_I32, "_Q_qtou");
+ setLibcallName(RTLIB::SINTTOFP_I32_F128, "_Q_itoq");
+ setLibcallName(RTLIB::UINTTOFP_I32_F128, "_Q_utoq");
+ setLibcallName(RTLIB::FPTOSINT_F128_I64, "_Q_qtoll");
+ setLibcallName(RTLIB::FPTOUINT_F128_I64, "_Q_qtoull");
+ setLibcallName(RTLIB::SINTTOFP_I64_F128, "_Q_lltoq");
+ setLibcallName(RTLIB::UINTTOFP_I64_F128, "_Q_ulltoq");
+ setLibcallName(RTLIB::FPEXT_F32_F128, "_Q_stoq");
+ setLibcallName(RTLIB::FPEXT_F64_F128, "_Q_dtoq");
+ setLibcallName(RTLIB::FPROUND_F128_F32, "_Q_qtos");
+ setLibcallName(RTLIB::FPROUND_F128_F64, "_Q_qtod");
+ }
+ }
+
+ if (Subtarget->fixAllFDIVSQRT()) {
+    // Promote FDIVS and FSQRTS to FDIVD and FSQRTD instructions instead, as
+    // the former instructions trigger errata on LEON processors.
+ setOperationAction(ISD::FDIV, MVT::f32, Promote);
+ setOperationAction(ISD::FSQRT, MVT::f32, Promote);
+ }
+
+ if (Subtarget->hasNoFMULS()) {
+ setOperationAction(ISD::FMUL, MVT::f32, Promote);
+ }
+
+ setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
+
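+  // Alignments are given as log2 values; SPARC instructions are 4 bytes, so
+  // functions need at least 4-byte (2^2) alignment.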
+ setMinFunctionAlignment(2);
+
+ computeRegisterProperties(Subtarget->getRegisterInfo());
+}
+
+bool SparcTargetLowering::useSoftFloat() const {
+ return Subtarget->useSoftFloat();
+}
+
+const char *SparcTargetLowering::getTargetNodeName(unsigned Opcode) const {
+ switch ((SPISD::NodeType)Opcode) {
+ case SPISD::FIRST_NUMBER: break;
+ case SPISD::CMPICC: return "SPISD::CMPICC";
+ case SPISD::CMPFCC: return "SPISD::CMPFCC";
+ case SPISD::BRICC: return "SPISD::BRICC";
+ case SPISD::BRXCC: return "SPISD::BRXCC";
+ case SPISD::BRFCC: return "SPISD::BRFCC";
+ case SPISD::SELECT_ICC: return "SPISD::SELECT_ICC";
+ case SPISD::SELECT_XCC: return "SPISD::SELECT_XCC";
+ case SPISD::SELECT_FCC: return "SPISD::SELECT_FCC";
+ case SPISD::EH_SJLJ_SETJMP: return "SPISD::EH_SJLJ_SETJMP";
+ case SPISD::EH_SJLJ_LONGJMP: return "SPISD::EH_SJLJ_LONGJMP";
+ case SPISD::Hi: return "SPISD::Hi";
+ case SPISD::Lo: return "SPISD::Lo";
+ case SPISD::FTOI: return "SPISD::FTOI";
+ case SPISD::ITOF: return "SPISD::ITOF";
+ case SPISD::FTOX: return "SPISD::FTOX";
+ case SPISD::XTOF: return "SPISD::XTOF";
+ case SPISD::CALL: return "SPISD::CALL";
+ case SPISD::RET_FLAG: return "SPISD::RET_FLAG";
+ case SPISD::GLOBAL_BASE_REG: return "SPISD::GLOBAL_BASE_REG";
+ case SPISD::FLUSHW: return "SPISD::FLUSHW";
+ case SPISD::TLS_ADD: return "SPISD::TLS_ADD";
+ case SPISD::TLS_LD: return "SPISD::TLS_LD";
+ case SPISD::TLS_CALL: return "SPISD::TLS_CALL";
+ }
+ return nullptr;
+}
+
+EVT SparcTargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
+ EVT VT) const {
+ if (!VT.isVector())
+ return MVT::i32;
+ return VT.changeVectorElementTypeToInteger();
+}
+
+/// computeKnownBitsForTargetNode - Determine which bits of Op are known to
+/// be zero or one. Op is expected to be a target-specific node. Used by the
+/// DAG combiner.
+void SparcTargetLowering::computeKnownBitsForTargetNode(
+    const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
+    const SelectionDAG &DAG, unsigned Depth) const {
+ KnownBits Known2;
+ Known.resetAll();
+
+ switch (Op.getOpcode()) {
+ default: break;
+ case SPISD::SELECT_ICC:
+ case SPISD::SELECT_XCC:
+ case SPISD::SELECT_FCC:
+ DAG.computeKnownBits(Op.getOperand(1), Known, Depth+1);
+ DAG.computeKnownBits(Op.getOperand(0), Known2, Depth+1);
+
+ // Only known if known in both the LHS and RHS.
+ Known.One &= Known2.One;
+ Known.Zero &= Known2.Zero;
+ break;
+ }
+}
+
+// Look at LHS/RHS/CC and see if they are a lowered setcc instruction. If so,
+// set LHS/RHS to the LHS/RHS of the setcc and SPCC to the condition.
+static void LookThroughSetCC(SDValue &LHS, SDValue &RHS,
+ ISD::CondCode CC, unsigned &SPCC) {
+ if (isNullConstant(RHS) &&
+ CC == ISD::SETNE &&
+ (((LHS.getOpcode() == SPISD::SELECT_ICC ||
+ LHS.getOpcode() == SPISD::SELECT_XCC) &&
+ LHS.getOperand(3).getOpcode() == SPISD::CMPICC) ||
+ (LHS.getOpcode() == SPISD::SELECT_FCC &&
+ LHS.getOperand(3).getOpcode() == SPISD::CMPFCC)) &&
+ isOneConstant(LHS.getOperand(0)) &&
+ isNullConstant(LHS.getOperand(1))) {
+ SDValue CMPCC = LHS.getOperand(3);
+ SPCC = cast<ConstantSDNode>(LHS.getOperand(2))->getZExtValue();
+ LHS = CMPCC.getOperand(0);
+ RHS = CMPCC.getOperand(1);
+ }
+}
+
+// Convert to a target node and set target flags.
+SDValue SparcTargetLowering::withTargetFlags(SDValue Op, unsigned TF,
+ SelectionDAG &DAG) const {
+ if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op))
+ return DAG.getTargetGlobalAddress(GA->getGlobal(),
+ SDLoc(GA),
+ GA->getValueType(0),
+ GA->getOffset(), TF);
+
+ if (const ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(Op))
+ return DAG.getTargetConstantPool(CP->getConstVal(),
+ CP->getValueType(0),
+ CP->getAlignment(),
+ CP->getOffset(), TF);
+
+ if (const BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(Op))
+ return DAG.getTargetBlockAddress(BA->getBlockAddress(),
+ Op.getValueType(),
+ 0,
+ TF);
+
+ if (const ExternalSymbolSDNode *ES = dyn_cast<ExternalSymbolSDNode>(Op))
+ return DAG.getTargetExternalSymbol(ES->getSymbol(),
+ ES->getValueType(0), TF);
+
+ llvm_unreachable("Unhandled address SDNode");
+}
+
+// Split Op into high and low parts according to HiTF and LoTF.
+// Return an ADD node combining the parts.
+SDValue SparcTargetLowering::makeHiLoPair(SDValue Op,
+ unsigned HiTF, unsigned LoTF,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT VT = Op.getValueType();
+ SDValue Hi = DAG.getNode(SPISD::Hi, DL, VT, withTargetFlags(Op, HiTF, DAG));
+ SDValue Lo = DAG.getNode(SPISD::Lo, DL, VT, withTargetFlags(Op, LoTF, DAG));
+ return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
+}
+
+// Build SDNodes for producing an address from a GlobalAddress, ConstantPool,
+// or ExternalSymbol SDNode.
+SDValue SparcTargetLowering::makeAddress(SDValue Op, SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ EVT VT = getPointerTy(DAG.getDataLayout());
+
+  // Handle PIC mode first. SPARC needs a GOT load for every variable.
+ if (isPositionIndependent()) {
+ // This is the pic32 code model, the GOT is known to be smaller than 4GB.
+ SDValue HiLo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_GOT22,
+ SparcMCExpr::VK_Sparc_GOT10, DAG);
+ SDValue GlobalBase = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, VT);
+ SDValue AbsAddr = DAG.getNode(ISD::ADD, DL, VT, GlobalBase, HiLo);
+    // GLOBAL_BASE_REG is codegen'ed with a call. Inform MFI that this
+    // function has calls.
+ MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI.setHasCalls(true);
+ return DAG.getLoad(VT, DL, DAG.getEntryNode(), AbsAddr,
+ MachinePointerInfo::getGOT(DAG.getMachineFunction()));
+ }
+
+ // This is one of the absolute code models.
+ switch(getTargetMachine().getCodeModel()) {
+ default:
+ llvm_unreachable("Unsupported absolute code model");
+ case CodeModel::Small:
+ // abs32.
+ return makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
+ SparcMCExpr::VK_Sparc_LO, DAG);
+ case CodeModel::Medium: {
+ // abs44.
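+    // The expansion is roughly:
+    //   sethi %h44(sym), %reg
+    //   or    %reg, %m44(sym), %reg
+    //   sllx  %reg, 12, %reg
+    //   or    %reg, %l44(sym), %reg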
+ SDValue H44 = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_H44,
+ SparcMCExpr::VK_Sparc_M44, DAG);
+ H44 = DAG.getNode(ISD::SHL, DL, VT, H44, DAG.getConstant(12, DL, MVT::i32));
+ SDValue L44 = withTargetFlags(Op, SparcMCExpr::VK_Sparc_L44, DAG);
+ L44 = DAG.getNode(SPISD::Lo, DL, VT, L44);
+ return DAG.getNode(ISD::ADD, DL, VT, H44, L44);
+ }
+ case CodeModel::Large: {
+ // abs64.
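+    // The expansion is roughly:
+    //   sethi %hh(sym), %t1
+    //   or    %t1, %hm(sym), %t1
+    //   sllx  %t1, 32, %t1
+    //   sethi %hi(sym), %t2
+    //   or    %t2, %lo(sym), %t2
+    //   add   %t1, %t2, %reg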
+ SDValue Hi = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HH,
+ SparcMCExpr::VK_Sparc_HM, DAG);
+ Hi = DAG.getNode(ISD::SHL, DL, VT, Hi, DAG.getConstant(32, DL, MVT::i32));
+ SDValue Lo = makeHiLoPair(Op, SparcMCExpr::VK_Sparc_HI,
+ SparcMCExpr::VK_Sparc_LO, DAG);
+ return DAG.getNode(ISD::ADD, DL, VT, Hi, Lo);
+ }
+ }
+}
+
+SDValue SparcTargetLowering::LowerGlobalAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ return makeAddress(Op, DAG);
+}
+
+SDValue SparcTargetLowering::LowerConstantPool(SDValue Op,
+ SelectionDAG &DAG) const {
+ return makeAddress(Op, DAG);
+}
+
+SDValue SparcTargetLowering::LowerBlockAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+ return makeAddress(Op, DAG);
+}
+
+SDValue SparcTargetLowering::LowerGlobalTLSAddress(SDValue Op,
+ SelectionDAG &DAG) const {
+
+ GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
+ if (DAG.getTarget().Options.EmulatedTLS)
+ return LowerToTLSEmulatedModel(GA, DAG);
+
+ SDLoc DL(GA);
+ const GlobalValue *GV = GA->getGlobal();
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
+
+ TLSModel::Model model = getTargetMachine().getTLSModel(GV);
+
+ if (model == TLSModel::GeneralDynamic || model == TLSModel::LocalDynamic) {
+ unsigned HiTF = ((model == TLSModel::GeneralDynamic)
+ ? SparcMCExpr::VK_Sparc_TLS_GD_HI22
+ : SparcMCExpr::VK_Sparc_TLS_LDM_HI22);
+ unsigned LoTF = ((model == TLSModel::GeneralDynamic)
+ ? SparcMCExpr::VK_Sparc_TLS_GD_LO10
+ : SparcMCExpr::VK_Sparc_TLS_LDM_LO10);
+ unsigned addTF = ((model == TLSModel::GeneralDynamic)
+ ? SparcMCExpr::VK_Sparc_TLS_GD_ADD
+ : SparcMCExpr::VK_Sparc_TLS_LDM_ADD);
+ unsigned callTF = ((model == TLSModel::GeneralDynamic)
+ ? SparcMCExpr::VK_Sparc_TLS_GD_CALL
+ : SparcMCExpr::VK_Sparc_TLS_LDM_CALL);
+
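+ // This builds roughly the standard GD/LD call sequence; e.g. for
+ // General Dynamic (illustrative registers):
+ //   sethi %tgd_hi22(sym), %t
+ //   add   %t, %tgd_lo10(sym), %t
+ //   add   <pic>, %t, %o0, %tgd_add(sym)
+ //   call  __tls_get_addr, %tgd_call(sym)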
+ SDValue HiLo = makeHiLoPair(Op, HiTF, LoTF, DAG);
+ SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
+ SDValue Argument = DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Base, HiLo,
+ withTargetFlags(Op, addTF, DAG));
+
+ SDValue Chain = DAG.getEntryNode();
+ SDValue InFlag;
+
+ Chain = DAG.getCALLSEQ_START(Chain, 1, 0, DL);
+ Chain = DAG.getCopyToReg(Chain, DL, SP::O0, Argument, InFlag);
+ InFlag = Chain.getValue(1);
+ SDValue Callee = DAG.getTargetExternalSymbol("__tls_get_addr", PtrVT);
+ SDValue Symbol = withTargetFlags(Op, callTF, DAG);
+
+ SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
+ const uint32_t *Mask = Subtarget->getRegisterInfo()->getCallPreservedMask(
+ DAG.getMachineFunction(), CallingConv::C);
+ assert(Mask && "Missing call preserved mask for calling convention");
+ SDValue Ops[] = {Chain,
+ Callee,
+ Symbol,
+ DAG.getRegister(SP::O0, PtrVT),
+ DAG.getRegisterMask(Mask),
+ InFlag};
+ Chain = DAG.getNode(SPISD::TLS_CALL, DL, NodeTys, Ops);
+ InFlag = Chain.getValue(1);
+ Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(1, DL, true),
+ DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
+ InFlag = Chain.getValue(1);
+ SDValue Ret = DAG.getCopyFromReg(Chain, DL, SP::O0, PtrVT, InFlag);
+
+ if (model != TLSModel::LocalDynamic)
+ return Ret;
+
+ SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
+ withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_HIX22, DAG));
+ SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
+ withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_LOX10, DAG));
+ HiLo = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
+ return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT, Ret, HiLo,
+ withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LDO_ADD, DAG));
+ }
+
+ if (model == TLSModel::InitialExec) {
+ unsigned ldTF = ((PtrVT == MVT::i64)? SparcMCExpr::VK_Sparc_TLS_IE_LDX
+ : SparcMCExpr::VK_Sparc_TLS_IE_LD);
+
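+ // Roughly: sethi/add with %tie_hi22/%tie_lo10 form a GOT offset, the
+ // TP-relative offset is loaded from that GOT slot (%tie_ld/%tie_ldx),
+ // and the result is %g7 plus that offset (%tie_add).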
+ SDValue Base = DAG.getNode(SPISD::GLOBAL_BASE_REG, DL, PtrVT);
+
+ // GLOBAL_BASE_REG is codegen'ed with a call. Inform MFI that this
+ // function has calls.
+ MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI.setHasCalls(true);
+
+ SDValue TGA = makeHiLoPair(Op,
+ SparcMCExpr::VK_Sparc_TLS_IE_HI22,
+ SparcMCExpr::VK_Sparc_TLS_IE_LO10, DAG);
+ SDValue Ptr = DAG.getNode(ISD::ADD, DL, PtrVT, Base, TGA);
+ SDValue Offset = DAG.getNode(SPISD::TLS_LD,
+ DL, PtrVT, Ptr,
+ withTargetFlags(Op, ldTF, DAG));
+ return DAG.getNode(SPISD::TLS_ADD, DL, PtrVT,
+ DAG.getRegister(SP::G7, PtrVT), Offset,
+ withTargetFlags(Op,
+ SparcMCExpr::VK_Sparc_TLS_IE_ADD, DAG));
+ }
+
+ assert(model == TLSModel::LocalExec);
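+ // Roughly: sethi %tle_hix22(sym), %t ; xor %t, %tle_lox10(sym), %t ;
+ // add %g7, %t, %rd.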
+ SDValue Hi = DAG.getNode(SPISD::Hi, DL, PtrVT,
+ withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_HIX22, DAG));
+ SDValue Lo = DAG.getNode(SPISD::Lo, DL, PtrVT,
+ withTargetFlags(Op, SparcMCExpr::VK_Sparc_TLS_LE_LOX10, DAG));
+ SDValue Offset = DAG.getNode(ISD::XOR, DL, PtrVT, Hi, Lo);
+
+ return DAG.getNode(ISD::ADD, DL, PtrVT,
+ DAG.getRegister(SP::G7, PtrVT), Offset);
+}
+
+SDValue SparcTargetLowering::LowerF128_LibCallArg(SDValue Chain,
+ ArgListTy &Args, SDValue Arg,
+ const SDLoc &DL,
+ SelectionDAG &DAG) const {
+ MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
+ EVT ArgVT = Arg.getValueType();
+ Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
+
+ ArgListEntry Entry;
+ Entry.Node = Arg;
+ Entry.Ty = ArgTy;
+
+ if (ArgTy->isFP128Ty()) {
+ // Create a stack object and pass the pointer to the library function.
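+ // (The soft-quad runtime routines expect f128 arguments by reference,
+ // which is why the value is spilled and its address passed instead.)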
+ int FI = MFI.CreateStackObject(16, 8, false);
+ SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
+ Chain = DAG.getStore(Chain, DL, Entry.Node, FIPtr, MachinePointerInfo(),
+ /* Alignment = */ 8);
+
+ Entry.Node = FIPtr;
+ Entry.Ty = PointerType::getUnqual(ArgTy);
+ }
+ Args.push_back(Entry);
+ return Chain;
+}
+
+SDValue
+SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
+ const char *LibFuncName,
+ unsigned numArgs) const {
+
+ ArgListTy Args;
+
+ MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+
+ SDValue Callee = DAG.getExternalSymbol(LibFuncName, PtrVT);
+ Type *RetTy = Op.getValueType().getTypeForEVT(*DAG.getContext());
+ Type *RetTyABI = RetTy;
+ SDValue Chain = DAG.getEntryNode();
+ SDValue RetPtr;
+
+ if (RetTy->isFP128Ty()) {
+ // Create a Stack Object to receive the return value of type f128.
+ ArgListEntry Entry;
+ int RetFI = MFI.CreateStackObject(16, 8, false);
+ RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
+ Entry.Node = RetPtr;
+ Entry.Ty = PointerType::getUnqual(RetTy);
+ if (!Subtarget->is64Bit())
+ Entry.IsSRet = true;
+ Entry.IsReturned = false;
+ Args.push_back(Entry);
+ RetTyABI = Type::getVoidTy(*DAG.getContext());
+ }
+
+ assert(Op->getNumOperands() >= numArgs && "Not enough operands!");
+ for (unsigned i = 0, e = numArgs; i != e; ++i) {
+ Chain = LowerF128_LibCallArg(Chain, Args, Op.getOperand(i), SDLoc(Op), DAG);
+ }
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(SDLoc(Op)).setChain(Chain)
+ .setCallee(CallingConv::C, RetTyABI, Callee, std::move(Args));
+
+ std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
+
+ // The chain is in the second result.
+ if (RetTyABI == RetTy)
+ return CallInfo.first;
+
+ assert(RetTy->isFP128Ty() && "Unexpected return type!");
+
+ Chain = CallInfo.second;
+
+ // Load RetPtr to get the return value.
+ return DAG.getLoad(Op.getValueType(), SDLoc(Op), Chain, RetPtr,
+ MachinePointerInfo(), /* Alignment = */ 8);
+}
+
+SDValue SparcTargetLowering::LowerF128Compare(SDValue LHS, SDValue RHS,
+ unsigned &SPCC, const SDLoc &DL,
+ SelectionDAG &DAG) const {
+
+ const char *LibCall = nullptr;
+ bool is64Bit = Subtarget->is64Bit();
+ switch(SPCC) {
+ default: llvm_unreachable("Unhandled conditional code!");
+ case SPCC::FCC_E : LibCall = is64Bit? "_Qp_feq" : "_Q_feq"; break;
+ case SPCC::FCC_NE : LibCall = is64Bit? "_Qp_fne" : "_Q_fne"; break;
+ case SPCC::FCC_L : LibCall = is64Bit? "_Qp_flt" : "_Q_flt"; break;
+ case SPCC::FCC_G : LibCall = is64Bit? "_Qp_fgt" : "_Q_fgt"; break;
+ case SPCC::FCC_LE : LibCall = is64Bit? "_Qp_fle" : "_Q_fle"; break;
+ case SPCC::FCC_GE : LibCall = is64Bit? "_Qp_fge" : "_Q_fge"; break;
+ case SPCC::FCC_UL :
+ case SPCC::FCC_ULE:
+ case SPCC::FCC_UG :
+ case SPCC::FCC_UGE:
+ case SPCC::FCC_U :
+ case SPCC::FCC_O :
+ case SPCC::FCC_LG :
+ case SPCC::FCC_UE : LibCall = is64Bit? "_Qp_cmp" : "_Q_cmp"; break;
+ }
+
+ auto PtrVT = getPointerTy(DAG.getDataLayout());
+ SDValue Callee = DAG.getExternalSymbol(LibCall, PtrVT);
+ Type *RetTy = Type::getInt32Ty(*DAG.getContext());
+ ArgListTy Args;
+ SDValue Chain = DAG.getEntryNode();
+ Chain = LowerF128_LibCallArg(Chain, Args, LHS, DL, DAG);
+ Chain = LowerF128_LibCallArg(Chain, Args, RHS, DL, DAG);
+
+ TargetLowering::CallLoweringInfo CLI(DAG);
+ CLI.setDebugLoc(DL).setChain(Chain)
+ .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
+
+ std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
+
+ // The result is in the first result; the chain is in the second.
+ SDValue Result = CallInfo.first;
+
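+ // _Q_cmp/_Qp_cmp return 0, 1, 2 or 3 for equal, less, greater and
+ // unordered, respectively; the cases below rewrite that encoding into
+ // an integer compare and a matching integer condition code.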
+ switch(SPCC) {
+ default: {
+ SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
+ SPCC = SPCC::ICC_NE;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_UL : {
+ SDValue Mask = DAG.getTargetConstant(1, DL, Result.getValueType());
+ Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
+ SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
+ SPCC = SPCC::ICC_NE;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_ULE: {
+ SDValue RHS = DAG.getTargetConstant(2, DL, Result.getValueType());
+ SPCC = SPCC::ICC_NE;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_UG : {
+ SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
+ SPCC = SPCC::ICC_G;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_UGE: {
+ SDValue RHS = DAG.getTargetConstant(1, DL, Result.getValueType());
+ SPCC = SPCC::ICC_NE;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+
+ case SPCC::FCC_U : {
+ SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
+ SPCC = SPCC::ICC_E;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_O : {
+ SDValue RHS = DAG.getTargetConstant(3, DL, Result.getValueType());
+ SPCC = SPCC::ICC_NE;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_LG : {
+ SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
+ Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
+ SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
+ SPCC = SPCC::ICC_NE;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ case SPCC::FCC_UE : {
+ SDValue Mask = DAG.getTargetConstant(3, DL, Result.getValueType());
+ Result = DAG.getNode(ISD::AND, DL, Result.getValueType(), Result, Mask);
+ SDValue RHS = DAG.getTargetConstant(0, DL, Result.getValueType());
+ SPCC = SPCC::ICC_E;
+ return DAG.getNode(SPISD::CMPICC, DL, MVT::Glue, Result, RHS);
+ }
+ }
+}
+
+static SDValue
+LowerF128_FPEXTEND(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI) {
+
+ if (Op.getOperand(0).getValueType() == MVT::f64)
+ return TLI.LowerF128Op(Op, DAG,
+ TLI.getLibcallName(RTLIB::FPEXT_F64_F128), 1);
+
+ if (Op.getOperand(0).getValueType() == MVT::f32)
+ return TLI.LowerF128Op(Op, DAG,
+ TLI.getLibcallName(RTLIB::FPEXT_F32_F128), 1);
+
+ llvm_unreachable("fpextend with non-float operand!");
+ return SDValue();
+}
+
+static SDValue
+LowerF128_FPROUND(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI) {
+ // FP_ROUND on f64 and f32 are legal.
+ if (Op.getOperand(0).getValueType() != MVT::f128)
+ return Op;
+
+ if (Op.getValueType() == MVT::f64)
+ return TLI.LowerF128Op(Op, DAG,
+ TLI.getLibcallName(RTLIB::FPROUND_F128_F64), 1);
+ if (Op.getValueType() == MVT::f32)
+ return TLI.LowerF128Op(Op, DAG,
+ TLI.getLibcallName(RTLIB::FPROUND_F128_F32), 1);
+
+ llvm_unreachable("fpround to non-float!");
+ return SDValue();
+}
+
+static SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ bool hasHardQuad) {
+ SDLoc dl(Op);
+ EVT VT = Op.getValueType();
+ assert(VT == MVT::i32 || VT == MVT::i64);
+
+ // Expand f128 operations to fp128 ABI calls.
+ if (Op.getOperand(0).getValueType() == MVT::f128
+ && (!hasHardQuad || !TLI.isTypeLegal(VT))) {
+ const char *libName = TLI.getLibcallName(VT == MVT::i32
+ ? RTLIB::FPTOSINT_F128_I32
+ : RTLIB::FPTOSINT_F128_I64);
+ return TLI.LowerF128Op(Op, DAG, libName, 1);
+ }
+
+ // Expand if the resulting type is illegal.
+ if (!TLI.isTypeLegal(VT))
+ return SDValue();
+
+ // Otherwise, convert the FP value to an integer in an FP register.
+ if (VT == MVT::i32)
+ Op = DAG.getNode(SPISD::FTOI, dl, MVT::f32, Op.getOperand(0));
+ else
+ Op = DAG.getNode(SPISD::FTOX, dl, MVT::f64, Op.getOperand(0));
+
+ return DAG.getNode(ISD::BITCAST, dl, VT, Op);
+}
+
+static SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ bool hasHardQuad) {
+ SDLoc dl(Op);
+ EVT OpVT = Op.getOperand(0).getValueType();
+ assert(OpVT == MVT::i32 || OpVT == MVT::i64);
+
+ EVT floatVT = (OpVT == MVT::i32) ? MVT::f32 : MVT::f64;
+
+ // Expand f128 operations to fp128 ABI calls.
+ if (Op.getValueType() == MVT::f128
+ && (!hasHardQuad || !TLI.isTypeLegal(OpVT))) {
+ const char *libName = TLI.getLibcallName(OpVT == MVT::i32
+ ? RTLIB::SINTTOFP_I32_F128
+ : RTLIB::SINTTOFP_I64_F128);
+ return TLI.LowerF128Op(Op, DAG, libName, 1);
+ }
+
+ // Expand if the operand type is illegal.
+ if (!TLI.isTypeLegal(OpVT))
+ return SDValue();
+
+ // Otherwise, convert the int value to FP in an FP register.
+ SDValue Tmp = DAG.getNode(ISD::BITCAST, dl, floatVT, Op.getOperand(0));
+ unsigned opcode = (OpVT == MVT::i32)? SPISD::ITOF : SPISD::XTOF;
+ return DAG.getNode(opcode, dl, Op.getValueType(), Tmp);
+}
+
+static SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ bool hasHardQuad) {
+ SDLoc dl(Op);
+ EVT VT = Op.getValueType();
+
+ // Expand if it does not involve f128 or the target has support for
+ // quad floating point instructions and the resulting type is legal.
+ if (Op.getOperand(0).getValueType() != MVT::f128 ||
+ (hasHardQuad && TLI.isTypeLegal(VT)))
+ return SDValue();
+
+ assert(VT == MVT::i32 || VT == MVT::i64);
+
+ return TLI.LowerF128Op(Op, DAG,
+ TLI.getLibcallName(VT == MVT::i32
+ ? RTLIB::FPTOUINT_F128_I32
+ : RTLIB::FPTOUINT_F128_I64),
+ 1);
+}
+
+static SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ bool hasHardQuad) {
+ SDLoc dl(Op);
+ EVT OpVT = Op.getOperand(0).getValueType();
+ assert(OpVT == MVT::i32 || OpVT == MVT::i64);
+
+ // Expand if it does not involve f128 or the target has support for
+ // quad floating point instructions and the operand type is legal.
+ if (Op.getValueType() != MVT::f128 || (hasHardQuad && TLI.isTypeLegal(OpVT)))
+ return SDValue();
+
+ return TLI.LowerF128Op(Op, DAG,
+ TLI.getLibcallName(OpVT == MVT::i32
+ ? RTLIB::UINTTOFP_I32_F128
+ : RTLIB::UINTTOFP_I64_F128),
+ 1);
+}
+
+static SDValue LowerBR_CC(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ bool hasHardQuad) {
+ SDValue Chain = Op.getOperand(0);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
+ SDValue LHS = Op.getOperand(2);
+ SDValue RHS = Op.getOperand(3);
+ SDValue Dest = Op.getOperand(4);
+ SDLoc dl(Op);
+ unsigned Opc, SPCC = ~0U;
+
+ // If this is a br_cc of a "setcc", and if the setcc got lowered into
+ // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
+ LookThroughSetCC(LHS, RHS, CC, SPCC);
+
+ // Get the condition flag.
+ SDValue CompareFlag;
+ if (LHS.getValueType().isInteger()) {
+ CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
+ if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
+ // 32-bit compares use the icc flags, 64-bit uses the xcc flags.
+ Opc = LHS.getValueType() == MVT::i32 ? SPISD::BRICC : SPISD::BRXCC;
+ } else {
+ if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
+ if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
+ CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
+ Opc = SPISD::BRICC;
+ } else {
+ CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
+ if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
+ Opc = SPISD::BRFCC;
+ }
+ }
+ return DAG.getNode(Opc, dl, MVT::Other, Chain, Dest,
+ DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
+}
+
+static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ bool hasHardQuad) {
+ SDValue LHS = Op.getOperand(0);
+ SDValue RHS = Op.getOperand(1);
+ ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
+ SDValue TrueVal = Op.getOperand(2);
+ SDValue FalseVal = Op.getOperand(3);
+ SDLoc dl(Op);
+ unsigned Opc, SPCC = ~0U;
+
+ // If this is a select_cc of a "setcc", and if the setcc got lowered into
+ // a CMP[IF]CC/SELECT_[IF]CC pair, find the original compared values.
+ LookThroughSetCC(LHS, RHS, CC, SPCC);
+
+ SDValue CompareFlag;
+ if (LHS.getValueType().isInteger()) {
+ CompareFlag = DAG.getNode(SPISD::CMPICC, dl, MVT::Glue, LHS, RHS);
+ Opc = LHS.getValueType() == MVT::i32 ?
+ SPISD::SELECT_ICC : SPISD::SELECT_XCC;
+ if (SPCC == ~0U) SPCC = IntCondCCodeToICC(CC);
+ } else {
+ if (!hasHardQuad && LHS.getValueType() == MVT::f128) {
+ if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
+ CompareFlag = TLI.LowerF128Compare(LHS, RHS, SPCC, dl, DAG);
+ Opc = SPISD::SELECT_ICC;
+ } else {
+ CompareFlag = DAG.getNode(SPISD::CMPFCC, dl, MVT::Glue, LHS, RHS);
+ Opc = SPISD::SELECT_FCC;
+ if (SPCC == ~0U) SPCC = FPCondCCodeToFCC(CC);
+ }
+ }
+ return DAG.getNode(Opc, dl, TrueVal.getValueType(), TrueVal, FalseVal,
+ DAG.getConstant(SPCC, dl, MVT::i32), CompareFlag);
+}
+
+SDValue SparcTargetLowering::LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI) const {
+ SDLoc DL(Op);
+ return DAG.getNode(SPISD::EH_SJLJ_SETJMP, DL,
+ DAG.getVTList(MVT::i32, MVT::Other), Op.getOperand(0), Op.getOperand(1));
+}
+
+SDValue SparcTargetLowering::LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI) const {
+ SDLoc DL(Op);
+ return DAG.getNode(SPISD::EH_SJLJ_LONGJMP, DL, MVT::Other, Op.getOperand(0),
+ Op.getOperand(1));
+}
+
+static SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ SparcMachineFunctionInfo *FuncInfo = MF.getInfo<SparcMachineFunctionInfo>();
+ auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
+
+ // Need frame address to find the address of VarArgsFrameIndex.
+ MF.getFrameInfo().setFrameAddressIsTaken(true);
+
+ // vastart just stores the address of the VarArgsFrameIndex slot into the
+ // memory location argument.
+ SDLoc DL(Op);
+ SDValue Offset =
+ DAG.getNode(ISD::ADD, DL, PtrVT, DAG.getRegister(SP::I6, PtrVT),
+ DAG.getIntPtrConstant(FuncInfo->getVarArgsFrameOffset(), DL));
+ const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
+ return DAG.getStore(Op.getOperand(0), DL, Offset, Op.getOperand(1),
+ MachinePointerInfo(SV));
+}
+
+static SDValue LowerVAARG(SDValue Op, SelectionDAG &DAG) {
+ SDNode *Node = Op.getNode();
+ EVT VT = Node->getValueType(0);
+ SDValue InChain = Node->getOperand(0);
+ SDValue VAListPtr = Node->getOperand(1);
+ EVT PtrVT = VAListPtr.getValueType();
+ const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
+ SDLoc DL(Node);
+ SDValue VAList =
+ DAG.getLoad(PtrVT, DL, InChain, VAListPtr, MachinePointerInfo(SV));
+ // Increment the pointer, VAList, to the next vaarg.
+ SDValue NextPtr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
+ DAG.getIntPtrConstant(VT.getSizeInBits()/8,
+ DL));
+ // Store the incremented VAList to the legalized pointer.
+ InChain = DAG.getStore(VAList.getValue(1), DL, NextPtr, VAListPtr,
+ MachinePointerInfo(SV));
+ // Load the actual argument out of the pointer VAList.
+ // We can't count on greater alignment than the word size.
+ return DAG.getLoad(VT, DL, InChain, VAList, MachinePointerInfo(),
+ std::min(PtrVT.getSizeInBits(), VT.getSizeInBits()) / 8);
+}
+
+static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
+ const SparcSubtarget *Subtarget) {
+ SDValue Chain = Op.getOperand(0); // Legalize the chain.
+ SDValue Size = Op.getOperand(1); // Legalize the size.
+ unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+ unsigned StackAlign = Subtarget->getFrameLowering()->getStackAlignment();
+ EVT VT = Size->getValueType(0);
+ SDLoc dl(Op);
+
+ // TODO: implement over-aligned alloca. (Note: this also implies
+ // support for over-aligned function frames + dynamic allocations,
+ // which currently isn't supported at all.)
+ if (Align > StackAlign) {
+ const MachineFunction &MF = DAG.getMachineFunction();
+ report_fatal_error("Function \"" + Twine(MF.getName()) + "\": "
+ "over-aligned dynamic alloca not supported.");
+ }
+
+ // The resultant pointer needs to be above the register spill area
+ // at the bottom of the stack.
+ unsigned regSpillArea;
+ if (Subtarget->is64Bit()) {
+ regSpillArea = 128;
+ } else {
+ // On Sparc32, the size of the spill area is 92. Unfortunately,
+ // that's only 4-byte aligned, not 8-byte aligned (the stack
+ // pointer is 8-byte aligned). So, if the user asked for an 8-byte
+ // aligned dynamic allocation, we actually need to add 96 to the
+ // bottom of the stack, instead of 92, to ensure 8-byte alignment.
+
+ // That also means adding 4 to the size of the allocation --
+ // before applying the 8-byte rounding. Unfortunately, the value
+ // we get here has already had that rounding applied. So, we need
+ // to add 8 instead, wasting a bit more memory.
+
+ // Further, this only actually needs to be done if the required
+ // alignment is > 4, but, we've lost that info by this point, too,
+ // so we always apply it.
+
+ // (An alternative approach would be to always reserve 96 bytes
+ // instead of the required 92, but then we'd waste 4 extra bytes
+ // in every frame, not just those with dynamic stack allocations)
+
+ // TODO: modify code in SelectionDAGBuilder to make this less sad.
+
+ Size = DAG.getNode(ISD::ADD, dl, VT, Size,
+ DAG.getConstant(8, dl, VT));
+ regSpillArea = 96;
+ }
+
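+ // E.g. on sparc32, an 8-byte-aligned 16-byte alloca: Size becomes
+ // 16 + 8 = 24, %sp drops by 24, and the returned pointer is the new
+ // %sp + 96 -- still 8-byte aligned, since 24 and 96 are multiples of 8.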
+ unsigned SPReg = SP::O6;
+ SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
+ SDValue NewSP = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
+ Chain = DAG.getCopyToReg(SP.getValue(1), dl, SPReg, NewSP); // Output chain
+
+ regSpillArea += Subtarget->getStackPointerBias();
+
+ SDValue NewVal = DAG.getNode(ISD::ADD, dl, VT, NewSP,
+ DAG.getConstant(regSpillArea, dl, VT));
+ SDValue Ops[2] = { NewVal, Chain };
+ return DAG.getMergeValues(Ops, dl);
+}
+
+static SDValue getFLUSHW(SDValue Op, SelectionDAG &DAG) {
+ SDLoc dl(Op);
+ SDValue Chain = DAG.getNode(SPISD::FLUSHW,
+ dl, MVT::Other, DAG.getEntryNode());
+ return Chain;
+}
+
+static SDValue getFRAMEADDR(uint64_t depth, SDValue Op, SelectionDAG &DAG,
+ const SparcSubtarget *Subtarget) {
+ MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
+ MFI.setFrameAddressIsTaken(true);
+
+ EVT VT = Op.getValueType();
+ SDLoc dl(Op);
+ unsigned FrameReg = SP::I6;
+ unsigned stackBias = Subtarget->getStackPointerBias();
+
+ SDValue FrameAddr;
+
+ if (depth == 0) {
+ FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
+ if (Subtarget->is64Bit())
+ FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
+ DAG.getIntPtrConstant(stackBias, dl));
+ return FrameAddr;
+ }
+
+ // Flush first to make sure the windowed registers' values are on the stack.
+ SDValue Chain = getFLUSHW(Op, DAG);
+ FrameAddr = DAG.getCopyFromReg(Chain, dl, FrameReg, VT);
+
+ unsigned Offset = (Subtarget->is64Bit()) ? (stackBias + 112) : 56;
+
+ while (depth--) {
+ SDValue Ptr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
+ DAG.getIntPtrConstant(Offset, dl));
+ FrameAddr = DAG.getLoad(VT, dl, Chain, Ptr, MachinePointerInfo());
+ }
+ if (Subtarget->is64Bit())
+ FrameAddr = DAG.getNode(ISD::ADD, dl, VT, FrameAddr,
+ DAG.getIntPtrConstant(stackBias, dl));
+ return FrameAddr;
+}
+
+static SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG,
+ const SparcSubtarget *Subtarget) {
+ uint64_t depth = Op.getConstantOperandVal(0);
+
+ return getFRAMEADDR(depth, Op, DAG, Subtarget);
+}
+
+static SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI,
+ const SparcSubtarget *Subtarget) {
+ MachineFunction &MF = DAG.getMachineFunction();
+ MachineFrameInfo &MFI = MF.getFrameInfo();
+ MFI.setReturnAddressIsTaken(true);
+
+ if (TLI.verifyReturnAddressArgumentIsConstant(Op, DAG))
+ return SDValue();
+
+ EVT VT = Op.getValueType();
+ SDLoc dl(Op);
+ uint64_t depth = Op.getConstantOperandVal(0);
+
+ SDValue RetAddr;
+ if (depth == 0) {
+ auto PtrVT = TLI.getPointerTy(DAG.getDataLayout());
+ unsigned RetReg = MF.addLiveIn(SP::I7, TLI.getRegClassFor(PtrVT));
+ RetAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, RetReg, VT);
+ return RetAddr;
+ }
+
+ // Need frame address to find return address of the caller.
+ SDValue FrameAddr = getFRAMEADDR(depth - 1, Op, DAG, Subtarget);
+
+ unsigned Offset = (Subtarget->is64Bit()) ? 120 : 60;
+ SDValue Ptr = DAG.getNode(ISD::ADD,
+ dl, VT,
+ FrameAddr,
+ DAG.getIntPtrConstant(Offset, dl));
+ RetAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), Ptr, MachinePointerInfo());
+
+ return RetAddr;
+}
+
+static SDValue LowerF64Op(SDValue SrcReg64, const SDLoc &dl, SelectionDAG &DAG,
+ unsigned opcode) {
+ assert(SrcReg64.getValueType() == MVT::f64 && "LowerF64Op called on non-double!");
+ assert(opcode == ISD::FNEG || opcode == ISD::FABS);
+
+ // Lower fneg/fabs on f64 to fneg/fabs on f32.
+ // fneg f64 => fneg f32:sub_even, fmov f32:sub_odd.
+ // fabs f64 => fabs f32:sub_even, fmov f32:sub_odd.
+
+ // Note: in little-endian, the two halves of the floating-point value
+ // are stored in the registers in the opposite order, so the subreg
+ // with the sign bit is the highest-numbered (odd), rather than the
+ // lowest-numbered (even).
+
+ SDValue Hi32 = DAG.getTargetExtractSubreg(SP::sub_even, dl, MVT::f32,
+ SrcReg64);
+ SDValue Lo32 = DAG.getTargetExtractSubreg(SP::sub_odd, dl, MVT::f32,
+ SrcReg64);
+
+ if (DAG.getDataLayout().isLittleEndian())
+ Lo32 = DAG.getNode(opcode, dl, MVT::f32, Lo32);
+ else
+ Hi32 = DAG.getNode(opcode, dl, MVT::f32, Hi32);
+
+ SDValue DstReg64 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, MVT::f64), 0);
+ DstReg64 = DAG.getTargetInsertSubreg(SP::sub_even, dl, MVT::f64,
+ DstReg64, Hi32);
+ DstReg64 = DAG.getTargetInsertSubreg(SP::sub_odd, dl, MVT::f64,
+ DstReg64, Lo32);
+ return DstReg64;
+}
+
+// Lower a f128 load into two f64 loads.
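+// The even f64 subregister receives the high half (loaded from the base
+// address) and the odd one the low half (from base + 8); the two load
+// chains are merged with a TokenFactor.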
+static SDValue LowerF128Load(SDValue Op, SelectionDAG &DAG)
+{
+ SDLoc dl(Op);
+ LoadSDNode *LdNode = dyn_cast<LoadSDNode>(Op.getNode());
+ assert(LdNode && LdNode->getOffset().isUndef()
+ && "Unexpected node type");
+
+ unsigned alignment = LdNode->getAlignment();
+ if (alignment > 8)
+ alignment = 8;
+
+ SDValue Hi64 =
+ DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LdNode->getBasePtr(),
+ LdNode->getPointerInfo(), alignment);
+ EVT addrVT = LdNode->getBasePtr().getValueType();
+ SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
+ LdNode->getBasePtr(),
+ DAG.getConstant(8, dl, addrVT));
+ SDValue Lo64 = DAG.getLoad(MVT::f64, dl, LdNode->getChain(), LoPtr,
+ LdNode->getPointerInfo(), alignment);
+
+ SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
+ SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
+
+ SDNode *InFP128 = DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, MVT::f128);
+ InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
+ MVT::f128,
+ SDValue(InFP128, 0),
+ Hi64,
+ SubRegEven);
+ InFP128 = DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, dl,
+ MVT::f128,
+ SDValue(InFP128, 0),
+ Lo64,
+ SubRegOdd);
+ SDValue OutChains[2] = { SDValue(Hi64.getNode(), 1),
+ SDValue(Lo64.getNode(), 1) };
+ SDValue OutChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
+ SDValue Ops[2] = {SDValue(InFP128,0), OutChain};
+ return DAG.getMergeValues(Ops, dl);
+}
+
+static SDValue LowerLOAD(SDValue Op, SelectionDAG &DAG)
+{
+ LoadSDNode *LdNode = cast<LoadSDNode>(Op.getNode());
+
+ EVT MemVT = LdNode->getMemoryVT();
+ if (MemVT == MVT::f128)
+ return LowerF128Load(Op, DAG);
+
+ return Op;
+}
+
+// Lower a f128 store into two f64 stores.
+static SDValue LowerF128Store(SDValue Op, SelectionDAG &DAG) {
+ SDLoc dl(Op);
+ StoreSDNode *StNode = dyn_cast<StoreSDNode>(Op.getNode());
+ assert(StNode && StNode->getOffset().isUndef()
+ && "Unexpected node type");
+ SDValue SubRegEven = DAG.getTargetConstant(SP::sub_even64, dl, MVT::i32);
+ SDValue SubRegOdd = DAG.getTargetConstant(SP::sub_odd64, dl, MVT::i32);
+
+ SDNode *Hi64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+ dl,
+ MVT::f64,
+ StNode->getValue(),
+ SubRegEven);
+ SDNode *Lo64 = DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG,
+ dl,
+ MVT::f64,
+ StNode->getValue(),
+ SubRegOdd);
+
+ unsigned alignment = StNode->getAlignment();
+ if (alignment > 8)
+ alignment = 8;
+
+ SDValue OutChains[2];
+ OutChains[0] =
+ DAG.getStore(StNode->getChain(), dl, SDValue(Hi64, 0),
+ StNode->getBasePtr(), MachinePointerInfo(), alignment);
+ EVT addrVT = StNode->getBasePtr().getValueType();
+ SDValue LoPtr = DAG.getNode(ISD::ADD, dl, addrVT,
+ StNode->getBasePtr(),
+ DAG.getConstant(8, dl, addrVT));
+ OutChains[1] = DAG.getStore(StNode->getChain(), dl, SDValue(Lo64, 0), LoPtr,
+ MachinePointerInfo(), alignment);
+ return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
+}
+
+static SDValue LowerSTORE(SDValue Op, SelectionDAG &DAG)
+{
+ SDLoc dl(Op);
+ StoreSDNode *St = cast<StoreSDNode>(Op.getNode());
+
+ EVT MemVT = St->getMemoryVT();
+ if (MemVT == MVT::f128)
+ return LowerF128Store(Op, DAG);
+
+ if (MemVT == MVT::i64) {
+ // Custom handling for i64 stores: turn it into a bitcast and a
+ // v2i32 store.
+ SDValue Val = DAG.getNode(ISD::BITCAST, dl, MVT::v2i32, St->getValue());
+ SDValue Chain = DAG.getStore(
+ St->getChain(), dl, Val, St->getBasePtr(), St->getPointerInfo(),
+ St->getAlignment(), St->getMemOperand()->getFlags(), St->getAAInfo());
+ return Chain;
+ }
+
+ return SDValue();
+}
+
+static SDValue LowerFNEGorFABS(SDValue Op, SelectionDAG &DAG, bool isV9) {
+ assert((Op.getOpcode() == ISD::FNEG || Op.getOpcode() == ISD::FABS)
+ && "invalid opcode");
+
+ SDLoc dl(Op);
+
+ if (Op.getValueType() == MVT::f64)
+ return LowerF64Op(Op.getOperand(0), dl, DAG, Op.getOpcode());
+ if (Op.getValueType() != MVT::f128)
+ return Op;
+
+ // Lower fabs/fneg on f128 to fabs/fneg on f64
+ // fabs/fneg f128 => fabs/fneg f64:sub_even64, fmov f64:sub_odd64
+ // (As with LowerF64Op, on little-endian, we need to negate the odd
+ // subreg)
+
+ SDValue SrcReg128 = Op.getOperand(0);
+ SDValue Hi64 = DAG.getTargetExtractSubreg(SP::sub_even64, dl, MVT::f64,
+ SrcReg128);
+ SDValue Lo64 = DAG.getTargetExtractSubreg(SP::sub_odd64, dl, MVT::f64,
+ SrcReg128);
+
+ if (DAG.getDataLayout().isLittleEndian()) {
+ if (isV9)
+ Lo64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Lo64);
+ else
+ Lo64 = LowerF64Op(Lo64, dl, DAG, Op.getOpcode());
+ } else {
+ if (isV9)
+ Hi64 = DAG.getNode(Op.getOpcode(), dl, MVT::f64, Hi64);
+ else
+ Hi64 = LowerF64Op(Hi64, dl, DAG, Op.getOpcode());
+ }
+
+ SDValue DstReg128 = SDValue(DAG.getMachineNode(TargetOpcode::IMPLICIT_DEF,
+ dl, MVT::f128), 0);
+ DstReg128 = DAG.getTargetInsertSubreg(SP::sub_even64, dl, MVT::f128,
+ DstReg128, Hi64);
+ DstReg128 = DAG.getTargetInsertSubreg(SP::sub_odd64, dl, MVT::f128,
+ DstReg128, Lo64);
+ return DstReg128;
+}
+
+static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
+
+ if (Op.getValueType() != MVT::i64)
+ return Op;
+
+ SDLoc dl(Op);
+ SDValue Src1 = Op.getOperand(0);
+ SDValue Src1Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1);
+ SDValue Src1Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src1,
+ DAG.getConstant(32, dl, MVT::i64));
+ Src1Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src1Hi);
+
+ SDValue Src2 = Op.getOperand(1);
+ SDValue Src2Lo = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2);
+ SDValue Src2Hi = DAG.getNode(ISD::SRL, dl, MVT::i64, Src2,
+ DAG.getConstant(32, dl, MVT::i64));
+ Src2Hi = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Src2Hi);
+
+ bool hasChain = false;
+ unsigned hiOpc = Op.getOpcode();
+ switch (Op.getOpcode()) {
+ default: llvm_unreachable("Invalid opcode");
+ case ISD::ADDC: hiOpc = ISD::ADDE; break;
+ case ISD::ADDE: hasChain = true; break;
+ case ISD::SUBC: hiOpc = ISD::SUBE; break;
+ case ISD::SUBE: hasChain = true; break;
+ }
+ SDValue Lo;
+ SDVTList VTs = DAG.getVTList(MVT::i32, MVT::Glue);
+ if (hasChain) {
+ Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo,
+ Op.getOperand(2));
+ } else {
+ Lo = DAG.getNode(Op.getOpcode(), dl, VTs, Src1Lo, Src2Lo);
+ }
+ SDValue Hi = DAG.getNode(hiOpc, dl, VTs, Src1Hi, Src2Hi, Lo.getValue(1));
+ SDValue Carry = Hi.getValue(1);
+
+ Lo = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Lo);
+ Hi = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Hi);
+ Hi = DAG.getNode(ISD::SHL, dl, MVT::i64, Hi,
+ DAG.getConstant(32, dl, MVT::i64));
+
+ SDValue Dst = DAG.getNode(ISD::OR, dl, MVT::i64, Hi, Lo);
+ SDValue Ops[2] = { Dst, Carry };
+ return DAG.getMergeValues(Ops, dl);
+}
+
+// Custom lower UMULO/SMULO for SPARC. This code is similar to ExpandNode()
+// in LegalizeDAG.cpp, except for the order of the arguments to the library
+// function.
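+// Each i64 operand is widened to an i128 {hi, lo} pair (the hi word derived
+// with an arithmetic shift by 63) before calling the i128 multiply libcall;
+// the overflow bit is then computed from the top half of the product.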
+static SDValue LowerUMULO_SMULO(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI)
+{
+ unsigned opcode = Op.getOpcode();
+ assert((opcode == ISD::UMULO || opcode == ISD::SMULO) && "Invalid Opcode.");
+
+ bool isSigned = (opcode == ISD::SMULO);
+ EVT VT = MVT::i64;
+ EVT WideVT = MVT::i128;
+ SDLoc dl(Op);
+ SDValue LHS = Op.getOperand(0);
+
+ if (LHS.getValueType() != VT)
+ return Op;
+
+ SDValue ShiftAmt = DAG.getConstant(63, dl, VT);
+
+ SDValue RHS = Op.getOperand(1);
+ SDValue HiLHS = DAG.getNode(ISD::SRA, dl, VT, LHS, ShiftAmt);
+ SDValue HiRHS = DAG.getNode(ISD::SRA, dl, MVT::i64, RHS, ShiftAmt);
+ SDValue Args[] = { HiLHS, LHS, HiRHS, RHS };
+
+ SDValue MulResult = TLI.makeLibCall(DAG,
+ RTLIB::MUL_I128, WideVT,
+ Args, isSigned, dl).first;
+ SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
+ MulResult, DAG.getIntPtrConstant(0, dl));
+ SDValue TopHalf = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, VT,
+ MulResult, DAG.getIntPtrConstant(1, dl));
+ if (isSigned) {
+ SDValue Tmp1 = DAG.getNode(ISD::SRA, dl, VT, BottomHalf, ShiftAmt);
+ TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, Tmp1, ISD::SETNE);
+ } else {
+ TopHalf = DAG.getSetCC(dl, MVT::i32, TopHalf, DAG.getConstant(0, dl, VT),
+ ISD::SETNE);
+ }
+ // MulResult is a node with an illegal type. Because such things are not
+ // generally permitted during this phase of legalization, ensure that
+ // nothing is left using the node. The above EXTRACT_ELEMENT nodes should have
+ // been folded.
+ assert(MulResult->use_empty() && "Illegally typed node still in use!");
+
+ SDValue Ops[2] = { BottomHalf, TopHalf };
+ return DAG.getMergeValues(Ops, dl);
+}
+
+static SDValue LowerATOMIC_LOAD_STORE(SDValue Op, SelectionDAG &DAG) {
+ if (isStrongerThanMonotonic(cast<AtomicSDNode>(Op)->getOrdering()))
+ // Expand with a fence.
+ return SDValue();
+
+ // Monotonic load/stores are legal.
+ return Op;
+}
+
+SDValue SparcTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
+ SelectionDAG &DAG) const {
+ unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
+ SDLoc dl(Op);
+ switch (IntNo) {
+ default: return SDValue(); // Don't custom lower most intrinsics.
+ case Intrinsic::thread_pointer: {
+ EVT PtrVT = getPointerTy(DAG.getDataLayout());
+ return DAG.getRegister(SP::G7, PtrVT);
+ }
+ }
+}
+
+SDValue SparcTargetLowering::
+LowerOperation(SDValue Op, SelectionDAG &DAG) const {
+
+ bool hasHardQuad = Subtarget->hasHardQuad();
+ bool isV9 = Subtarget->isV9();
+
+ switch (Op.getOpcode()) {
+ default: llvm_unreachable("Should not custom lower this!");
+
+ case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG, *this,
+ Subtarget);
+ case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG,
+ Subtarget);
+ case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
+ case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
+ case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
+ case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
+ case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG, *this,
+ hasHardQuad);
+ case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG, *this,
+ hasHardQuad);
+ case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG, *this,
+ hasHardQuad);
+ case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG, *this,
+ hasHardQuad);
+ case ISD::BR_CC: return LowerBR_CC(Op, DAG, *this,
+ hasHardQuad);
+ case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG, *this,
+ hasHardQuad);
+ case ISD::EH_SJLJ_SETJMP: return LowerEH_SJLJ_SETJMP(Op, DAG, *this);
+ case ISD::EH_SJLJ_LONGJMP: return LowerEH_SJLJ_LONGJMP(Op, DAG, *this);
+ case ISD::VASTART: return LowerVASTART(Op, DAG, *this);
+ case ISD::VAARG: return LowerVAARG(Op, DAG);
+ case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG,
+ Subtarget);
+
+ case ISD::LOAD: return LowerLOAD(Op, DAG);
+ case ISD::STORE: return LowerSTORE(Op, DAG);
+ case ISD::FADD: return LowerF128Op(Op, DAG,
+ getLibcallName(RTLIB::ADD_F128), 2);
+ case ISD::FSUB: return LowerF128Op(Op, DAG,
+ getLibcallName(RTLIB::SUB_F128), 2);
+ case ISD::FMUL: return LowerF128Op(Op, DAG,
+ getLibcallName(RTLIB::MUL_F128), 2);
+ case ISD::FDIV: return LowerF128Op(Op, DAG,
+ getLibcallName(RTLIB::DIV_F128), 2);
+ case ISD::FSQRT: return LowerF128Op(Op, DAG,
+ getLibcallName(RTLIB::SQRT_F128),1);
+ case ISD::FABS:
+ case ISD::FNEG: return LowerFNEGorFABS(Op, DAG, isV9);
+ case ISD::FP_EXTEND: return LowerF128_FPEXTEND(Op, DAG, *this);
+ case ISD::FP_ROUND: return LowerF128_FPROUND(Op, DAG, *this);
+ case ISD::ADDC:
+ case ISD::ADDE:
+ case ISD::SUBC:
+ case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
+ case ISD::UMULO:
+ case ISD::SMULO: return LowerUMULO_SMULO(Op, DAG, *this);
+ case ISD::ATOMIC_LOAD:
+ case ISD::ATOMIC_STORE: return LowerATOMIC_LOAD_STORE(Op, DAG);
+ case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
+ }
+}
+
+MachineBasicBlock *
+SparcTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *BB) const {
+ switch (MI.getOpcode()) {
+ default: llvm_unreachable("Unknown SELECT_CC!");
+ case SP::SELECT_CC_Int_ICC:
+ case SP::SELECT_CC_FP_ICC:
+ case SP::SELECT_CC_DFP_ICC:
+ case SP::SELECT_CC_QFP_ICC:
+ return expandSelectCC(MI, BB, SP::BCOND);
+ case SP::SELECT_CC_Int_FCC:
+ case SP::SELECT_CC_FP_FCC:
+ case SP::SELECT_CC_DFP_FCC:
+ case SP::SELECT_CC_QFP_FCC:
+ return expandSelectCC(MI, BB, SP::FBCOND);
+ case SP::EH_SJLJ_SETJMP32ri:
+ case SP::EH_SJLJ_SETJMP32rr:
+ return emitEHSjLjSetJmp(MI, BB);
+ case SP::EH_SJLJ_LONGJMP32rr:
+ case SP::EH_SJLJ_LONGJMP32ri:
+ return emitEHSjLjLongJmp(MI, BB);
+ }
+}
+
+MachineBasicBlock *
+SparcTargetLowering::expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
+ unsigned BROpcode) const {
+ const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
+ DebugLoc dl = MI.getDebugLoc();
+ unsigned CC = (SPCC::CondCodes)MI.getOperand(3).getImm();
+
+ // To "insert" a SELECT_CC instruction, we actually have to insert the
+ // triangle control-flow pattern. The incoming instruction knows the
+ // destination vreg to set, the condition code register to branch on, the
+ // true/false values to select between, and the condition code for the branch.
+ //
+ // We produce the following control flow:
+ // ThisMBB
+ // | \
+ // | IfFalseMBB
+ // | /
+ // SinkMBB
+ const BasicBlock *LLVM_BB = BB->getBasicBlock();
+ MachineFunction::iterator It = ++BB->getIterator();
+
+ MachineBasicBlock *ThisMBB = BB;
+ MachineFunction *F = BB->getParent();
+ MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ MachineBasicBlock *SinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
+ F->insert(It, IfFalseMBB);
+ F->insert(It, SinkMBB);
+
+ // Transfer the remainder of ThisMBB and its successor edges to SinkMBB.
+ SinkMBB->splice(SinkMBB->begin(), ThisMBB,
+ std::next(MachineBasicBlock::iterator(MI)), ThisMBB->end());
+ SinkMBB->transferSuccessorsAndUpdatePHIs(ThisMBB);
+
+ // Set the new successors for ThisMBB.
+ ThisMBB->addSuccessor(IfFalseMBB);
+ ThisMBB->addSuccessor(SinkMBB);
+
+ BuildMI(ThisMBB, dl, TII.get(BROpcode))
+ .addMBB(SinkMBB)
+ .addImm(CC);
+
+ // IfFalseMBB just falls through to SinkMBB.
+ IfFalseMBB->addSuccessor(SinkMBB);
+
+ // %Result = phi [ %TrueValue, ThisMBB ], [ %FalseValue, IfFalseMBB ]
+ BuildMI(*SinkMBB, SinkMBB->begin(), dl, TII.get(SP::PHI),
+ MI.getOperand(0).getReg())
+ .addReg(MI.getOperand(1).getReg())
+ .addMBB(ThisMBB)
+ .addReg(MI.getOperand(2).getReg())
+ .addMBB(IfFalseMBB);
+
+ MI.eraseFromParent(); // The pseudo instruction is gone now.
+ return SinkMBB;
+}
+
+MachineBasicBlock *
+SparcTargetLowering::emitEHSjLjLongJmp(MachineInstr &MI,
+ MachineBasicBlock *MBB) const {
+ DebugLoc DL = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+
+ MachineFunction *MF = MBB->getParent();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ MachineInstrBuilder MIB;
+
+ MVT PVT = getPointerTy(MF->getDataLayout());
+ unsigned RegSize = PVT.getStoreSize();
+ assert(PVT == MVT::i32 && "Invalid Pointer Size!");
+
+ unsigned Buf = MI.getOperand(0).getReg();
+ unsigned JmpLoc = MRI.createVirtualRegister(&SP::IntRegsRegClass);
+
+ // TODO: If we do 64-bit handling, this should perhaps be FLUSHW, not TA 3.
+ MIB = BuildMI(*MBB, MI, DL, TII->get(SP::TRAPri), SP::G0)
+ .addImm(3)
+ .addImm(SPCC::ICC_A);
+
+ // Instruction to restore FP
+ const unsigned FP = SP::I6;
+ MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
+ .addReg(FP)
+ .addReg(Buf)
+ .addImm(0);
+
+ // Instruction to load jmp location
+ MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
+ .addReg(JmpLoc, RegState::Define)
+ .addReg(Buf)
+ .addImm(RegSize);
+
+ // Instruction to restore SP
+ const unsigned SP = SP::O6;
+ MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
+ .addReg(SP)
+ .addReg(Buf)
+ .addImm(2 * RegSize);
+
+ // Instruction to restore I7
+ MIB = BuildMI(*MBB, MI, DL, TII->get(SP::LDri))
+ .addReg(SP::I7)
+ .addReg(Buf, RegState::Kill)
+ .addImm(3 * RegSize);
+
+ // Jump to JmpLoc
+ BuildMI(*MBB, MI, DL, TII->get(SP::JMPLrr))
+ .addReg(SP::G0)
+ .addReg(JmpLoc, RegState::Kill)
+ .addReg(SP::G0);
+
+ MI.eraseFromParent();
+ return MBB;
+}
+
+MachineBasicBlock *
+SparcTargetLowering::emitEHSjLjSetJmp(MachineInstr &MI,
+ MachineBasicBlock *MBB) const {
+ DebugLoc DL = MI.getDebugLoc();
+ const TargetInstrInfo *TII = Subtarget->getInstrInfo();
+ const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
+
+ MachineFunction *MF = MBB->getParent();
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ MachineInstrBuilder MIB;
+
+ MVT PVT = getPointerTy(MF->getDataLayout());
+ unsigned RegSize = PVT.getStoreSize();
+ assert(PVT == MVT::i32 && "Invalid Pointer Size!");
+
+ unsigned DstReg = MI.getOperand(0).getReg();
+ const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
+ assert(TRI->isTypeLegalForClass(*RC, MVT::i32) && "Invalid destination!");
+ (void)TRI;
+ unsigned mainDstReg = MRI.createVirtualRegister(RC);
+ unsigned restoreDstReg = MRI.createVirtualRegister(RC);
+
+ // For v = setjmp(buf), we generate
+ //
+ // thisMBB:
+ // buf[0] = FP
+ // buf[RegSize] = restoreMBB <-- takes address of restoreMBB
+ // buf[RegSize * 2] = O6
+ // buf[RegSize * 3] = I7
+ // Ensure restoreMBB remains in the relocations list (done using a bn instruction)
+ // b mainMBB
+ //
+ // mainMBB:
+ // v_main = 0
+ // b sinkMBB
+ //
+ // restoreMBB:
+ // v_restore = 1
+ // --fall through--
+ //
+ // sinkMBB:
+ // v = phi(main, restore)
+
+ const BasicBlock *BB = MBB->getBasicBlock();
+ MachineFunction::iterator It = ++MBB->getIterator();
+ MachineBasicBlock *thisMBB = MBB;
+ MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
+ MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
+ MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
+
+ MF->insert(It, mainMBB);
+ MF->insert(It, restoreMBB);
+ MF->insert(It, sinkMBB);
+ restoreMBB->setHasAddressTaken();
+
+ // Transfer the remainder of BB and its successor edges to sinkMBB.
+ sinkMBB->splice(sinkMBB->begin(), MBB,
+ std::next(MachineBasicBlock::iterator(MI)),
+ MBB->end());
+ sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
+
+ unsigned LabelReg = MRI.createVirtualRegister(&SP::IntRegsRegClass);
+ unsigned LabelReg2 = MRI.createVirtualRegister(&SP::IntRegsRegClass);
+ unsigned BufReg = MI.getOperand(1).getReg();
+
+ // Instruction to store FP
+ const unsigned FP = SP::I6;
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
+ .addReg(BufReg)
+ .addImm(0)
+ .addReg(FP);
+
+ // Instructions to store jmp location
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::SETHIi))
+ .addReg(LabelReg, RegState::Define)
+ .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_HI);
+
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::ORri))
+ .addReg(LabelReg2, RegState::Define)
+ .addReg(LabelReg, RegState::Kill)
+ .addMBB(restoreMBB, SparcMCExpr::VK_Sparc_LO);
+
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
+ .addReg(BufReg)
+ .addImm(RegSize)
+ .addReg(LabelReg2, RegState::Kill);
+
+ // Instruction to store SP
+ const unsigned SP = SP::O6;
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
+ .addReg(BufReg)
+ .addImm(2 * RegSize)
+ .addReg(SP);
+
+ // Instruction to store I7
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::STri))
+ .addReg(BufReg)
+ .addImm(3 * RegSize)
+ .addReg(SP::I7);
+
+ // FIXME: This next instruction exists only to ensure that the restoreMBB
+ // block address remains valid through later optimization passes; the ICC_N
+ // condition ensures the branch is never taken. The commented-out line below
+ // was an alternative attempt to achieve this, which brought myriad problems.
+ //MIB = BuildMI(thisMBB, DL, TII->get(SP::EH_SjLj_Setup)).addMBB(restoreMBB, SparcMCExpr::VK_Sparc_None);
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
+ .addMBB(restoreMBB)
+ .addImm(SPCC::ICC_N);
+
+ MIB = BuildMI(thisMBB, DL, TII->get(SP::BCOND))
+ .addMBB(mainMBB)
+ .addImm(SPCC::ICC_A);
+
+ thisMBB->addSuccessor(mainMBB);
+ thisMBB->addSuccessor(restoreMBB);
+
+ // mainMBB:
+ MIB = BuildMI(mainMBB, DL, TII->get(SP::ORrr))
+ .addReg(mainDstReg, RegState::Define)
+ .addReg(SP::G0)
+ .addReg(SP::G0);
+ MIB = BuildMI(mainMBB, DL, TII->get(SP::BCOND))
+ .addMBB(sinkMBB)
+ .addImm(SPCC::ICC_A);
+
+ mainMBB->addSuccessor(sinkMBB);
+
+ // restoreMBB:
+ MIB = BuildMI(restoreMBB, DL, TII->get(SP::ORri))
+ .addReg(restoreDstReg, RegState::Define)
+ .addReg(SP::G0)
+ .addImm(1);
+ //MIB = BuildMI(restoreMBB, DL, TII->get(SP::BCOND)).addMBB(sinkMBB).addImm(SPCC::ICC_A);
+ restoreMBB->addSuccessor(sinkMBB);
+
+ // sinkMBB:
+ MIB = BuildMI(*sinkMBB, sinkMBB->begin(), DL,
+ TII->get(SP::PHI), DstReg)
+ .addReg(mainDstReg).addMBB(mainMBB)
+ .addReg(restoreDstReg).addMBB(restoreMBB);
+
+ MI.eraseFromParent();
+ return sinkMBB;
+}
+
+//===----------------------------------------------------------------------===//
+// Sparc Inline Assembly Support
+//===----------------------------------------------------------------------===//
+
+/// getConstraintType - Given a constraint letter, return the type of
+/// constraint it is for this target.
+SparcTargetLowering::ConstraintType
+SparcTargetLowering::getConstraintType(StringRef Constraint) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ default: break;
+ case 'r':
+ case 'f':
+ case 'e':
+ return C_RegisterClass;
+ case 'I': // SIMM13
+ return C_Other;
+ }
+ }
+
+ return TargetLowering::getConstraintType(Constraint);
+}
+
+TargetLowering::ConstraintWeight SparcTargetLowering::
+getSingleConstraintMatchWeight(AsmOperandInfo &info,
+ const char *constraint) const {
+ ConstraintWeight weight = CW_Invalid;
+ Value *CallOperandVal = info.CallOperandVal;
+ // If we don't have a value, we can't do a match,
+ // but allow it at the lowest weight.
+ if (!CallOperandVal)
+ return CW_Default;
+
+ // Look at the constraint type.
+ switch (*constraint) {
+ default:
+ weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
+ break;
+ case 'I': // SIMM13
+ if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
+ if (isInt<13>(C->getSExtValue()))
+ weight = CW_Constant;
+ }
+ break;
+ }
+ return weight;
+}
+
+/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
+/// vector. If it is invalid, don't add anything to Ops.
+void SparcTargetLowering::
+LowerAsmOperandForConstraint(SDValue Op,
+ std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const {
+ SDValue Result(nullptr, 0);
+
+ // Only support length 1 constraints for now.
+ if (Constraint.length() > 1)
+ return;
+
+ char ConstraintLetter = Constraint[0];
+ switch (ConstraintLetter) {
+ default: break;
+ case 'I':
+ if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
+ if (isInt<13>(C->getSExtValue())) {
+ Result = DAG.getTargetConstant(C->getSExtValue(), SDLoc(Op),
+ Op.getValueType());
+ break;
+ }
+ return;
+ }
+ }
+
+ if (Result.getNode()) {
+ Ops.push_back(Result);
+ return;
+ }
+ TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
+}
+
+std::pair<unsigned, const TargetRegisterClass *>
+SparcTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint,
+ MVT VT) const {
+ if (Constraint.size() == 1) {
+ switch (Constraint[0]) {
+ case 'r':
+ if (VT == MVT::v2i32)
+ return std::make_pair(0U, &SP::IntPairRegClass);
+ else
+ return std::make_pair(0U, &SP::IntRegsRegClass);
+ case 'f':
+ if (VT == MVT::f32)
+ return std::make_pair(0U, &SP::FPRegsRegClass);
+ else if (VT == MVT::f64)
+ return std::make_pair(0U, &SP::LowDFPRegsRegClass);
+ else if (VT == MVT::f128)
+ return std::make_pair(0U, &SP::LowQFPRegsRegClass);
+ llvm_unreachable("Unknown ValueType for f-register-type!");
+ break;
+ case 'e':
+ if (VT == MVT::f32)
+ return std::make_pair(0U, &SP::FPRegsRegClass);
+ else if (VT == MVT::f64)
+ return std::make_pair(0U, &SP::DFPRegsRegClass);
+ else if (VT == MVT::f128)
+ return std::make_pair(0U, &SP::QFPRegsRegClass);
+ llvm_unreachable("Unknown ValueType for e-register-type!");
+ break;
+ }
+ } else if (!Constraint.empty() && Constraint.size() <= 5
+ && Constraint[0] == '{' && *(Constraint.end()-1) == '}') {
+ // constraint = '{r<d>}'
+ // Remove the braces from around the name.
+ StringRef name(Constraint.data()+1, Constraint.size()-2);
+ // Handle register aliases:
+ // r0-r7 -> g0-g7
+ // r8-r15 -> o0-o7
+ // r16-r23 -> l0-l7
+ // r24-r31 -> i0-i7
+ uint64_t intVal = 0;
+ if (name.substr(0, 1).equals("r")
+ && !name.substr(1).getAsInteger(10, intVal) && intVal <= 31) {
+ const char regTypes[] = { 'g', 'o', 'l', 'i' };
+ char regType = regTypes[intVal/8];
+ char regIdx = '0' + (intVal % 8);
+ char tmp[] = { '{', regType, regIdx, '}', 0 };
+ std::string newConstraint = std::string(tmp);
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, newConstraint,
+ VT);
+ }
+ }
+
+ return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
+}
+
+bool
+SparcTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
+ // The Sparc target isn't yet aware of offsets.
+ return false;
+}
+
+void SparcTargetLowering::ReplaceNodeResults(SDNode *N,
+ SmallVectorImpl<SDValue>& Results,
+ SelectionDAG &DAG) const {
+
+ SDLoc dl(N);
+
+ RTLIB::Libcall libCall = RTLIB::UNKNOWN_LIBCALL;
+
+ switch (N->getOpcode()) {
+ default:
+ llvm_unreachable("Do not know how to custom type legalize this operation!");
+
+ case ISD::FP_TO_SINT:
+ case ISD::FP_TO_UINT:
+ // Custom lower only if it involves f128 or i64.
+ if (N->getOperand(0).getValueType() != MVT::f128
+ || N->getValueType(0) != MVT::i64)
+ return;
+ libCall = ((N->getOpcode() == ISD::FP_TO_SINT)
+ ? RTLIB::FPTOSINT_F128_I64
+ : RTLIB::FPTOUINT_F128_I64);
+
+ Results.push_back(LowerF128Op(SDValue(N, 0),
+ DAG,
+ getLibcallName(libCall),
+ 1));
+ return;
+
+ case ISD::SINT_TO_FP:
+ case ISD::UINT_TO_FP:
+ // Custom lower only if it involves f128 or i64.
+ if (N->getValueType(0) != MVT::f128
+ || N->getOperand(0).getValueType() != MVT::i64)
+ return;
+
+ libCall = ((N->getOpcode() == ISD::SINT_TO_FP)
+ ? RTLIB::SINTTOFP_I64_F128
+ : RTLIB::UINTTOFP_I64_F128);
+
+ Results.push_back(LowerF128Op(SDValue(N, 0),
+ DAG,
+ getLibcallName(libCall),
+ 1));
+ return;
+ case ISD::LOAD: {
+ LoadSDNode *Ld = cast<LoadSDNode>(N);
+ // Custom handling only for i64: turn i64 load into a v2i32 load,
+ // and a bitcast.
+ if (Ld->getValueType(0) != MVT::i64 || Ld->getMemoryVT() != MVT::i64)
+ return;
+
+ SDLoc dl(N);
+ SDValue LoadRes = DAG.getExtLoad(
+ Ld->getExtensionType(), dl, MVT::v2i32, Ld->getChain(),
+ Ld->getBasePtr(), Ld->getPointerInfo(), MVT::v2i32, Ld->getAlignment(),
+ Ld->getMemOperand()->getFlags(), Ld->getAAInfo());
+
+ SDValue Res = DAG.getNode(ISD::BITCAST, dl, MVT::i64, LoadRes);
+ Results.push_back(Res);
+ Results.push_back(LoadRes.getValue(1));
+ return;
+ }
+ }
+}
+
+// Override to enable LOAD_STACK_GUARD lowering on Linux.
+bool SparcTargetLowering::useLoadStackGuardNode() const {
+ if (!Subtarget->isTargetLinux())
+ return TargetLowering::useLoadStackGuardNode();
+ return true;
+}
+
+// Override to disable global variable loading on Linux.
+void SparcTargetLowering::insertSSPDeclarations(Module &M) const {
+ if (!Subtarget->isTargetLinux())
+ return TargetLowering::insertSSPDeclarations(M);
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
new file mode 100644
index 000000000000..bf700d6a99d8
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcISelLowering.h
@@ -0,0 +1,223 @@
+//===-- SparcISelLowering.h - Sparc DAG Lowering Interface ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interfaces that Sparc uses to lower LLVM code into a
+// selection DAG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_SPARCISELLOWERING_H
+#define LLVM_LIB_TARGET_SPARC_SPARCISELLOWERING_H
+
+#include "Sparc.h"
+#include "llvm/CodeGen/TargetLowering.h"
+
+namespace llvm {
+ class SparcSubtarget;
+
+ namespace SPISD {
+ enum NodeType : unsigned {
+ FIRST_NUMBER = ISD::BUILTIN_OP_END,
+ CMPICC, // Compare two GPR operands, set icc+xcc.
+ CMPFCC, // Compare two FP operands, set fcc.
+ BRICC, // Branch to dest on icc condition.
+ BRXCC, // Branch to dest on xcc condition (64-bit only).
+ BRFCC, // Branch to dest on fcc condition.
+ SELECT_ICC, // Select between two values using the current ICC flags.
+ SELECT_XCC, // Select between two values using the current XCC flags.
+ SELECT_FCC, // Select between two values using the current FCC flags.
+
+ EH_SJLJ_SETJMP, // builtin setjmp operation
+ EH_SJLJ_LONGJMP, // builtin longjmp operation
+
+ Hi, Lo, // Hi/Lo operations, typically on a global address.
+
+ FTOI, // FP to Int within a FP register.
+ ITOF, // Int to FP within a FP register.
+ FTOX, // FP to Int64 within a FP register.
+ XTOF, // Int64 to FP within a FP register.
+
+ CALL, // A call instruction.
+ RET_FLAG, // Return with a flag operand.
+ GLOBAL_BASE_REG, // Global base reg for PIC.
+ FLUSHW, // FLUSH register windows to stack.
+
+ TLS_ADD, // For Thread Local Storage (TLS).
+ TLS_LD,
+ TLS_CALL
+ };
+ }
+
+ class SparcTargetLowering : public TargetLowering {
+ const SparcSubtarget *Subtarget;
+ public:
+ SparcTargetLowering(const TargetMachine &TM, const SparcSubtarget &STI);
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
+
+ bool useSoftFloat() const override;
+
+ /// computeKnownBitsForTargetNode - Determine which of the bits specified
+ /// in Mask are known to be either zero or one and return them in the
+ /// KnownZero/KnownOne bitsets.
+ void computeKnownBitsForTargetNode(const SDValue Op,
+ KnownBits &Known,
+ const APInt &DemandedElts,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
+
+ MachineBasicBlock *
+ EmitInstrWithCustomInserter(MachineInstr &MI,
+ MachineBasicBlock *MBB) const override;
+
+ const char *getTargetNodeName(unsigned Opcode) const override;
+
+ ConstraintType getConstraintType(StringRef Constraint) const override;
+ ConstraintWeight
+ getSingleConstraintMatchWeight(AsmOperandInfo &info,
+ const char *constraint) const override;
+ void LowerAsmOperandForConstraint(SDValue Op,
+ std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
+
+ unsigned
+ getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
+ if (ConstraintCode == "o")
+ return InlineAsm::Constraint_o;
+ return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
+ }
+
+ std::pair<unsigned, const TargetRegisterClass *>
+ getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
+ StringRef Constraint, MVT VT) const override;
+
+ bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
+ MVT getScalarShiftAmountTy(const DataLayout &, EVT) const override {
+ return MVT::i32;
+ }
+
+ unsigned getRegisterByName(const char* RegName, EVT VT,
+ SelectionDAG &DAG) const override;
+
+ /// If a physical register, this returns the register that receives the
+ /// exception address on entry to an EH pad.
+ unsigned
+ getExceptionPointerRegister(const Constant *PersonalityFn) const override {
+ return SP::I0;
+ }
+
+ /// If a physical register, this returns the register that receives the
+ /// exception typeid on entry to a landing pad.
+ unsigned
+ getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
+ return SP::I1;
+ }
+
+ /// Override to support customized stack guard loading.
+ bool useLoadStackGuardNode() const override;
+ void insertSSPDeclarations(Module &M) const override;
+
+ /// getSetCCResultType - Return the ISD::SETCC ValueType
+ EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
+ EVT VT) const override;
+
+ SDValue
+ LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue LowerFormalArguments_32(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+ SDValue LowerFormalArguments_64(SDValue Chain, CallingConv::ID CallConv,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::InputArg> &Ins,
+ const SDLoc &dl, SelectionDAG &DAG,
+ SmallVectorImpl<SDValue> &InVals) const;
+
+ SDValue
+ LowerCall(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const;
+ SDValue LowerCall_64(TargetLowering::CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const;
+
+ SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &dl, SelectionDAG &DAG) const override;
+ SDValue LowerReturn_32(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &DL, SelectionDAG &DAG) const;
+ SDValue LowerReturn_64(SDValue Chain, CallingConv::ID CallConv,
+ bool IsVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ const SDLoc &DL, SelectionDAG &DAG) const;
+
+ SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI) const;
+ SDValue LowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG,
+ const SparcTargetLowering &TLI) const;
+
+ unsigned getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const;
+ SDValue withTargetFlags(SDValue Op, unsigned TF, SelectionDAG &DAG) const;
+ SDValue makeHiLoPair(SDValue Op, unsigned HiTF, unsigned LoTF,
+ SelectionDAG &DAG) const;
+ SDValue makeAddress(SDValue Op, SelectionDAG &DAG) const;
+
+ SDValue LowerF128_LibCallArg(SDValue Chain, ArgListTy &Args, SDValue Arg,
+ const SDLoc &DL, SelectionDAG &DAG) const;
+ SDValue LowerF128Op(SDValue Op, SelectionDAG &DAG,
+ const char *LibFuncName,
+ unsigned numArgs) const;
+ SDValue LowerF128Compare(SDValue LHS, SDValue RHS, unsigned &SPCC,
+ const SDLoc &DL, SelectionDAG &DAG) const;
+
+ SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
+
+ bool ShouldShrinkFPConstant(EVT VT) const override {
+ // Do not shrink FP constpool if VT == MVT::f128.
+ // (ldd, call _Q_fdtoq) is more expensive than two ldds.
+ return VT != MVT::f128;
+ }
+
+ bool shouldInsertFencesForAtomic(const Instruction *I) const override {
+ // FIXME: We insert fences for each atomic operation and generate
+ // sub-optimal code for PSO/TSO. (Approximately nobody uses any
+ // mode but TSO, which makes this even more silly.)
+ return true;
+ }
+
+ AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
+
+ void ReplaceNodeResults(SDNode *N,
+ SmallVectorImpl<SDValue>& Results,
+ SelectionDAG &DAG) const override;
+
+ MachineBasicBlock *expandSelectCC(MachineInstr &MI, MachineBasicBlock *BB,
+ unsigned BROpcode) const;
+ MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr &MI,
+ MachineBasicBlock *MBB) const;
+ MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr &MI,
+ MachineBasicBlock *MBB) const;
+ };
+} // end namespace llvm
+
+#endif // SPARC_ISELLOWERING_H
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstr64Bit.td b/contrib/llvm/lib/Target/Sparc/SparcInstr64Bit.td
new file mode 100644
index 000000000000..f6518c936ebc
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstr64Bit.td
@@ -0,0 +1,541 @@
+//===-- SparcInstr64Bit.td - 64-bit instructions for Sparc Target ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains instruction definitions and patterns needed for 64-bit
+// code generation on SPARC v9.
+//
+// Some SPARC v9 instructions are defined in SparcInstrInfo.td because they can
+// also be used in 32-bit code running on a SPARC v9 CPU.
+//
+//===----------------------------------------------------------------------===//
+
+let Predicates = [Is64Bit] in {
+// The same integer registers are used for i32 and i64 values.
+// When registers hold i32 values, the high bits are don't care.
+// This gives us free trunc and anyext.
+def : Pat<(i64 (anyext i32:$val)), (COPY_TO_REGCLASS $val, I64Regs)>;
+def : Pat<(i32 (trunc i64:$val)), (COPY_TO_REGCLASS $val, IntRegs)>;
+
+} // Predicates = [Is64Bit]
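+
+// For example (illustrative IR, not from this file): for
+//   %t = trunc i64 %v to i32
+// no shift or mask is emitted; the patterns above just re-tag the value with
+// the other register class, which usually folds away to a no-op copy.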
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Shift Instructions.
+//===----------------------------------------------------------------------===//
+//
+// The 32-bit shift instructions are still available. The left shift sll
+// instructions shift all 64 bits, but only accept a 5-bit shift amount.
+//
+// The srl instructions only shift the low 32 bits and clear the high 32 bits.
+// Finally, sra shifts the low 32 bits and sign-extends to 64 bits.
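+//
+// For example (illustrative value, assuming %o0 = 0xffffffff_80000000):
+//   srl %o0, 0, %o1   ! %o1 = 0x00000000_80000000 (zero-extends bits 31-0)
+//   sra %o0, 0, %o1   ! %o1 = 0xffffffff_80000000 (sign-extends from bit 31)
+// which is exactly how the zext/sext patterns below are implemented.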
+
+let Predicates = [Is64Bit] in {
+
+def : Pat<(i64 (zext i32:$val)), (SRLri $val, 0)>;
+def : Pat<(i64 (sext i32:$val)), (SRAri $val, 0)>;
+
+def : Pat<(i64 (and i64:$val, 0xffffffff)), (SRLri $val, 0)>;
+def : Pat<(i64 (sext_inreg i64:$val, i32)), (SRAri $val, 0)>;
+
+defm SLLX : F3_S<"sllx", 0b100101, 1, shl, i64, I64Regs>;
+defm SRLX : F3_S<"srlx", 0b100110, 1, srl, i64, I64Regs>;
+defm SRAX : F3_S<"srax", 0b100111, 1, sra, i64, I64Regs>;
+
+} // Predicates = [Is64Bit]
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Immediates.
+//===----------------------------------------------------------------------===//
+//
+// All 32-bit immediates can be materialized with sethi+or, but 64-bit
+// immediates may require more code. There may be a point where it is
+// preferable to use a constant pool load instead, depending on the
+// microarchitecture.
+
+// Single-instruction patterns.
+
+// The ALU instructions want their simm13 operands as i32 immediates.
+def as_i32imm : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32);
+}]>;
+def : Pat<(i64 simm13:$val), (ORri (i64 G0), (as_i32imm $val))>;
+def : Pat<(i64 SETHIimm:$val), (SETHIi (HI22 $val))>;
+
+// Double-instruction patterns.
+
+// All unsigned i32 immediates can be handled by sethi+or.
+def uimm32 : PatLeaf<(imm), [{ return isUInt<32>(N->getZExtValue()); }]>;
+def : Pat<(i64 uimm32:$val), (ORri (SETHIi (HI22 $val)), (LO10 $val))>,
+ Requires<[Is64Bit]>;
+
+// All negative i33 immediates can be handled by sethi+xor.
+def nimm33 : PatLeaf<(imm), [{
+ int64_t Imm = N->getSExtValue();
+ return Imm < 0 && isInt<33>(Imm);
+}]>;
+// Bits 10-31 inverted. Same as assembler's %hix.
+def HIX22 : SDNodeXForm<imm, [{
+ uint64_t Val = (~N->getZExtValue() >> 10) & ((1u << 22) - 1);
+ return CurDAG->getTargetConstant(Val, SDLoc(N), MVT::i32);
+}]>;
+// Bits 0-9 with ones in bits 10-31. Same as assembler's %lox.
+def LOX10 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant(~(~N->getZExtValue() & 0x3ff), SDLoc(N),
+ MVT::i32);
+}]>;
+def : Pat<(i64 nimm33:$val), (XORri (SETHIi (HIX22 $val)), (LOX10 $val))>,
+ Requires<[Is64Bit]>;
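+// For example (worked through with an illustrative value), materializing
+// -0x12345678 = 0xffffffff_edcba988:
+//   sethi %hix(-0x12345678), %o0    ! %o0 = 0x12345400
+//   xor   %o0, %lox(-0x12345678), %o0
+// where %lox sign-extends to 0xffffffff_fffffd88, so the xor flips the upper
+// 32 bits and the inverted low bits, giving 0xffffffff_edcba988.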
+
+// More possible patterns:
+//
+// (sllx sethi, n)
+// (sllx simm13, n)
+//
+// 3 instrs:
+//
+// (xor (sllx sethi), simm13)
+// (sllx (xor sethi, simm13))
+//
+// 4 instrs:
+//
+// (or sethi, (sllx sethi))
+// (xnor sethi, (sllx sethi))
+//
+// 5 instrs:
+//
+// (or (sllx sethi), (or sethi, simm13))
+// (xnor (sllx sethi), (or sethi, simm13))
+// (or (sllx sethi), (sllx sethi))
+// (xnor (sllx sethi), (sllx sethi))
+//
+// Worst case is 6 instrs:
+//
+// (or (sllx (or sethi, simm13)), (or sethi, simm13))
+
+// Bits 42-63, same as assembler's %hh.
+def HH22 : SDNodeXForm<imm, [{
+ uint64_t Val = (N->getZExtValue() >> 42) & ((1u << 22) - 1);
+ return CurDAG->getTargetConstant(Val, SDLoc(N), MVT::i32);
+}]>;
+// Bits 32-41, same as assembler's %hm.
+def HM10 : SDNodeXForm<imm, [{
+ uint64_t Val = (N->getZExtValue() >> 32) & ((1u << 10) - 1);
+ return CurDAG->getTargetConstant(Val, SDLoc(N), MVT::i32);
+}]>;
+def : Pat<(i64 imm:$val),
+ (ORrr (SLLXri (ORri (SETHIi (HH22 $val)), (HM10 $val)), (i32 32)),
+ (ORri (SETHIi (HI22 $val)), (LO10 $val)))>,
+ Requires<[Is64Bit]>;
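+// For example (worked through with an illustrative constant
+// C = 0x0123456789abcdef, temporaries %t1/%t2 chosen arbitrarily):
+//   sethi %hh(C), %t1        ! %t1 = 0x01234400           (bits 63-42)
+//   or    %t1, %hm(C), %t1   ! %t1 = 0x01234567           (or in bits 41-32)
+//   sllx  %t1, 32, %t1       ! %t1 = 0x01234567_00000000
+//   sethi %hi(C), %t2        ! %t2 = 0x89abcc00           (bits 31-10)
+//   or    %t2, %lo(C), %t2   ! %t2 = 0x89abcdef
+//   or    %t1, %t2, %rd      ! %rd = 0x0123456789abcdef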
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Integer Arithmetic and Logic.
+//===----------------------------------------------------------------------===//
+
+let Predicates = [Is64Bit] in {
+
+// Register-register instructions.
+let isCodeGenOnly = 1 in {
+defm ANDX : F3_12<"and", 0b000001, and, I64Regs, i64, i64imm>;
+defm ORX : F3_12<"or", 0b000010, or, I64Regs, i64, i64imm>;
+defm XORX : F3_12<"xor", 0b000011, xor, I64Regs, i64, i64imm>;
+
+def ANDXNrr : F3_1<2, 0b000101,
+ (outs I64Regs:$dst), (ins I64Regs:$b, I64Regs:$c),
+ "andn $b, $c, $dst",
+ [(set i64:$dst, (and i64:$b, (not i64:$c)))]>;
+def ORXNrr : F3_1<2, 0b000110,
+ (outs I64Regs:$dst), (ins I64Regs:$b, I64Regs:$c),
+ "orn $b, $c, $dst",
+ [(set i64:$dst, (or i64:$b, (not i64:$c)))]>;
+def XNORXrr : F3_1<2, 0b000111,
+ (outs I64Regs:$dst), (ins I64Regs:$b, I64Regs:$c),
+ "xnor $b, $c, $dst",
+ [(set i64:$dst, (not (xor i64:$b, i64:$c)))]>;
+
+defm ADDX : F3_12<"add", 0b000000, add, I64Regs, i64, i64imm>;
+defm SUBX : F3_12<"sub", 0b000100, sub, I64Regs, i64, i64imm>;
+
+def TLS_ADDXrr : F3_1<2, 0b000000, (outs I64Regs:$rd),
+ (ins I64Regs:$rs1, I64Regs:$rs2, TLSSym:$sym),
+ "add $rs1, $rs2, $rd, $sym",
+ [(set i64:$rd,
+ (tlsadd i64:$rs1, i64:$rs2, tglobaltlsaddr:$sym))]>;
+
+// "LEA" form of add
+def LEAX_ADDri : F3_2<2, 0b000000,
+ (outs I64Regs:$dst), (ins MEMri:$addr),
+ "add ${addr:arith}, $dst",
+ [(set iPTR:$dst, ADDRri:$addr)]>;
+}
+
+def : Pat<(SPcmpicc i64:$a, i64:$b), (CMPrr $a, $b)>;
+def : Pat<(SPcmpicc i64:$a, (i64 simm13:$b)), (CMPri $a, (as_i32imm $b))>;
+def : Pat<(ctpop i64:$src), (POPCrr $src)>;
+
+} // Predicates = [Is64Bit]
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Integer Multiply and Divide.
+//===----------------------------------------------------------------------===//
+
+let Predicates = [Is64Bit] in {
+
+def MULXrr : F3_1<2, 0b001001,
+ (outs I64Regs:$rd), (ins I64Regs:$rs1, I64Regs:$rs2),
+ "mulx $rs1, $rs2, $rd",
+ [(set i64:$rd, (mul i64:$rs1, i64:$rs2))]>;
+def MULXri : F3_2<2, 0b001001,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, i64imm:$simm13),
+ "mulx $rs1, $simm13, $rd",
+ [(set i64:$rd, (mul i64:$rs1, (i64 simm13:$simm13)))]>;
+
+// Division can trap.
+let hasSideEffects = 1 in {
+def SDIVXrr : F3_1<2, 0b101101,
+ (outs I64Regs:$rd), (ins I64Regs:$rs1, I64Regs:$rs2),
+ "sdivx $rs1, $rs2, $rd",
+ [(set i64:$rd, (sdiv i64:$rs1, i64:$rs2))]>;
+def SDIVXri : F3_2<2, 0b101101,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, i64imm:$simm13),
+ "sdivx $rs1, $simm13, $rd",
+ [(set i64:$rd, (sdiv i64:$rs1, (i64 simm13:$simm13)))]>;
+
+def UDIVXrr : F3_1<2, 0b001101,
+ (outs I64Regs:$rd), (ins I64Regs:$rs1, I64Regs:$rs2),
+ "udivx $rs1, $rs2, $rd",
+ [(set i64:$rd, (udiv i64:$rs1, i64:$rs2))]>;
+def UDIVXri : F3_2<2, 0b001101,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, i64imm:$simm13),
+ "udivx $rs1, $simm13, $rd",
+ [(set i64:$rd, (udiv i64:$rs1, (i64 simm13:$simm13)))]>;
+} // hasSideEffects = 1
+
+} // Predicates = [Is64Bit]
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Loads and Stores.
+//===----------------------------------------------------------------------===//
+//
+// All the 32-bit loads and stores are available. The extending loads are sign
+// or zero-extending to 64 bits. The LDrr and LDri instructions load 32 bits
+// zero-extended to i64. Their mnemonic is lduw in SPARC v9 (Load Unsigned
+// Word).
+//
+// SPARC v9 adds 64-bit loads as well as a sign-extending ldsw i32 load.
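+//
+// For example (illustrative register choices):
+//   lduw [%o0], %o1   ! load i32, zero-extended to 64 bits (LDrr/LDri)
+//   ldsw [%o0], %o1   ! load i32, sign-extended to 64 bits (v9 LDSW)
+//   ldx  [%o0], %o1   ! load a full 64-bit word (v9 LDX)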
+
+let Predicates = [Is64Bit] in {
+
+// 64-bit loads.
+let DecoderMethod = "DecodeLoadInt" in
+ defm LDX : Load<"ldx", 0b001011, load, I64Regs, i64>;
+
+let mayLoad = 1, isCodeGenOnly = 1, isAsmParserOnly = 1 in
+ def TLS_LDXrr : F3_1<3, 0b001011,
+ (outs IntRegs:$dst), (ins MEMrr:$addr, TLSSym:$sym),
+ "ldx [$addr], $dst, $sym",
+ [(set i64:$dst,
+ (tlsld ADDRrr:$addr, tglobaltlsaddr:$sym))]>;
+
+// Extending loads to i64.
+def : Pat<(i64 (zextloadi1 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>;
+def : Pat<(i64 (zextloadi1 ADDRri:$addr)), (LDUBri ADDRri:$addr)>;
+def : Pat<(i64 (extloadi1 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>;
+def : Pat<(i64 (extloadi1 ADDRri:$addr)), (LDUBri ADDRri:$addr)>;
+
+def : Pat<(i64 (zextloadi8 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>;
+def : Pat<(i64 (zextloadi8 ADDRri:$addr)), (LDUBri ADDRri:$addr)>;
+def : Pat<(i64 (extloadi8 ADDRrr:$addr)), (LDUBrr ADDRrr:$addr)>;
+def : Pat<(i64 (extloadi8 ADDRri:$addr)), (LDUBri ADDRri:$addr)>;
+def : Pat<(i64 (sextloadi8 ADDRrr:$addr)), (LDSBrr ADDRrr:$addr)>;
+def : Pat<(i64 (sextloadi8 ADDRri:$addr)), (LDSBri ADDRri:$addr)>;
+
+def : Pat<(i64 (zextloadi16 ADDRrr:$addr)), (LDUHrr ADDRrr:$addr)>;
+def : Pat<(i64 (zextloadi16 ADDRri:$addr)), (LDUHri ADDRri:$addr)>;
+def : Pat<(i64 (extloadi16 ADDRrr:$addr)), (LDUHrr ADDRrr:$addr)>;
+def : Pat<(i64 (extloadi16 ADDRri:$addr)), (LDUHri ADDRri:$addr)>;
+def : Pat<(i64 (sextloadi16 ADDRrr:$addr)), (LDSHrr ADDRrr:$addr)>;
+def : Pat<(i64 (sextloadi16 ADDRri:$addr)), (LDSHri ADDRri:$addr)>;
+
+def : Pat<(i64 (zextloadi32 ADDRrr:$addr)), (LDrr ADDRrr:$addr)>;
+def : Pat<(i64 (zextloadi32 ADDRri:$addr)), (LDri ADDRri:$addr)>;
+def : Pat<(i64 (extloadi32 ADDRrr:$addr)), (LDrr ADDRrr:$addr)>;
+def : Pat<(i64 (extloadi32 ADDRri:$addr)), (LDri ADDRri:$addr)>;
+
+// Sign-extending load of i32 into i64 is a new SPARC v9 instruction.
+let DecoderMethod = "DecodeLoadInt" in
+ defm LDSW : Load<"ldsw", 0b001000, sextloadi32, I64Regs, i64>;
+
+// 64-bit stores.
+let DecoderMethod = "DecodeStoreInt" in
+ defm STX : Store<"stx", 0b001110, store, I64Regs, i64>;
+
+// Truncating stores from i64 are identical to the i32 stores.
+def : Pat<(truncstorei8 i64:$src, ADDRrr:$addr), (STBrr ADDRrr:$addr, $src)>;
+def : Pat<(truncstorei8 i64:$src, ADDRri:$addr), (STBri ADDRri:$addr, $src)>;
+def : Pat<(truncstorei16 i64:$src, ADDRrr:$addr), (STHrr ADDRrr:$addr, $src)>;
+def : Pat<(truncstorei16 i64:$src, ADDRri:$addr), (STHri ADDRri:$addr, $src)>;
+def : Pat<(truncstorei32 i64:$src, ADDRrr:$addr), (STrr ADDRrr:$addr, $src)>;
+def : Pat<(truncstorei32 i64:$src, ADDRri:$addr), (STri ADDRri:$addr, $src)>;
+
+// store 0, addr -> store %g0, addr
+def : Pat<(store (i64 0), ADDRrr:$dst), (STXrr ADDRrr:$dst, (i64 G0))>;
+def : Pat<(store (i64 0), ADDRri:$dst), (STXri ADDRri:$dst, (i64 G0))>;
+
+} // Predicates = [Is64Bit]
+
+
+//===----------------------------------------------------------------------===//
+// 64-bit Conditionals.
+//===----------------------------------------------------------------------===//
+
+//
+// Flag-setting instructions like subcc and addcc set both icc and xcc flags.
+// The icc flags correspond to the 32-bit result, and the xcc flags are for
+// the full 64-bit result.
+//
+// We reuse CMPICC SDNodes for compares, but use new BRXCC branch nodes for
+// 64-bit compares. See LowerBR_CC.
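+//
+// For example (illustrative only), a signed 64-bit compare-and-branch:
+//   cmp %o0, %o1          ! subcc %o0, %o1, %g0: sets both icc and xcc
+//   ble %xcc, .Ltarget    ! tests the full 64-bit result via xcc
+//   nop                   ! delay slot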
+
+let Predicates = [Is64Bit] in {
+
+let Uses = [ICC], cc = 0b10 in
+ defm BPX : IPredBranch<"%xcc", [(SPbrxcc bb:$imm19, imm:$cond)]>;
+
+// Conditional moves on %xcc.
+let Uses = [ICC], Constraints = "$f = $rd" in {
+let intcc = 1, cc = 0b10 in {
+def MOVXCCrr : F4_1<0b101100, (outs IntRegs:$rd),
+ (ins IntRegs:$rs2, IntRegs:$f, CCOp:$cond),
+ "mov$cond %xcc, $rs2, $rd",
+ [(set i32:$rd,
+ (SPselectxcc i32:$rs2, i32:$f, imm:$cond))]>;
+def MOVXCCri : F4_2<0b101100, (outs IntRegs:$rd),
+ (ins i32imm:$simm11, IntRegs:$f, CCOp:$cond),
+ "mov$cond %xcc, $simm11, $rd",
+ [(set i32:$rd,
+ (SPselectxcc simm11:$simm11, i32:$f, imm:$cond))]>;
+} // cc
+
+let intcc = 1, opf_cc = 0b10 in {
+def FMOVS_XCC : F4_3<0b110101, 0b000001, (outs FPRegs:$rd),
+ (ins FPRegs:$rs2, FPRegs:$f, CCOp:$cond),
+ "fmovs$cond %xcc, $rs2, $rd",
+ [(set f32:$rd,
+ (SPselectxcc f32:$rs2, f32:$f, imm:$cond))]>;
+def FMOVD_XCC : F4_3<0b110101, 0b000010, (outs DFPRegs:$rd),
+ (ins DFPRegs:$rs2, DFPRegs:$f, CCOp:$cond),
+ "fmovd$cond %xcc, $rs2, $rd",
+ [(set f64:$rd,
+ (SPselectxcc f64:$rs2, f64:$f, imm:$cond))]>;
+def FMOVQ_XCC : F4_3<0b110101, 0b000011, (outs QFPRegs:$rd),
+ (ins QFPRegs:$rs2, QFPRegs:$f, CCOp:$cond),
+ "fmovq$cond %xcc, $rs2, $rd",
+ [(set f128:$rd,
+ (SPselectxcc f128:$rs2, f128:$f, imm:$cond))]>;
+} // opf_cc
+} // Uses, Constraints
+
+// Branch On integer register with Prediction (BPr).
+let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in
+multiclass BranchOnReg<bits<3> cond, string OpcStr> {
+ def napt : F2_4<cond, 0, 1, (outs), (ins I64Regs:$rs1, bprtarget16:$imm16),
+ !strconcat(OpcStr, " $rs1, $imm16"), []>;
+ def apt : F2_4<cond, 1, 1, (outs), (ins I64Regs:$rs1, bprtarget16:$imm16),
+ !strconcat(OpcStr, ",a $rs1, $imm16"), []>;
+ def napn : F2_4<cond, 0, 0, (outs), (ins I64Regs:$rs1, bprtarget16:$imm16),
+ !strconcat(OpcStr, ",pn $rs1, $imm16"), []>;
+ def apn : F2_4<cond, 1, 0, (outs), (ins I64Regs:$rs1, bprtarget16:$imm16),
+ !strconcat(OpcStr, ",a,pn $rs1, $imm16"), []>;
+}
+
+multiclass bpr_alias<string OpcStr, Instruction NAPT, Instruction APT> {
+ def : InstAlias<!strconcat(OpcStr, ",pt $rs1, $imm16"),
+ (NAPT I64Regs:$rs1, bprtarget16:$imm16), 0>;
+ def : InstAlias<!strconcat(OpcStr, ",a,pt $rs1, $imm16"),
+ (APT I64Regs:$rs1, bprtarget16:$imm16), 0>;
+}
+
+defm BPZ : BranchOnReg<0b001, "brz">;
+defm BPLEZ : BranchOnReg<0b010, "brlez">;
+defm BPLZ : BranchOnReg<0b011, "brlz">;
+defm BPNZ : BranchOnReg<0b101, "brnz">;
+defm BPGZ : BranchOnReg<0b110, "brgz">;
+defm BPGEZ : BranchOnReg<0b111, "brgez">;
+
+defm : bpr_alias<"brz", BPZnapt, BPZapt >;
+defm : bpr_alias<"brlez", BPLEZnapt, BPLEZapt>;
+defm : bpr_alias<"brlz", BPLZnapt, BPLZapt >;
+defm : bpr_alias<"brnz", BPNZnapt, BPNZapt >;
+defm : bpr_alias<"brgz", BPGZnapt, BPGZapt >;
+defm : bpr_alias<"brgez", BPGEZnapt, BPGEZapt>;
+
+// Move integer register on register condition (MOVr).
+multiclass MOVR< bits<3> rcond, string OpcStr> {
+ def rr : F4_4r<0b101111, 0b00000, rcond, (outs I64Regs:$rd),
+ (ins I64Regs:$rs1, IntRegs:$rs2),
+ !strconcat(OpcStr, " $rs1, $rs2, $rd"), []>;
+
+ def ri : F4_4i<0b101111, rcond, (outs I64Regs:$rd),
+ (ins I64Regs:$rs1, i64imm:$simm10),
+ !strconcat(OpcStr, " $rs1, $simm10, $rd"), []>;
+}
+
+defm MOVRRZ : MOVR<0b001, "movrz">;
+defm MOVRLEZ : MOVR<0b010, "movrlez">;
+defm MOVRLZ : MOVR<0b011, "movrlz">;
+defm MOVRNZ : MOVR<0b101, "movrnz">;
+defm MOVRGZ : MOVR<0b110, "movrgz">;
+defm MOVRGEZ : MOVR<0b111, "movrgez">;
+
+// Move FP register on integer register condition (FMOVr).
+multiclass FMOVR<bits<3> rcond, string OpcStr> {
+
+ def S : F4_4r<0b110101, 0b00101, rcond,
+ (outs FPRegs:$rd), (ins I64Regs:$rs1, FPRegs:$rs2),
+ !strconcat(!strconcat("fmovrs", OpcStr)," $rs1, $rs2, $rd"),
+ []>;
+ def D : F4_4r<0b110101, 0b00110, rcond,
+ (outs FPRegs:$rd), (ins I64Regs:$rs1, FPRegs:$rs2),
+ !strconcat(!strconcat("fmovrd", OpcStr)," $rs1, $rs2, $rd"),
+ []>;
+ def Q : F4_4r<0b110101, 0b00111, rcond,
+ (outs FPRegs:$rd), (ins I64Regs:$rs1, FPRegs:$rs2),
+ !strconcat(!strconcat("fmovrq", OpcStr)," $rs1, $rs2, $rd"),
+ []>, Requires<[HasHardQuad]>;
+}
+
+let Predicates = [HasV9] in {
+ defm FMOVRZ : FMOVR<0b001, "z">;
+ defm FMOVRLEZ : FMOVR<0b010, "lez">;
+ defm FMOVRLZ : FMOVR<0b011, "lz">;
+ defm FMOVRNZ : FMOVR<0b101, "nz">;
+ defm FMOVRGZ : FMOVR<0b110, "gz">;
+ defm FMOVRGEZ : FMOVR<0b111, "gez">;
+}
+
+//===----------------------------------------------------------------------===//
+// 64-bit Floating Point Conversions.
+//===----------------------------------------------------------------------===//
+
+let Predicates = [Is64Bit] in {
+
+def FXTOS : F3_3u<2, 0b110100, 0b010000100,
+ (outs FPRegs:$rd), (ins DFPRegs:$rs2),
+ "fxtos $rs2, $rd",
+ [(set FPRegs:$rd, (SPxtof DFPRegs:$rs2))]>;
+def FXTOD : F3_3u<2, 0b110100, 0b010001000,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs2),
+ "fxtod $rs2, $rd",
+ [(set DFPRegs:$rd, (SPxtof DFPRegs:$rs2))]>;
+def FXTOQ : F3_3u<2, 0b110100, 0b010001100,
+ (outs QFPRegs:$rd), (ins DFPRegs:$rs2),
+ "fxtoq $rs2, $rd",
+ [(set QFPRegs:$rd, (SPxtof DFPRegs:$rs2))]>,
+ Requires<[HasHardQuad]>;
+
+def FSTOX : F3_3u<2, 0b110100, 0b010000001,
+ (outs DFPRegs:$rd), (ins FPRegs:$rs2),
+ "fstox $rs2, $rd",
+ [(set DFPRegs:$rd, (SPftox FPRegs:$rs2))]>;
+def FDTOX : F3_3u<2, 0b110100, 0b010000010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs2),
+ "fdtox $rs2, $rd",
+ [(set DFPRegs:$rd, (SPftox DFPRegs:$rs2))]>;
+def FQTOX : F3_3u<2, 0b110100, 0b010000011,
+ (outs DFPRegs:$rd), (ins QFPRegs:$rs2),
+ "fqtox $rs2, $rd",
+ [(set DFPRegs:$rd, (SPftox QFPRegs:$rs2))]>,
+ Requires<[HasHardQuad]>;
+
+} // Predicates = [Is64Bit]
+
+def : Pat<(SPselectxcc i64:$t, i64:$f, imm:$cond),
+ (MOVXCCrr $t, $f, imm:$cond)>;
+def : Pat<(SPselectxcc (i64 simm11:$t), i64:$f, imm:$cond),
+ (MOVXCCri (as_i32imm $t), $f, imm:$cond)>;
+
+def : Pat<(SPselecticc i64:$t, i64:$f, imm:$cond),
+ (MOVICCrr $t, $f, imm:$cond)>;
+def : Pat<(SPselecticc (i64 simm11:$t), i64:$f, imm:$cond),
+ (MOVICCri (as_i32imm $t), $f, imm:$cond)>;
+
+def : Pat<(SPselectfcc i64:$t, i64:$f, imm:$cond),
+ (MOVFCCrr $t, $f, imm:$cond)>;
+def : Pat<(SPselectfcc (i64 simm11:$t), i64:$f, imm:$cond),
+ (MOVFCCri (as_i32imm $t), $f, imm:$cond)>;
+
+} // Predicates = [Is64Bit]
+
+
+// 64-bit SETHI
+let Predicates = [Is64Bit], isCodeGenOnly = 1 in {
+def SETHIXi : F2_1<0b100,
+ (outs IntRegs:$rd), (ins i64imm:$imm22),
+ "sethi $imm22, $rd",
+ [(set i64:$rd, SETHIimm:$imm22)]>;
+}
+
+// ATOMICS.
+let Predicates = [Is64Bit], Constraints = "$swap = $rd", asi = 0b10000000 in {
+ def CASXrr: F3_1_asi<3, 0b111110,
+ (outs I64Regs:$rd), (ins I64Regs:$rs1, I64Regs:$rs2,
+ I64Regs:$swap),
+ "casx [$rs1], $rs2, $rd",
+ [(set i64:$rd,
+ (atomic_cmp_swap_64 i64:$rs1, i64:$rs2, i64:$swap))]>;
+
+} // Predicates = [Is64Bit], Constraints = ...
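+
+// Note: asi = 0b10000000 above is ASI_PRIMARY (0x80), the default address
+// space that a casx without an explicit ASI operates in.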
+
+let Predicates = [Is64Bit] in {
+
+def : Pat<(atomic_fence imm, imm), (MEMBARi 0xf)>;
+
+// atomic_load_64 addr -> load addr
+def : Pat<(i64 (atomic_load_64 ADDRrr:$src)), (LDXrr ADDRrr:$src)>;
+def : Pat<(i64 (atomic_load_64 ADDRri:$src)), (LDXri ADDRri:$src)>;
+
+// atomic_store_64 val, addr -> store val, addr
+def : Pat<(atomic_store_64 ADDRrr:$dst, i64:$val), (STXrr ADDRrr:$dst, $val)>;
+def : Pat<(atomic_store_64 ADDRri:$dst, i64:$val), (STXri ADDRri:$dst, $val)>;
+
+} // Predicates = [Is64Bit]
+
+let Predicates = [Is64Bit], hasSideEffects = 1, Uses = [ICC], cc = 0b10 in
+ defm TXCC : TRAP<"%xcc">;
+
+// Global addresses, constant pool entries
+let Predicates = [Is64Bit] in {
+
+def : Pat<(SPhi tglobaladdr:$in), (SETHIi tglobaladdr:$in)>;
+def : Pat<(SPlo tglobaladdr:$in), (ORXri (i64 G0), tglobaladdr:$in)>;
+def : Pat<(SPhi tconstpool:$in), (SETHIi tconstpool:$in)>;
+def : Pat<(SPlo tconstpool:$in), (ORXri (i64 G0), tconstpool:$in)>;
+
+// GlobalTLS addresses
+def : Pat<(SPhi tglobaltlsaddr:$in), (SETHIi tglobaltlsaddr:$in)>;
+def : Pat<(SPlo tglobaltlsaddr:$in), (ORXri (i64 G0), tglobaltlsaddr:$in)>;
+def : Pat<(add (SPhi tglobaltlsaddr:$in1), (SPlo tglobaltlsaddr:$in2)),
+ (ADDXri (SETHIXi tglobaltlsaddr:$in1), (tglobaltlsaddr:$in2))>;
+def : Pat<(xor (SPhi tglobaltlsaddr:$in1), (SPlo tglobaltlsaddr:$in2)),
+ (XORXri (SETHIXi tglobaltlsaddr:$in1), (tglobaltlsaddr:$in2))>;
+
+// Blockaddress
+def : Pat<(SPhi tblockaddress:$in), (SETHIi tblockaddress:$in)>;
+def : Pat<(SPlo tblockaddress:$in), (ORXri (i64 G0), tblockaddress:$in)>;
+
+// Add reg, lo. This is used when taking the addr of a global/constpool entry.
+def : Pat<(add iPTR:$r, (SPlo tglobaladdr:$in)), (ADDXri $r, tglobaladdr:$in)>;
+def : Pat<(add iPTR:$r, (SPlo tconstpool:$in)), (ADDXri $r, tconstpool:$in)>;
+def : Pat<(add iPTR:$r, (SPlo tblockaddress:$in)),
+ (ADDXri $r, tblockaddress:$in)>;
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrAliases.td b/contrib/llvm/lib/Target/Sparc/SparcInstrAliases.td
new file mode 100644
index 000000000000..df570cea8da8
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrAliases.td
@@ -0,0 +1,506 @@
+//===-- SparcInstrAliases.td - Instruction Aliases for Sparc Target -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains instruction aliases for Sparc.
+//===----------------------------------------------------------------------===//
+
+// Instruction aliases for conditional moves.
+
+// mov<cond> <ccreg> rs2, rd
+multiclass intcond_mov_alias<string cond, int condVal, string ccreg,
+ Instruction movrr, Instruction movri,
+ Instruction fmovs, Instruction fmovd> {
+
+ // mov<cond> (%icc|%xcc), rs2, rd
+ def : InstAlias<!strconcat(!strconcat(!strconcat("mov", cond), ccreg),
+ ", $rs2, $rd"),
+ (movrr IntRegs:$rd, IntRegs:$rs2, condVal)>;
+
+ // mov<cond> (%icc|%xcc), simm11, rd
+ def : InstAlias<!strconcat(!strconcat(!strconcat("mov", cond), ccreg),
+ ", $simm11, $rd"),
+ (movri IntRegs:$rd, i32imm:$simm11, condVal)>;
+
+ // fmovs<cond> (%icc|%xcc), $rs2, $rd
+ def : InstAlias<!strconcat(!strconcat(!strconcat("fmovs", cond), ccreg),
+ ", $rs2, $rd"),
+ (fmovs FPRegs:$rd, FPRegs:$rs2, condVal)>;
+
+ // fmovd<cond> (%icc|%xcc), $rs2, $rd
+ def : InstAlias<!strconcat(!strconcat(!strconcat("fmovd", cond), ccreg),
+ ", $rs2, $rd"),
+ (fmovd DFPRegs:$rd, DFPRegs:$rs2, condVal)>;
+}
+
+// mov<cond> <ccreg> rs2, rd
+multiclass fpcond_mov_alias<string cond, int condVal,
+ Instruction movrr, Instruction movri,
+ Instruction fmovs, Instruction fmovd> {
+
+ // mov<cond> %fcc[0-3], rs2, rd
+ def : InstAlias<!strconcat(!strconcat("mov", cond), " $cc, $rs2, $rd"),
+ (movrr IntRegs:$rd, FCCRegs:$cc, IntRegs:$rs2, condVal)>;
+
+ // mov<cond> %fcc[0-3], simm11, rd
+ def : InstAlias<!strconcat(!strconcat("mov", cond), " $cc, $simm11, $rd"),
+ (movri IntRegs:$rd, FCCRegs:$cc, i32imm:$simm11, condVal)>;
+
+ // fmovs<cond> %fcc[0-3], $rs2, $rd
+ def : InstAlias<!strconcat(!strconcat("fmovs", cond), " $cc, $rs2, $rd"),
+ (fmovs FPRegs:$rd, FCCRegs:$cc, FPRegs:$rs2, condVal)>;
+
+ // fmovd<cond> %fcc[0-3], $rs2, $rd
+ def : InstAlias<!strconcat(!strconcat("fmovd", cond), " $cc, $rs2, $rd"),
+ (fmovd DFPRegs:$rd, FCCRegs:$cc, DFPRegs:$rs2, condVal)>;
+}
+
+// Instruction aliases for integer conditional branches and moves.
+multiclass int_cond_alias<string cond, int condVal> {
+
+ // b<cond> $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), " $imm"),
+ (BCOND brtarget:$imm, condVal)>;
+
+ // b<cond>,a $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a $imm"),
+ (BCONDA brtarget:$imm, condVal)>;
+
+ // b<cond> %icc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), " %icc, $imm"),
+ (BPICC brtarget:$imm, condVal)>, Requires<[HasV9]>;
+
+ // b<cond>,pt %icc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",pt %icc, $imm"),
+ (BPICC brtarget:$imm, condVal)>, Requires<[HasV9]>;
+
+ // b<cond>,a %icc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a %icc, $imm"),
+ (BPICCA brtarget:$imm, condVal)>, Requires<[HasV9]>;
+
+ // b<cond>,a,pt %icc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a,pt %icc, $imm"),
+ (BPICCA brtarget:$imm, condVal)>, Requires<[HasV9]>;
+
+ // b<cond>,pn %icc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",pn %icc, $imm"),
+ (BPICCNT brtarget:$imm, condVal)>, Requires<[HasV9]>;
+
+ // b<cond>,a,pn %icc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a,pn %icc, $imm"),
+ (BPICCANT brtarget:$imm, condVal)>, Requires<[HasV9]>;
+
+ // b<cond> %xcc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), " %xcc, $imm"),
+ (BPXCC brtarget:$imm, condVal)>, Requires<[Is64Bit]>;
+
+ // b<cond>,pt %xcc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",pt %xcc, $imm"),
+ (BPXCC brtarget:$imm, condVal)>, Requires<[Is64Bit]>;
+
+ // b<cond>,a %xcc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a %xcc, $imm"),
+ (BPXCCA brtarget:$imm, condVal)>, Requires<[Is64Bit]>;
+
+ // b<cond>,a,pt %xcc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a,pt %xcc, $imm"),
+ (BPXCCA brtarget:$imm, condVal)>, Requires<[Is64Bit]>;
+
+ // b<cond>,pn %xcc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",pn %xcc, $imm"),
+ (BPXCCNT brtarget:$imm, condVal)>, Requires<[Is64Bit]>;
+
+ // b<cond>,a,pn %xcc, $imm
+ def : InstAlias<!strconcat(!strconcat("b", cond), ",a,pn %xcc, $imm"),
+ (BPXCCANT brtarget:$imm, condVal)>, Requires<[Is64Bit]>;
+
+
+ defm : intcond_mov_alias<cond, condVal, " %icc",
+ MOVICCrr, MOVICCri,
+ FMOVS_ICC, FMOVD_ICC>, Requires<[HasV9]>;
+
+ defm : intcond_mov_alias<cond, condVal, " %xcc",
+ MOVXCCrr, MOVXCCri,
+ FMOVS_XCC, FMOVD_XCC>, Requires<[Is64Bit]>;
+
+ // fmovq<cond> (%icc|%xcc), $rs2, $rd
+ def : InstAlias<!strconcat(!strconcat("fmovq", cond), " %icc, $rs2, $rd"),
+ (FMOVQ_ICC QFPRegs:$rd, QFPRegs:$rs2, condVal)>,
+ Requires<[HasV9, HasHardQuad]>;
+ def : InstAlias<!strconcat(!strconcat("fmovq", cond), " %xcc, $rs2, $rd"),
+ (FMOVQ_XCC QFPRegs:$rd, QFPRegs:$rs2, condVal)>,
+ Requires<[Is64Bit, HasHardQuad]>;
+
+ // t<cond> %icc, rs => t<cond> %icc, G0 + rs
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $rs2"),
+ (TICCrr G0, IntRegs:$rs2, condVal)>,
+ Requires<[HasV9]>;
+ // t<cond> %icc, rs1 + rs2
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $rs1 + $rs2"),
+ (TICCrr IntRegs:$rs1, IntRegs:$rs2, condVal)>,
+ Requires<[HasV9]>;
+
+
+ // t<cond> %xcc, rs => t<cond> %xcc, G0 + rs
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $rs2"),
+ (TXCCrr G0, IntRegs:$rs2, condVal)>,
+ Requires<[HasV9]>;
+ // t<cond> %xcc, rs1 + rs2
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $rs1 + $rs2"),
+ (TXCCrr IntRegs:$rs1, IntRegs:$rs2, condVal)>,
+ Requires<[HasV9]>;
+
+
+ // t<cond> rs => t<cond> %icc, G0 + rs2
+ //def : InstAlias<!strconcat(!strconcat("t", cond), " $rs2"),
+ // (TICCrr G0, IntRegs:$rs2, condVal)>,
+ // Requires<[HasV9]>;
+
+ // t<cond> rs1 + rs2 => t<cond> %icc, rs1 + rs2
+ //def : InstAlias<!strconcat(!strconcat("t", cond), " $rs1 + $rs2"),
+ // (TICCrr IntRegs:$rs1, IntRegs:$rs2, condVal)>,
+ // Requires<[HasV9]>;
+
+ // t<cond> %icc, imm => t<cond> %icc, G0 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $imm"),
+ (TICCri G0, i32imm:$imm, condVal)>,
+ Requires<[HasV9]>;
+ // t<cond> %icc, rs1 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %icc, $rs1 + $imm"),
+ (TICCri IntRegs:$rs1, i32imm:$imm, condVal)>,
+ Requires<[HasV9]>;
+ // t<cond> %xcc, imm => t<cond> %xcc, G0 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $imm"),
+ (TXCCri G0, i32imm:$imm, condVal)>,
+ Requires<[HasV9]>;
+ // t<cond> %xcc, rs1 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " %xcc, $rs1 + $imm"),
+ (TXCCri IntRegs:$rs1, i32imm:$imm, condVal)>,
+ Requires<[HasV9]>;
+
+ // t<cond> imm => t<cond> G0 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " $imm"),
+ (TRAPri G0, i32imm:$imm, condVal)>;
+
+ // t<cond> rs1 + imm => t<cond> rs1 + imm
+ def : InstAlias<!strconcat(!strconcat("t", cond), " $rs1 + $imm"),
+ (TRAPri IntRegs:$rs1, i32imm:$imm, condVal)>;
+
+ // t<cond> rs1 => t<cond> G0 + rs1
+ def : InstAlias<!strconcat(!strconcat("t", cond), " $rs1"),
+ (TRAPrr G0, IntRegs:$rs1, condVal)>;
+
+ // t<cond> rs1 + rs2
+ def : InstAlias<!strconcat(!strconcat("t", cond), " $rs1 + $rs2"),
+ (TRAPrr IntRegs:$rs1, IntRegs:$rs2, condVal)>;
+}
+
+
+// Instruction aliases for floating point conditional branches and moves.
+multiclass fp_cond_alias<string cond, int condVal> {
+
+ // fb<cond> $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), " $imm"),
+ (FBCOND brtarget:$imm, condVal), 0>;
+
+ // fb<cond>,a $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), ",a $imm"),
+ (FBCONDA brtarget:$imm, condVal), 0>;
+
+ // fb<cond> %fcc0, $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), " $cc, $imm"),
+ (BPFCC brtarget:$imm, condVal, FCCRegs:$cc)>,
+ Requires<[HasV9]>;
+
+ // fb<cond>,pt %fcc0, $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), ",pt $cc, $imm"),
+ (BPFCC brtarget:$imm, condVal, FCCRegs:$cc)>,
+ Requires<[HasV9]>;
+
+ // fb<cond>,a %fcc0, $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), ",a $cc, $imm"),
+ (BPFCCA brtarget:$imm, condVal, FCCRegs:$cc)>,
+ Requires<[HasV9]>;
+
+ // fb<cond>,a,pt %fcc0, $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), ",a,pt $cc, $imm"),
+ (BPFCCA brtarget:$imm, condVal, FCCRegs:$cc)>,
+ Requires<[HasV9]>;
+
+ // fb<cond>,pn %fcc0, $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), ",pn $cc, $imm"),
+ (BPFCCNT brtarget:$imm, condVal, FCCRegs:$cc)>,
+ Requires<[HasV9]>;
+
+ // fb<cond>,a,pn %fcc0, $imm
+ def : InstAlias<!strconcat(!strconcat("fb", cond), ",a,pn $cc, $imm"),
+ (BPFCCANT brtarget:$imm, condVal, FCCRegs:$cc)>,
+ Requires<[HasV9]>;
+
+ defm : fpcond_mov_alias<cond, condVal,
+ V9MOVFCCrr, V9MOVFCCri,
+ V9FMOVS_FCC, V9FMOVD_FCC>, Requires<[HasV9]>;
+
+ // fmovq<cond> %fcc0, $rs2, $rd
+ def : InstAlias<!strconcat(!strconcat("fmovq", cond), " $cc, $rs2, $rd"),
+ (V9FMOVQ_FCC QFPRegs:$rd, FCCRegs:$cc, QFPRegs:$rs2,
+ condVal)>,
+ Requires<[HasV9, HasHardQuad]>;
+}
+
+
+// Instruction aliases for co-processor conditional branches.
+multiclass cp_cond_alias<string cond, int condVal> {
+
+ // cb<cond> $imm
+ def : InstAlias<!strconcat(!strconcat("cb", cond), " $imm"),
+ (CBCOND brtarget:$imm, condVal), 0>;
+
+ // cb<cond>,a $imm
+ def : InstAlias<!strconcat(!strconcat("cb", cond), ",a $imm"),
+ (CBCONDA brtarget:$imm, condVal), 0>;
+}
+
+defm : int_cond_alias<"a", 0b1000>;
+defm : int_cond_alias<"n", 0b0000>;
+defm : int_cond_alias<"ne", 0b1001>;
+defm : int_cond_alias<"e", 0b0001>;
+defm : int_cond_alias<"g", 0b1010>;
+defm : int_cond_alias<"le", 0b0010>;
+defm : int_cond_alias<"ge", 0b1011>;
+defm : int_cond_alias<"l", 0b0011>;
+defm : int_cond_alias<"gu", 0b1100>;
+defm : int_cond_alias<"leu", 0b0100>;
+defm : int_cond_alias<"cc", 0b1101>;
+defm : int_cond_alias<"cs", 0b0101>;
+defm : int_cond_alias<"pos", 0b1110>;
+defm : int_cond_alias<"neg", 0b0110>;
+defm : int_cond_alias<"vc", 0b1111>;
+defm : int_cond_alias<"vs", 0b0111>;
+let EmitPriority = 0 in
+{
+ defm : int_cond_alias<"", 0b1000>; // same as a; gnu asm, not in manual
+ defm : int_cond_alias<"nz", 0b1001>; // same as ne
+ defm : int_cond_alias<"eq", 0b0001>; // same as e
+ defm : int_cond_alias<"z", 0b0001>; // same as e
+ defm : int_cond_alias<"geu", 0b1101>; // same as cc
+ defm : int_cond_alias<"lu", 0b0101>; // same as cs
+}
+defm : fp_cond_alias<"a", 0b1000>;
+defm : fp_cond_alias<"n", 0b0000>;
+defm : fp_cond_alias<"u", 0b0111>;
+defm : fp_cond_alias<"g", 0b0110>;
+defm : fp_cond_alias<"ug", 0b0101>;
+defm : fp_cond_alias<"l", 0b0100>;
+defm : fp_cond_alias<"ul", 0b0011>;
+defm : fp_cond_alias<"lg", 0b0010>;
+defm : fp_cond_alias<"ne", 0b0001>;
+defm : fp_cond_alias<"e", 0b1001>;
+defm : fp_cond_alias<"ue", 0b1010>;
+defm : fp_cond_alias<"ge", 0b1011>;
+defm : fp_cond_alias<"uge", 0b1100>;
+defm : fp_cond_alias<"le", 0b1101>;
+defm : fp_cond_alias<"ule", 0b1110>;
+defm : fp_cond_alias<"o", 0b1111>;
+let EmitPriority = 0 in
+{
+ defm : fp_cond_alias<"", 0b1000>; // same as a; gnu asm, not in manual
+ defm : fp_cond_alias<"nz", 0b0001>; // same as ne
+ defm : fp_cond_alias<"z", 0b1001>; // same as e
+}
+
+defm : cp_cond_alias<"a", 0b1000>;
+defm : cp_cond_alias<"n", 0b0000>;
+defm : cp_cond_alias<"3", 0b0111>;
+defm : cp_cond_alias<"2", 0b0110>;
+defm : cp_cond_alias<"23", 0b0101>;
+defm : cp_cond_alias<"1", 0b0100>;
+defm : cp_cond_alias<"13", 0b0011>;
+defm : cp_cond_alias<"12", 0b0010>;
+defm : cp_cond_alias<"123", 0b0001>;
+defm : cp_cond_alias<"0", 0b1001>;
+defm : cp_cond_alias<"03", 0b1010>;
+defm : cp_cond_alias<"02", 0b1011>;
+defm : cp_cond_alias<"023", 0b1100>;
+defm : cp_cond_alias<"01", 0b1101>;
+defm : cp_cond_alias<"013", 0b1110>;
+defm : cp_cond_alias<"012", 0b1111>;
+let EmitPriority = 0 in defm : cp_cond_alias<"", 0b1000>; // same as a; gnu asm, not in manual
+
+// Section A.3 Synthetic Instructions
+
+// Most are marked as Emit=0, so that they are not used for disassembly. This
+// is purely an aesthetic choice; the policy is to prefer the non-alias form,
+// except for the most obvious and clarifying aliases: cmp, jmp, call, tst,
+// ret, retl.
+
+// Note: cmp is handled in SparcInstrInfo.
+// jmp/call/ret/retl have special case handling for output in
+// SparcInstPrinter.cpp
+
+// jmp addr -> jmpl addr, %g0
+def : InstAlias<"jmp $addr", (JMPLrr G0, MEMrr:$addr), 0>;
+def : InstAlias<"jmp $addr", (JMPLri G0, MEMri:$addr), 0>;
+
+// call addr -> jmpl addr, %o7
+def : InstAlias<"call $addr", (JMPLrr O7, MEMrr:$addr), 0>;
+def : InstAlias<"call $addr", (JMPLri O7, MEMri:$addr), 0>;
+
+// tst reg -> orcc %g0, reg, %g0
+def : InstAlias<"tst $rs2", (ORCCrr G0, IntRegs:$rs2, G0)>;
+
+// ret -> jmpl %i7+8, %g0 (aka RET 8)
+def : InstAlias<"ret", (RET 8)>;
+
+// retl -> jmpl %o7+8, %g0 (aka RETL 8)
+def : InstAlias<"retl", (RETL 8)>;
+
+// restore -> restore %g0, %g0, %g0
+def : InstAlias<"restore", (RESTORErr G0, G0, G0)>;
+
+// save -> save %g0, %g0, %g0
+def : InstAlias<"save", (SAVErr G0, G0, G0)>;
+
+// set value, rd
+// (turns into a sequence of sethi+or, depending on the value)
+// def : InstAlias<"set $val, $rd", (ORri IntRegs:$rd, (SETHIi (HI22 imm:$val)), (LO10 imm:$val))>;
+def SET : AsmPseudoInst<(outs IntRegs:$rd), (ins i32imm:$val), "set $val, $rd">;
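+
+// For example (illustrative only), the expansion depends on the value:
+//   set 42, %o0          ! ->  mov 42, %o0              (fits in simm13)
+//   set 0x89abcdef, %o0  ! ->  sethi %hi(0x89abcdef), %o0
+//                        !     or    %o0, %lo(0x89abcdef), %o0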
+
+// not rd -> xnor rd, %g0, rd
+def : InstAlias<"not $rd", (XNORrr IntRegs:$rd, IntRegs:$rd, G0), 0>;
+
+// not reg, rd -> xnor reg, %g0, rd
+def : InstAlias<"not $rs1, $rd", (XNORrr IntRegs:$rd, IntRegs:$rs1, G0), 0>;
+
+// neg rd -> sub %g0, rd, rd
+def : InstAlias<"neg $rd", (SUBrr IntRegs:$rd, G0, IntRegs:$rd), 0>;
+
+// neg reg, rd -> sub %g0, reg, rd
+def : InstAlias<"neg $rs2, $rd", (SUBrr IntRegs:$rd, G0, IntRegs:$rs2), 0>;
+
+// inc rd -> add rd, 1, rd
+def : InstAlias<"inc $rd", (ADDri IntRegs:$rd, IntRegs:$rd, 1), 0>;
+
+// inc simm13, rd -> add rd, simm13, rd
+def : InstAlias<"inc $simm13, $rd", (ADDri IntRegs:$rd, IntRegs:$rd, i32imm:$simm13), 0>;
+
+// inccc rd -> addcc rd, 1, rd
+def : InstAlias<"inccc $rd", (ADDCCri IntRegs:$rd, IntRegs:$rd, 1), 0>;
+
+// inccc simm13, rd -> addcc rd, simm13, rd
+def : InstAlias<"inccc $simm13, $rd", (ADDCCri IntRegs:$rd, IntRegs:$rd, i32imm:$simm13), 0>;
+
+// dec rd -> sub rd, 1, rd
+def : InstAlias<"dec $rd", (SUBri IntRegs:$rd, IntRegs:$rd, 1), 0>;
+
+// dec simm13, rd -> sub rd, simm13, rd
+def : InstAlias<"dec $simm13, $rd", (SUBri IntRegs:$rd, IntRegs:$rd, i32imm:$simm13), 0>;
+
+// deccc rd -> subcc rd, 1, rd
+def : InstAlias<"deccc $rd", (SUBCCri IntRegs:$rd, IntRegs:$rd, 1), 0>;
+
+// deccc simm13, rd -> subcc rd, simm13, rd
+def : InstAlias<"deccc $simm13, $rd", (SUBCCri IntRegs:$rd, IntRegs:$rd, i32imm:$simm13), 0>;
+
+// btst reg_or_imm, reg -> andcc reg,reg_or_imm,%g0
+def : InstAlias<"btst $rs2, $rs1", (ANDCCrr G0, IntRegs:$rs1, IntRegs:$rs2), 0>;
+def : InstAlias<"btst $simm13, $rs1", (ANDCCri G0, IntRegs:$rs1, i32imm:$simm13), 0>;
+
+// bset reg_or_imm, rd -> or rd,reg_or_imm,rd
+def : InstAlias<"bset $rs2, $rd", (ORrr IntRegs:$rd, IntRegs:$rd, IntRegs:$rs2), 0>;
+def : InstAlias<"bset $simm13, $rd", (ORri IntRegs:$rd, IntRegs:$rd, i32imm:$simm13), 0>;
+
+// bclr reg_or_imm, rd -> andn rd,reg_or_imm,rd
+def : InstAlias<"bclr $rs2, $rd", (ANDNrr IntRegs:$rd, IntRegs:$rd, IntRegs:$rs2), 0>;
+def : InstAlias<"bclr $simm13, $rd", (ANDNri IntRegs:$rd, IntRegs:$rd, i32imm:$simm13), 0>;
+
+// btog reg_or_imm, rd -> xor rd,reg_or_imm,rd
+def : InstAlias<"btog $rs2, $rd", (XORrr IntRegs:$rd, IntRegs:$rd, IntRegs:$rs2), 0>;
+def : InstAlias<"btog $simm13, $rd", (XORri IntRegs:$rd, IntRegs:$rd, i32imm:$simm13), 0>;
+
+
+// clr rd -> or %g0, %g0, rd
+def : InstAlias<"clr $rd", (ORrr IntRegs:$rd, G0, G0), 0>;
+
+// clr{b,h,} [addr] -> st{b,h,} %g0, [addr]
+def : InstAlias<"clrb [$addr]", (STBrr MEMrr:$addr, G0), 0>;
+def : InstAlias<"clrb [$addr]", (STBri MEMri:$addr, G0), 0>;
+def : InstAlias<"clrh [$addr]", (STHrr MEMrr:$addr, G0), 0>;
+def : InstAlias<"clrh [$addr]", (STHri MEMri:$addr, G0), 0>;
+def : InstAlias<"clr [$addr]", (STrr MEMrr:$addr, G0), 0>;
+def : InstAlias<"clr [$addr]", (STri MEMri:$addr, G0), 0>;
+
+
+// mov reg_or_imm, rd -> or %g0, reg_or_imm, rd
+def : InstAlias<"mov $rs2, $rd", (ORrr IntRegs:$rd, G0, IntRegs:$rs2)>;
+def : InstAlias<"mov $simm13, $rd", (ORri IntRegs:$rd, G0, i32imm:$simm13)>;
+
+// mov specialreg, rd -> rd specialreg, rd
+def : InstAlias<"mov $asr, $rd", (RDASR IntRegs:$rd, ASRRegs:$asr), 0>;
+def : InstAlias<"mov %psr, $rd", (RDPSR IntRegs:$rd), 0>;
+def : InstAlias<"mov %wim, $rd", (RDWIM IntRegs:$rd), 0>;
+def : InstAlias<"mov %tbr, $rd", (RDTBR IntRegs:$rd), 0>;
+
+// mov reg_or_imm, specialreg -> wr %g0, reg_or_imm, specialreg
+def : InstAlias<"mov $rs2, $asr", (WRASRrr ASRRegs:$asr, G0, IntRegs:$rs2), 0>;
+def : InstAlias<"mov $simm13, $asr", (WRASRri ASRRegs:$asr, G0, i32imm:$simm13), 0>;
+def : InstAlias<"mov $rs2, %psr", (WRPSRrr G0, IntRegs:$rs2), 0>;
+def : InstAlias<"mov $simm13, %psr", (WRPSRri G0, i32imm:$simm13), 0>;
+def : InstAlias<"mov $rs2, %wim", (WRWIMrr G0, IntRegs:$rs2), 0>;
+def : InstAlias<"mov $simm13, %wim", (WRWIMri G0, i32imm:$simm13), 0>;
+def : InstAlias<"mov $rs2, %tbr", (WRTBRrr G0, IntRegs:$rs2), 0>;
+def : InstAlias<"mov $simm13, %tbr", (WRTBRri G0, i32imm:$simm13), 0>;
+
+// End of Section A.3
+
+// wr reg_or_imm, specialreg -> wr %g0, reg_or_imm, specialreg
+// (aka: omit the first arg when it's g0. This is not in the manual, but is
+// supported by GNU and Solaris as.)
+def : InstAlias<"wr $rs2, $asr", (WRASRrr ASRRegs:$asr, G0, IntRegs:$rs2), 0>;
+def : InstAlias<"wr $simm13, $asr", (WRASRri ASRRegs:$asr, G0, i32imm:$simm13), 0>;
+def : InstAlias<"wr $rs2, %psr", (WRPSRrr G0, IntRegs:$rs2), 0>;
+def : InstAlias<"wr $simm13, %psr", (WRPSRri G0, i32imm:$simm13), 0>;
+def : InstAlias<"wr $rs2, %wim", (WRWIMrr G0, IntRegs:$rs2), 0>;
+def : InstAlias<"wr $simm13, %wim", (WRWIMri G0, i32imm:$simm13), 0>;
+def : InstAlias<"wr $rs2, %tbr", (WRTBRrr G0, IntRegs:$rs2), 0>;
+def : InstAlias<"wr $simm13, %tbr", (WRTBRri G0, i32imm:$simm13), 0>;
+
+
+// flush -> flush %g0
+def : InstAlias<"flush", (FLUSH), 0>;
+
+
+def : MnemonicAlias<"lduw", "ld">, Requires<[HasV9]>;
+def : MnemonicAlias<"lduwa", "lda">, Requires<[HasV9]>;
+
+def : MnemonicAlias<"return", "rett">, Requires<[HasV9]>;
+
+def : MnemonicAlias<"addc", "addx">, Requires<[HasV9]>;
+def : MnemonicAlias<"addccc", "addxcc">, Requires<[HasV9]>;
+
+def : MnemonicAlias<"subc", "subx">, Requires<[HasV9]>;
+def : MnemonicAlias<"subccc", "subxcc">, Requires<[HasV9]>;
+
+
+def : InstAlias<"fcmps $rs1, $rs2", (V9FCMPS FCC0, FPRegs:$rs1, FPRegs:$rs2)>;
+def : InstAlias<"fcmpd $rs1, $rs2", (V9FCMPD FCC0, DFPRegs:$rs1, DFPRegs:$rs2)>;
+def : InstAlias<"fcmpq $rs1, $rs2", (V9FCMPQ FCC0, QFPRegs:$rs1, QFPRegs:$rs2)>,
+ Requires<[HasHardQuad]>;
+
+def : InstAlias<"fcmpes $rs1, $rs2", (V9FCMPES FCC0, FPRegs:$rs1, FPRegs:$rs2)>;
+def : InstAlias<"fcmped $rs1, $rs2", (V9FCMPED FCC0, DFPRegs:$rs1,
+ DFPRegs:$rs2)>;
+def : InstAlias<"fcmpeq $rs1, $rs2", (V9FCMPEQ FCC0, QFPRegs:$rs1,
+ QFPRegs:$rs2)>,
+ Requires<[HasHardQuad]>;
+
+// signx rd -> sra rd, %g0, rd
+def : InstAlias<"signx $rd", (SRArr IntRegs:$rd, IntRegs:$rd, G0), 0>, Requires<[HasV9]>;
+
+// signx reg, rd -> sra reg, %g0, rd
+def : InstAlias<"signx $rs1, $rd", (SRArr IntRegs:$rd, IntRegs:$rs1, G0), 0>, Requires<[HasV9]>;
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td b/contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td
new file mode 100644
index 000000000000..76366c6695f4
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrFormats.td
@@ -0,0 +1,369 @@
+//===-- SparcInstrFormats.td - Sparc Instruction Formats ---*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+class InstSP<dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : Instruction {
+ field bits<32> Inst;
+
+ let Namespace = "SP";
+ let Size = 4;
+
+ bits<2> op;
+ let Inst{31-30} = op; // Top two bits are the 'op' field
+
+ dag OutOperandList = outs;
+ dag InOperandList = ins;
+ let AsmString = asmstr;
+ let Pattern = pattern;
+
+ let DecoderNamespace = "Sparc";
+ field bits<32> SoftFail = 0;
+
+ let Itinerary = itin;
+}
+
+//===----------------------------------------------------------------------===//
+// Format #2 instruction classes in the Sparc target
+//===----------------------------------------------------------------------===//
+
+// Format 2 instructions
+class F2<dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : InstSP<outs, ins, asmstr, pattern, itin> {
+ bits<3> op2;
+ bits<22> imm22;
+ let op = 0; // op = 0
+ let Inst{24-22} = op2;
+ let Inst{21-0} = imm22;
+}
+
+// Specific F2 classes: SparcV8 manual, page 44
+//
+class F2_1<bits<3> op2Val, dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F2<outs, ins, asmstr, pattern, itin> {
+ bits<5> rd;
+
+ let op2 = op2Val;
+
+ let Inst{29-25} = rd;
+}
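+
+// For example (worked encoding): "sethi 0x3fffff, %o0" has op=0b00,
+// rd=0b01000 (%o0), op2=0b100 and imm22 all ones, assembling to
+// 0b00_01000_100 followed by 22 one bits, i.e. 0x113fffff.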
+
+class F2_2<bits<3> op2Val, bit annul, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : F2<outs, ins, asmstr, pattern, itin> {
+ bits<4> cond;
+ let op2 = op2Val;
+
+ let Inst{29} = annul;
+ let Inst{28-25} = cond;
+}
+
+class F2_3<bits<3> op2Val, bit annul, bit pred,
+ dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : InstSP<outs, ins, asmstr, pattern, itin> {
+ bits<2> cc;
+ bits<4> cond;
+ bits<19> imm19;
+
+ let op = 0; // op = 0
+
+ let Inst{29} = annul;
+ let Inst{28-25} = cond;
+ let Inst{24-22} = op2Val;
+ let Inst{21-20} = cc;
+ let Inst{19} = pred;
+ let Inst{18-0} = imm19;
+}
+
+class F2_4<bits<3> cond, bit annul, bit pred, dag outs, dag ins,
+ string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : InstSP<outs, ins, asmstr, pattern, itin> {
+ bits<16> imm16;
+ bits<5> rs1;
+
+ let op = 0; // op = 0
+
+ let Inst{29} = annul;
+ let Inst{28} = 0;
+ let Inst{27-25} = cond;
+ let Inst{24-22} = 0b011;
+ let Inst{21-20} = imm16{15-14};
+ let Inst{19} = pred;
+ let Inst{18-14} = rs1;
+ let Inst{13-0} = imm16{13-0};
+}
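+
+// Note how F2_4 splits the 16-bit displacement, matching the v9 BPr format:
+// imm16{15-14} lands in Inst{21-20} (d16hi) and imm16{13-0} in Inst{13-0}
+// (d16lo).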
+
+
+//===----------------------------------------------------------------------===//
+// Format #3 instruction classes in the Sparc target
+//===----------------------------------------------------------------------===//
+
+class F3<dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : InstSP<outs, ins, asmstr, pattern, itin> {
+ bits<5> rd;
+ bits<6> op3;
+ bits<5> rs1;
+ let op{1} = 1; // Op = 2 or 3
+ let Inst{29-25} = rd;
+ let Inst{24-19} = op3;
+ let Inst{18-14} = rs1;
+}
+
+// Specific F3 classes: SparcV8 manual, page 44
+//
+class F3_1_asi<bits<2> opVal, bits<6> op3val, dag outs, dag ins,
+ string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : F3<outs, ins, asmstr, pattern, itin> {
+ bits<8> asi;
+ bits<5> rs2;
+
+ let op = opVal;
+ let op3 = op3val;
+
+ let Inst{13} = 0; // i field = 0
+ let Inst{12-5} = asi; // address space identifier
+ let Inst{4-0} = rs2;
+}
+
+class F3_1<bits<2> opVal, bits<6> op3val, dag outs, dag ins, string asmstr,
+ list<dag> pattern, InstrItinClass itin = IIC_iu_instr>
+ : F3_1_asi<opVal, op3val, outs, ins, asmstr, pattern, itin> {
+ let asi = 0;
+}
+
+class F3_2<bits<2> opVal, bits<6> op3val, dag outs, dag ins,
+ string asmstr, list<dag> pattern, InstrItinClass itin = IIC_iu_instr>
+ : F3<outs, ins, asmstr, pattern, itin> {
+ bits<13> simm13;
+
+ let op = opVal;
+ let op3 = op3val;
+
+ let Inst{13} = 1; // i field = 1
+ let Inst{12-0} = simm13;
+}
+
+// floating-point
+class F3_3<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
+ string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : F3<outs, ins, asmstr, pattern, itin> {
+ bits<5> rs2;
+
+ let op = opVal;
+ let op3 = op3val;
+
+ let Inst{13-5} = opfval; // fp opcode
+ let Inst{4-0} = rs2;
+}
+
+// floating-point unary operations.
+class F3_3u<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
+ string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : F3<outs, ins, asmstr, pattern, itin> {
+ bits<5> rs2;
+
+ let op = opVal;
+ let op3 = op3val;
+ let rs1 = 0;
+
+ let Inst{13-5} = opfval; // fp opcode
+ let Inst{4-0} = rs2;
+}
+
+// floating-point compares.
+class F3_3c<bits<2> opVal, bits<6> op3val, bits<9> opfval, dag outs, dag ins,
+ string asmstr, list<dag> pattern, InstrItinClass itin = NoItinerary>
+ : F3<outs, ins, asmstr, pattern, itin> {
+ bits<5> rs2;
+
+ let op = opVal;
+ let op3 = op3val;
+
+ let Inst{13-5} = opfval; // fp opcode
+ let Inst{4-0} = rs2;
+}
+
+// Shift by register rs2.
+class F3_Sr<bits<2> opVal, bits<6> op3val, bit xVal, dag outs, dag ins,
+ string asmstr, list<dag> pattern, InstrItinClass itin = IIC_iu_instr>
+ : F3<outs, ins, asmstr, pattern, itin> {
+ bit x = xVal; // 1 for 64-bit shifts.
+ bits<5> rs2;
+
+ let op = opVal;
+ let op3 = op3val;
+
+ let Inst{13} = 0; // i field = 0
+ let Inst{12} = x; // extended registers.
+ let Inst{4-0} = rs2;
+}
+
+// Shift by immediate.
+class F3_Si<bits<2> opVal, bits<6> op3val, bit xVal, dag outs, dag ins,
+ string asmstr, list<dag> pattern, InstrItinClass itin = IIC_iu_instr>
+ : F3<outs, ins, asmstr, pattern, itin> {
+ bit x = xVal; // 1 for 64-bit shifts.
+ bits<6> shcnt; // shcnt32 / shcnt64.
+
+ let op = opVal;
+ let op3 = op3val;
+
+ let Inst{13} = 1; // i field = 1
+ let Inst{12} = x; // extended registers.
+ let Inst{5-0} = shcnt;
+}
+
+// Define rr and ri shift instructions with patterns.
+multiclass F3_S<string OpcStr, bits<6> Op3Val, bit XVal, SDNode OpNode,
+ ValueType VT, RegisterClass RC,
+ InstrItinClass itin = IIC_iu_instr> {
+ def rr : F3_Sr<2, Op3Val, XVal, (outs RC:$rd), (ins RC:$rs1, IntRegs:$rs2),
+ !strconcat(OpcStr, " $rs1, $rs2, $rd"),
+ [(set VT:$rd, (OpNode VT:$rs1, i32:$rs2))],
+ itin>;
+ def ri : F3_Si<2, Op3Val, XVal, (outs RC:$rd), (ins RC:$rs1, i32imm:$shcnt),
+ !strconcat(OpcStr, " $rs1, $shcnt, $rd"),
+ [(set VT:$rd, (OpNode VT:$rs1, (i32 imm:$shcnt)))],
+ itin>;
+}
+
+class F4<bits<6> op3, dag outs, dag ins, string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : InstSP<outs, ins, asmstr, pattern, itin> {
+ bits<5> rd;
+
+ let op = 2;
+ let Inst{29-25} = rd;
+ let Inst{24-19} = op3;
+}
+
+
+class F4_1<bits<6> op3, dag outs, dag ins,
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F4<op3, outs, ins, asmstr, pattern, itin> {
+ bit intcc;
+ bits<2> cc;
+ bits<4> cond;
+ bits<5> rs2;
+
+ let Inst{4-0} = rs2;
+ let Inst{12-11} = cc;
+ let Inst{13} = 0;
+ let Inst{17-14} = cond;
+ let Inst{18} = intcc;
+}
+
+class F4_2<bits<6> op3, dag outs, dag ins,
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F4<op3, outs, ins, asmstr, pattern, itin> {
+ bit intcc;
+ bits<2> cc;
+ bits<4> cond;
+ bits<11> simm11;
+
+ let Inst{10-0} = simm11;
+ let Inst{12-11} = cc;
+ let Inst{13} = 1;
+ let Inst{17-14} = cond;
+ let Inst{18} = intcc;
+}
+
+class F4_3<bits<6> op3, bits<6> opf_low, dag outs, dag ins,
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F4<op3, outs, ins, asmstr, pattern, itin> {
+ bits<4> cond;
+ bit intcc;
+ bits<2> opf_cc;
+ bits<5> rs2;
+
+ let Inst{18} = 0;
+ let Inst{17-14} = cond;
+ let Inst{13} = intcc;
+ let Inst{12-11} = opf_cc;
+ let Inst{10-5} = opf_low;
+ let Inst{4-0} = rs2;
+}
+
+class F4_4r<bits<6> op3, bits<5> opf_low, bits<3> rcond, dag outs, dag ins,
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F4<op3, outs, ins, asmstr, pattern, itin> {
+ bits <5> rs1;
+ bits <5> rs2;
+ let Inst{18-14} = rs1;
+ let Inst{13} = 0; // IsImm
+ let Inst{12-10} = rcond;
+ let Inst{9-5} = opf_low;
+ let Inst{4-0} = rs2;
+}
+
+
+class F4_4i<bits<6> op3, bits<3> rcond, dag outs, dag ins,
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F4<op3, outs, ins, asmstr, pattern, itin> {
+ bits<5> rs1;
+ bits<10> simm10;
+ let Inst{18-14} = rs1;
+ let Inst{13} = 1; // IsImm
+ let Inst{12-10} = rcond;
+ let Inst{9-0} = simm10;
+}
+
+
+class TRAPSP<bits<6> op3Val, bit isimm, dag outs, dag ins,
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : F3<outs, ins, asmstr, pattern, itin> {
+ bits<4> cond;
+ bits<2> cc;
+
+ let op = 0b10;
+ let rd{4} = 0;
+ let rd{3-0} = cond;
+ let op3 = op3Val;
+ let Inst{13} = isimm;
+ let Inst{12-11} = cc;
+
+}
+
+class TRAPSPrr<bits<6> op3Val, dag outs, dag ins,
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : TRAPSP<op3Val, 0, outs, ins, asmstr, pattern, itin> {
+ bits<5> rs2;
+
+ let Inst{10-5} = 0;
+ let Inst{4-0} = rs2;
+}
+
+class TRAPSPri<bits<6> op3Val, dag outs, dag ins,
+ string asmstr, list<dag> pattern,
+ InstrItinClass itin = NoItinerary>
+ : TRAPSP<op3Val, 1, outs, ins, asmstr, pattern, itin> {
+ bits<8> imm;
+
+ let Inst{10-8} = 0;
+ let Inst{7-0} = imm;
+}
+
+// Pseudo-instructions for alternate assembly syntax (never used by codegen).
+// These are aliases that require C++ handling to convert to the target
+// instruction, while InstAliases can be handled directly by tblgen.
+class AsmPseudoInst<dag outs, dag ins, string asm>
+ : InstSP<outs, ins, asm, []> {
+ let isPseudo = 1;
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
new file mode 100644
index 000000000000..ea8ed830bafc
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.cpp
@@ -0,0 +1,510 @@
+//===-- SparcInstrInfo.cpp - Sparc Instruction Information ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Sparc implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcInstrInfo.h"
+#include "Sparc.h"
+#include "SparcMachineFunctionInfo.h"
+#include "SparcSubtarget.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+#define GET_INSTRINFO_CTOR_DTOR
+#include "SparcGenInstrInfo.inc"
+
+// Pin the vtable to this file.
+void SparcInstrInfo::anchor() {}
+
+SparcInstrInfo::SparcInstrInfo(SparcSubtarget &ST)
+ : SparcGenInstrInfo(SP::ADJCALLSTACKDOWN, SP::ADJCALLSTACKUP), RI(),
+ Subtarget(ST) {}
+
+/// isLoadFromStackSlot - If the specified machine instruction is a direct
+/// load from a stack slot, return the virtual or physical register number of
+/// the destination along with the FrameIndex of the loaded stack slot. If
+/// not, return 0. This predicate must return 0 if the instruction has
+/// any side effects other than loading from the stack slot.
+unsigned SparcInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ if (MI.getOpcode() == SP::LDri || MI.getOpcode() == SP::LDXri ||
+ MI.getOpcode() == SP::LDFri || MI.getOpcode() == SP::LDDFri ||
+ MI.getOpcode() == SP::LDQFri) {
+ if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
+ MI.getOperand(2).getImm() == 0) {
+ FrameIndex = MI.getOperand(1).getIndex();
+ return MI.getOperand(0).getReg();
+ }
+ }
+ return 0;
+}
+
+/// isStoreToStackSlot - If the specified machine instruction is a direct
+/// store to a stack slot, return the virtual or physical register number of
+/// the source reg along with the FrameIndex of the stack slot stored to.  If
+/// not, return 0. This predicate must return 0 if the instruction has
+/// any side effects other than storing to the stack slot.
+unsigned SparcInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const {
+ if (MI.getOpcode() == SP::STri || MI.getOpcode() == SP::STXri ||
+ MI.getOpcode() == SP::STFri || MI.getOpcode() == SP::STDFri ||
+ MI.getOpcode() == SP::STQFri) {
+ if (MI.getOperand(0).isFI() && MI.getOperand(1).isImm() &&
+ MI.getOperand(1).getImm() == 0) {
+ FrameIndex = MI.getOperand(0).getIndex();
+ return MI.getOperand(2).getReg();
+ }
+ }
+ return 0;
+}
+
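+// The integer condition codes occupy the numerically lowest values of
+// SPCC::CondCodes (ICC_VC == 15 is the largest); all FCC_* and CPCC_* values
+// lie above them, so a single upper-bound check suffices.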
+static bool IsIntegerCC(unsigned CC)
+{
+ return (CC <= SPCC::ICC_VC);
+}
+
+static SPCC::CondCodes GetOppositeBranchCondition(SPCC::CondCodes CC)
+{
+ switch(CC) {
+ case SPCC::ICC_A: return SPCC::ICC_N;
+ case SPCC::ICC_N: return SPCC::ICC_A;
+ case SPCC::ICC_NE: return SPCC::ICC_E;
+ case SPCC::ICC_E: return SPCC::ICC_NE;
+ case SPCC::ICC_G: return SPCC::ICC_LE;
+ case SPCC::ICC_LE: return SPCC::ICC_G;
+ case SPCC::ICC_GE: return SPCC::ICC_L;
+ case SPCC::ICC_L: return SPCC::ICC_GE;
+ case SPCC::ICC_GU: return SPCC::ICC_LEU;
+ case SPCC::ICC_LEU: return SPCC::ICC_GU;
+ case SPCC::ICC_CC: return SPCC::ICC_CS;
+ case SPCC::ICC_CS: return SPCC::ICC_CC;
+ case SPCC::ICC_POS: return SPCC::ICC_NEG;
+ case SPCC::ICC_NEG: return SPCC::ICC_POS;
+ case SPCC::ICC_VC: return SPCC::ICC_VS;
+ case SPCC::ICC_VS: return SPCC::ICC_VC;
+
+ case SPCC::FCC_A: return SPCC::FCC_N;
+ case SPCC::FCC_N: return SPCC::FCC_A;
+ case SPCC::FCC_U: return SPCC::FCC_O;
+ case SPCC::FCC_O: return SPCC::FCC_U;
+ case SPCC::FCC_G: return SPCC::FCC_ULE;
+ case SPCC::FCC_LE: return SPCC::FCC_UG;
+ case SPCC::FCC_UG: return SPCC::FCC_LE;
+ case SPCC::FCC_ULE: return SPCC::FCC_G;
+ case SPCC::FCC_L: return SPCC::FCC_UGE;
+ case SPCC::FCC_GE: return SPCC::FCC_UL;
+ case SPCC::FCC_UL: return SPCC::FCC_GE;
+ case SPCC::FCC_UGE: return SPCC::FCC_L;
+ case SPCC::FCC_LG: return SPCC::FCC_UE;
+ case SPCC::FCC_UE: return SPCC::FCC_LG;
+ case SPCC::FCC_NE: return SPCC::FCC_E;
+ case SPCC::FCC_E: return SPCC::FCC_NE;
+
+ case SPCC::CPCC_A: return SPCC::CPCC_N;
+ case SPCC::CPCC_N: return SPCC::CPCC_A;
+ case SPCC::CPCC_3: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_2: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_23: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_1: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_13: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_12: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_123: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_0: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_03: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_02: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_023: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_01: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_013: LLVM_FALLTHROUGH;
+ case SPCC::CPCC_012:
+ // "Opposite" code is not meaningful, as we don't know
+ // what the CoProc condition means here. The cond-code will
+ // only be used in inline assembler, so this code should
+ // not be reached in a normal compilation pass.
+ llvm_unreachable("Meaningless inversion of co-processor cond code");
+ }
+ llvm_unreachable("Invalid cond code");
+}
+
+static bool isUncondBranchOpcode(int Opc) { return Opc == SP::BA; }
+
+static bool isCondBranchOpcode(int Opc) {
+ return Opc == SP::FBCOND || Opc == SP::BCOND;
+}
+
+static bool isIndirectBranchOpcode(int Opc) {
+ return Opc == SP::BINDrr || Opc == SP::BINDri;
+}
+
+static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
+ SmallVectorImpl<MachineOperand> &Cond) {
+ Cond.push_back(MachineOperand::CreateImm(LastInst->getOperand(1).getImm()));
+ Target = LastInst->getOperand(0).getMBB();
+}
+
+bool SparcInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify) const {
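+  // TargetInstrInfo convention: returning false means the terminators were
+  // understood and TBB/FBB/Cond describe them; returning true means this
+  // branch sequence could not be analyzed.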
+ MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
+ if (I == MBB.end())
+ return false;
+
+ if (!isUnpredicatedTerminator(*I))
+ return false;
+
+ // Get the last instruction in the block.
+ MachineInstr *LastInst = &*I;
+ unsigned LastOpc = LastInst->getOpcode();
+
+ // If there is only one terminator instruction, process it.
+ if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
+ if (isUncondBranchOpcode(LastOpc)) {
+ TBB = LastInst->getOperand(0).getMBB();
+ return false;
+ }
+ if (isCondBranchOpcode(LastOpc)) {
+ // Block ends with fall-through condbranch.
+ parseCondBranch(LastInst, TBB, Cond);
+ return false;
+ }
+ return true; // Can't handle indirect branch.
+ }
+
+ // Get the instruction before it if it is a terminator.
+ MachineInstr *SecondLastInst = &*I;
+ unsigned SecondLastOpc = SecondLastInst->getOpcode();
+
+ // If AllowModify is true and the block ends with two or more unconditional
+ // branches, delete all but the first unconditional branch.
+ if (AllowModify && isUncondBranchOpcode(LastOpc)) {
+ while (isUncondBranchOpcode(SecondLastOpc)) {
+ LastInst->eraseFromParent();
+ LastInst = SecondLastInst;
+ LastOpc = LastInst->getOpcode();
+ if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
+        // Return now; the only terminator left is an unconditional branch.
+ TBB = LastInst->getOperand(0).getMBB();
+ return false;
+ } else {
+ SecondLastInst = &*I;
+ SecondLastOpc = SecondLastInst->getOpcode();
+ }
+ }
+ }
+
+ // If there are three terminators, we don't know what sort of block this is.
+ if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
+ return true;
+
+  // If the block ends with a BA and a Bcc, handle it.
+ if (isCondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
+ parseCondBranch(SecondLastInst, TBB, Cond);
+ FBB = LastInst->getOperand(0).getMBB();
+ return false;
+ }
+
+ // If the block ends with two unconditional branches, handle it. The second
+ // one is not executed.
+ if (isUncondBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
+ TBB = SecondLastInst->getOperand(0).getMBB();
+ return false;
+ }
+
+ // ...likewise if it ends with an indirect branch followed by an unconditional
+ // branch.
+ if (isIndirectBranchOpcode(SecondLastOpc) && isUncondBranchOpcode(LastOpc)) {
+ I = LastInst;
+ if (AllowModify)
+ I->eraseFromParent();
+ return true;
+ }
+
+ // Otherwise, can't handle this.
+ return true;
+}
+
+unsigned SparcInstrInfo::insertBranch(MachineBasicBlock &MBB,
+ MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB,
+ ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL,
+ int *BytesAdded) const {
+ assert(TBB && "insertBranch must not be told to insert a fallthrough");
+ assert((Cond.size() == 1 || Cond.size() == 0) &&
+ "Sparc branch conditions should have one component!");
+ assert(!BytesAdded && "code size not handled");
+
+ if (Cond.empty()) {
+ assert(!FBB && "Unconditional branch with multiple successors!");
+ BuildMI(&MBB, DL, get(SP::BA)).addMBB(TBB);
+ return 1;
+ }
+
+ // Conditional branch
+ unsigned CC = Cond[0].getImm();
+
+ if (IsIntegerCC(CC))
+ BuildMI(&MBB, DL, get(SP::BCOND)).addMBB(TBB).addImm(CC);
+ else
+ BuildMI(&MBB, DL, get(SP::FBCOND)).addMBB(TBB).addImm(CC);
+ if (!FBB)
+ return 1;
+
+ BuildMI(&MBB, DL, get(SP::BA)).addMBB(FBB);
+ return 2;
+}
+
+unsigned SparcInstrInfo::removeBranch(MachineBasicBlock &MBB,
+ int *BytesRemoved) const {
+ assert(!BytesRemoved && "code size not handled");
+
+ MachineBasicBlock::iterator I = MBB.end();
+ unsigned Count = 0;
+ while (I != MBB.begin()) {
+ --I;
+
+ if (I->isDebugValue())
+ continue;
+
+ if (I->getOpcode() != SP::BA
+ && I->getOpcode() != SP::BCOND
+ && I->getOpcode() != SP::FBCOND)
+ break; // Not a branch
+
+ I->eraseFromParent();
+ I = MBB.end();
+ ++Count;
+ }
+ return Count;
+}
+
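+// Inverts an analyzeBranch condition in place (e.g. ICC_G <-> ICC_LE) so
+// that generic branch folding can flip conditional branches; returning false
+// reports success.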
+bool SparcInstrInfo::reverseBranchCondition(
+ SmallVectorImpl<MachineOperand> &Cond) const {
+ assert(Cond.size() == 1);
+ SPCC::CondCodes CC = static_cast<SPCC::CondCodes>(Cond[0].getImm());
+ Cond[0].setImm(GetOppositeBranchCondition(CC));
+ return false;
+}
+
+void SparcInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg,
+ unsigned SrcReg, bool KillSrc) const {
+ unsigned numSubRegs = 0;
+ unsigned movOpc = 0;
+ const unsigned *subRegIdx = nullptr;
+ bool ExtraG0 = false;
+
+ const unsigned DW_SubRegsIdx[] = { SP::sub_even, SP::sub_odd };
+ const unsigned DFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd };
+ const unsigned QFP_DFP_SubRegsIdx[] = { SP::sub_even64, SP::sub_odd64 };
+ const unsigned QFP_FP_SubRegsIdx[] = { SP::sub_even, SP::sub_odd,
+ SP::sub_odd64_then_sub_even,
+ SP::sub_odd64_then_sub_odd };
+
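+  // Copies with no single-instruction form fall through to the loop below
+  // and are decomposed into per-subregister moves driven by the index tables
+  // above.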
+ if (SP::IntRegsRegClass.contains(DestReg, SrcReg))
+ BuildMI(MBB, I, DL, get(SP::ORrr), DestReg).addReg(SP::G0)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (SP::IntPairRegClass.contains(DestReg, SrcReg)) {
+ subRegIdx = DW_SubRegsIdx;
+ numSubRegs = 2;
+ movOpc = SP::ORrr;
+ ExtraG0 = true;
+ } else if (SP::FPRegsRegClass.contains(DestReg, SrcReg))
+ BuildMI(MBB, I, DL, get(SP::FMOVS), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ else if (SP::DFPRegsRegClass.contains(DestReg, SrcReg)) {
+ if (Subtarget.isV9()) {
+ BuildMI(MBB, I, DL, get(SP::FMOVD), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else {
+ // Use two FMOVS instructions.
+ subRegIdx = DFP_FP_SubRegsIdx;
+ numSubRegs = 2;
+ movOpc = SP::FMOVS;
+ }
+ } else if (SP::QFPRegsRegClass.contains(DestReg, SrcReg)) {
+ if (Subtarget.isV9()) {
+ if (Subtarget.hasHardQuad()) {
+ BuildMI(MBB, I, DL, get(SP::FMOVQ), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else {
+ // Use two FMOVD instructions.
+ subRegIdx = QFP_DFP_SubRegsIdx;
+ numSubRegs = 2;
+ movOpc = SP::FMOVD;
+ }
+ } else {
+ // Use four FMOVS instructions.
+ subRegIdx = QFP_FP_SubRegsIdx;
+ numSubRegs = 4;
+ movOpc = SP::FMOVS;
+ }
+ } else if (SP::ASRRegsRegClass.contains(DestReg) &&
+ SP::IntRegsRegClass.contains(SrcReg)) {
+ BuildMI(MBB, I, DL, get(SP::WRASRrr), DestReg)
+ .addReg(SP::G0)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else if (SP::IntRegsRegClass.contains(DestReg) &&
+ SP::ASRRegsRegClass.contains(SrcReg)) {
+ BuildMI(MBB, I, DL, get(SP::RDASR), DestReg)
+ .addReg(SrcReg, getKillRegState(KillSrc));
+ } else
+ llvm_unreachable("Impossible reg-to-reg copy");
+
+ if (numSubRegs == 0 || subRegIdx == nullptr || movOpc == 0)
+ return;
+
+ const TargetRegisterInfo *TRI = &getRegisterInfo();
+ MachineInstr *MovMI = nullptr;
+
+ for (unsigned i = 0; i != numSubRegs; ++i) {
+ unsigned Dst = TRI->getSubReg(DestReg, subRegIdx[i]);
+ unsigned Src = TRI->getSubReg(SrcReg, subRegIdx[i]);
+ assert(Dst && Src && "Bad sub-register");
+
+ MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(movOpc), Dst);
+ if (ExtraG0)
+ MIB.addReg(SP::G0);
+ MIB.addReg(Src);
+ MovMI = MIB.getInstr();
+ }
+ // Add implicit super-register defs and kills to the last MovMI.
+ MovMI->addRegisterDefined(DestReg, TRI);
+ if (KillSrc)
+ MovMI->addRegisterKilled(SrcReg, TRI);
+}
+
+void SparcInstrInfo::
+storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned SrcReg, bool isKill, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL;
+ if (I != MBB.end()) DL = I->getDebugLoc();
+
+ MachineFunction *MF = MBB.getParent();
+ const MachineFrameInfo &MFI = MF->getFrameInfo();
+ MachineMemOperand *MMO = MF->getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
+ MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
+
+ // On the order of operands here: think "[FrameIdx + 0] = SrcReg".
+ if (RC == &SP::I64RegsRegClass)
+ BuildMI(MBB, I, DL, get(SP::STXri)).addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ else if (RC == &SP::IntRegsRegClass)
+ BuildMI(MBB, I, DL, get(SP::STri)).addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ else if (RC == &SP::IntPairRegClass)
+ BuildMI(MBB, I, DL, get(SP::STDri)).addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ else if (RC == &SP::FPRegsRegClass)
+ BuildMI(MBB, I, DL, get(SP::STFri)).addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
+ BuildMI(MBB, I, DL, get(SP::STDFri)).addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
+ // Use STQFri irrespective of its legality. If STQ is not legal, it will be
+ // lowered into two STDs in eliminateFrameIndex.
+ BuildMI(MBB, I, DL, get(SP::STQFri)).addFrameIndex(FI).addImm(0)
+ .addReg(SrcReg, getKillRegState(isKill)).addMemOperand(MMO);
+ else
+ llvm_unreachable("Can't store this register to stack slot");
+}
+
+void SparcInstrInfo::
+loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ unsigned DestReg, int FI,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const {
+ DebugLoc DL;
+ if (I != MBB.end()) DL = I->getDebugLoc();
+
+ MachineFunction *MF = MBB.getParent();
+ const MachineFrameInfo &MFI = MF->getFrameInfo();
+ MachineMemOperand *MMO = MF->getMachineMemOperand(
+ MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
+ MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
+
+ if (RC == &SP::I64RegsRegClass)
+ BuildMI(MBB, I, DL, get(SP::LDXri), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
+ else if (RC == &SP::IntRegsRegClass)
+ BuildMI(MBB, I, DL, get(SP::LDri), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
+ else if (RC == &SP::IntPairRegClass)
+ BuildMI(MBB, I, DL, get(SP::LDDri), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
+ else if (RC == &SP::FPRegsRegClass)
+ BuildMI(MBB, I, DL, get(SP::LDFri), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
+ else if (SP::DFPRegsRegClass.hasSubClassEq(RC))
+ BuildMI(MBB, I, DL, get(SP::LDDFri), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
+ else if (SP::QFPRegsRegClass.hasSubClassEq(RC))
+ // Use LDQFri irrespective of its legality. If LDQ is not legal, it will be
+ // lowered into two LDDs in eliminateFrameIndex.
+ BuildMI(MBB, I, DL, get(SP::LDQFri), DestReg).addFrameIndex(FI).addImm(0)
+ .addMemOperand(MMO);
+ else
+ llvm_unreachable("Can't load this register from stack slot");
+}
+
+unsigned SparcInstrInfo::getGlobalBaseReg(MachineFunction *MF) const
+{
+ SparcMachineFunctionInfo *SparcFI = MF->getInfo<SparcMachineFunctionInfo>();
+ unsigned GlobalBaseReg = SparcFI->getGlobalBaseReg();
+ if (GlobalBaseReg != 0)
+ return GlobalBaseReg;
+
+ // Insert the set of GlobalBaseReg into the first MBB of the function
+ MachineBasicBlock &FirstMBB = MF->front();
+ MachineBasicBlock::iterator MBBI = FirstMBB.begin();
+ MachineRegisterInfo &RegInfo = MF->getRegInfo();
+
+ const TargetRegisterClass *PtrRC =
+ Subtarget.is64Bit() ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
+ GlobalBaseReg = RegInfo.createVirtualRegister(PtrRC);
+
+ DebugLoc dl;
+
+ BuildMI(FirstMBB, MBBI, dl, get(SP::GETPCX), GlobalBaseReg);
+ SparcFI->setGlobalBaseReg(GlobalBaseReg);
+ return GlobalBaseReg;
+}
+
+bool SparcInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
+ switch (MI.getOpcode()) {
+ case TargetOpcode::LOAD_STACK_GUARD: {
+ assert(Subtarget.isTargetLinux() &&
+ "Only Linux target is expected to contain LOAD_STACK_GUARD");
+ // offsetof(tcbhead_t, stack_guard) from sysdeps/sparc/nptl/tls.h in glibc.
+ const int64_t Offset = Subtarget.is64Bit() ? 0x28 : 0x14;
+ MI.setDesc(get(Subtarget.is64Bit() ? SP::LDXri : SP::LDri));
+ MachineInstrBuilder(*MI.getParent()->getParent(), MI)
+ .addReg(SP::G7)
+ .addImm(Offset);
+ return true;
+ }
+ }
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
new file mode 100644
index 000000000000..524b5d054163
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.h
@@ -0,0 +1,108 @@
+//===-- SparcInstrInfo.h - Sparc Instruction Information --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Sparc implementation of the TargetInstrInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_SPARCINSTRINFO_H
+#define LLVM_LIB_TARGET_SPARC_SPARCINSTRINFO_H
+
+#include "SparcRegisterInfo.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+
+#define GET_INSTRINFO_HEADER
+#include "SparcGenInstrInfo.inc"
+
+namespace llvm {
+
+class SparcSubtarget;
+
+/// SPII - This namespace holds all of the target specific flags that
+/// instruction info tracks.
+///
+namespace SPII {
+ enum {
+ Pseudo = (1<<0),
+ Load = (1<<1),
+ Store = (1<<2),
+ DelaySlot = (1<<3)
+ };
+}
+
+class SparcInstrInfo : public SparcGenInstrInfo {
+ const SparcRegisterInfo RI;
+ const SparcSubtarget& Subtarget;
+ virtual void anchor();
+public:
+ explicit SparcInstrInfo(SparcSubtarget &ST);
+
+ /// getRegisterInfo - TargetInstrInfo is a superset of MRegister info. As
+ /// such, whenever a client has an instance of instruction info, it should
+ /// always be able to get register info as well (through this method).
+ ///
+ const SparcRegisterInfo &getRegisterInfo() const { return RI; }
+
+ /// isLoadFromStackSlot - If the specified machine instruction is a direct
+ /// load from a stack slot, return the virtual or physical register number of
+ /// the destination along with the FrameIndex of the loaded stack slot. If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than loading from the stack slot.
+ unsigned isLoadFromStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const override;
+
+ /// isStoreToStackSlot - If the specified machine instruction is a direct
+ /// store to a stack slot, return the virtual or physical register number of
+  /// the source reg along with the FrameIndex of the stack slot stored to.
+  /// If
+ /// not, return 0. This predicate must return 0 if the instruction has
+ /// any side effects other than storing to the stack slot.
+ unsigned isStoreToStackSlot(const MachineInstr &MI,
+ int &FrameIndex) const override;
+
+ bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
+ MachineBasicBlock *&FBB,
+ SmallVectorImpl<MachineOperand> &Cond,
+ bool AllowModify = false) const override;
+
+ unsigned removeBranch(MachineBasicBlock &MBB,
+ int *BytesRemoved = nullptr) const override;
+
+ unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
+ MachineBasicBlock *FBB, ArrayRef<MachineOperand> Cond,
+ const DebugLoc &DL,
+ int *BytesAdded = nullptr) const override;
+
+ bool
+ reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const override;
+
+ void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
+ const DebugLoc &DL, unsigned DestReg, unsigned SrcReg,
+ bool KillSrc) const override;
+
+ void storeRegToStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned SrcReg, bool isKill, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ void loadRegFromStackSlot(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator MBBI,
+ unsigned DestReg, int FrameIndex,
+ const TargetRegisterClass *RC,
+ const TargetRegisterInfo *TRI) const override;
+
+ unsigned getGlobalBaseReg(MachineFunction *MF) const;
+
+ // Lower pseudo instructions after register allocation.
+ bool expandPostRAPseudo(MachineInstr &MI) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
new file mode 100644
index 000000000000..08bccbde0bd6
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrInfo.td
@@ -0,0 +1,1694 @@
+//===-- SparcInstrInfo.td - Target Description for Sparc Target -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the Sparc instructions in TableGen format.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Instruction format superclass
+//===----------------------------------------------------------------------===//
+
+include "SparcInstrFormats.td"
+
+//===----------------------------------------------------------------------===//
+// Feature predicates.
+//===----------------------------------------------------------------------===//
+
+// True when generating 32-bit code.
+def Is32Bit : Predicate<"!Subtarget->is64Bit()">;
+
+// True when generating 64-bit code. This also implies HasV9.
+def Is64Bit : Predicate<"Subtarget->is64Bit()">;
+
+def UseSoftMulDiv : Predicate<"Subtarget->useSoftMulDiv()">,
+ AssemblerPredicate<"FeatureSoftMulDiv">;
+
+// HasV9 - This predicate is true when the target processor supports V9
+// instructions. Note that the machine may be running in 32-bit mode.
+def HasV9 : Predicate<"Subtarget->isV9()">,
+ AssemblerPredicate<"FeatureV9">;
+
+// HasNoV9 - This predicate is true when the target doesn't have V9
+// instructions.  Using it is a hack to work around instruction selection
+// lacking proper cost modeling for V8 instructions that are more expensive
+// than their V9 counterparts.
+def HasNoV9 : Predicate<"!Subtarget->isV9()">;
+
+// HasVIS - This is true when the target processor has VIS extensions.
+def HasVIS : Predicate<"Subtarget->isVIS()">,
+ AssemblerPredicate<"FeatureVIS">;
+def HasVIS2 : Predicate<"Subtarget->isVIS2()">,
+ AssemblerPredicate<"FeatureVIS2">;
+def HasVIS3 : Predicate<"Subtarget->isVIS3()">,
+ AssemblerPredicate<"FeatureVIS3">;
+
+// HasHardQuad - This is true when the target processor supports quad floating
+// point instructions.
+def HasHardQuad : Predicate<"Subtarget->hasHardQuad()">;
+
+// HasLeonCASA - This is true when the target processor supports the CASA
+// instruction.
+def HasLeonCASA : Predicate<"Subtarget->hasLeonCasa()">;
+
+// HasUMAC_SMAC - This is true when the target processor supports the
+// UMAC and SMAC instructions.
+def HasUMAC_SMAC : Predicate<"Subtarget->hasUmacSmac()">;
+
+def HasNoFdivSqrtFix : Predicate<"!Subtarget->fixAllFDIVSQRT()">;
+def HasFMULS : Predicate<"!Subtarget->hasNoFMULS()">;
+def HasFSMULD : Predicate<"!Subtarget->hasNoFSMULD()">;
+
+// UseDeprecatedInsts - This predicate is true when the target processor is a
+// V8, or when it is V9 but the V8 deprecated instructions are efficient enough
+// to use when appropriate. In either of these cases, the instruction selector
+// will pick deprecated instructions.
+def UseDeprecatedInsts : Predicate<"Subtarget->useDeprecatedV8Instructions()">;
+
+//===----------------------------------------------------------------------===//
+// Instruction Pattern Stuff
+//===----------------------------------------------------------------------===//
+
+def simm11 : PatLeaf<(imm), [{ return isInt<11>(N->getSExtValue()); }]>;
+
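+// simm11/simm13 match immediates representable in 11/13 signed bits; simm13
+// covers the range [-4096, 4095] accepted by most integer ALU and load/store
+// instructions.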
+def simm13 : PatLeaf<(imm), [{ return isInt<13>(N->getSExtValue()); }]>;
+
+def LO10 : SDNodeXForm<imm, [{
+ return CurDAG->getTargetConstant((unsigned)N->getZExtValue() & 1023, SDLoc(N),
+ MVT::i32);
+}]>;
+
+def HI22 : SDNodeXForm<imm, [{
+ // Transformation function: shift the immediate value down into the low bits.
+ return CurDAG->getTargetConstant((unsigned)N->getZExtValue() >> 10, SDLoc(N),
+ MVT::i32);
+}]>;
+
+def SETHIimm : PatLeaf<(imm), [{
+ return isShiftedUInt<22, 10>(N->getZExtValue());
+}], HI22>;
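+
+// Worked example: 0xABCDE000 has its low 10 bits clear, so SETHIimm matches
+// it and HI22 rewrites it to the 22-bit value 0x2AF378 (0xABCDE000 >> 10),
+// letting a single SETHI materialize the constant.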
+
+// Addressing modes.
+def ADDRrr : ComplexPattern<iPTR, 2, "SelectADDRrr", [], []>;
+def ADDRri : ComplexPattern<iPTR, 2, "SelectADDRri", [frameindex], []>;
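+// ADDRrr selects reg+reg addresses; ADDRri selects reg+simm13 addresses and,
+// per the frameindex root above, bare frame indices (treated as
+// FrameIndex+0).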
+
+// Address operands
+def SparcMEMrrAsmOperand : AsmOperandClass {
+ let Name = "MEMrr";
+ let ParserMethod = "parseMEMOperand";
+}
+
+def SparcMEMriAsmOperand : AsmOperandClass {
+ let Name = "MEMri";
+ let ParserMethod = "parseMEMOperand";
+}
+
+def MEMrr : Operand<iPTR> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops ptr_rc, ptr_rc);
+ let ParserMatchClass = SparcMEMrrAsmOperand;
+}
+def MEMri : Operand<iPTR> {
+ let PrintMethod = "printMemOperand";
+ let MIOperandInfo = (ops ptr_rc, i32imm);
+ let ParserMatchClass = SparcMEMriAsmOperand;
+}
+
+def TLSSym : Operand<iPTR>;
+
+// Branch targets have OtherVT type.
+def brtarget : Operand<OtherVT> {
+ let EncoderMethod = "getBranchTargetOpValue";
+}
+
+def bprtarget : Operand<OtherVT> {
+ let EncoderMethod = "getBranchPredTargetOpValue";
+}
+
+def bprtarget16 : Operand<OtherVT> {
+ let EncoderMethod = "getBranchOnRegTargetOpValue";
+}
+
+def calltarget : Operand<i32> {
+ let EncoderMethod = "getCallTargetOpValue";
+ let DecoderMethod = "DecodeCall";
+}
+
+def simm13Op : Operand<i32> {
+ let DecoderMethod = "DecodeSIMM13";
+}
+
+// Operand for printing out a condition code.
+let PrintMethod = "printCCOperand" in
+ def CCOp : Operand<i32>;
+
+def SDTSPcmpicc :
+SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisSameAs<0, 1>]>;
+def SDTSPcmpfcc :
+SDTypeProfile<0, 2, [SDTCisFP<0>, SDTCisSameAs<0, 1>]>;
+def SDTSPbrcc :
+SDTypeProfile<0, 2, [SDTCisVT<0, OtherVT>, SDTCisVT<1, i32>]>;
+def SDTSPselectcc :
+SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>, SDTCisSameAs<1, 2>, SDTCisVT<3, i32>]>;
+def SDTSPFTOI :
+SDTypeProfile<1, 1, [SDTCisVT<0, f32>, SDTCisFP<1>]>;
+def SDTSPITOF :
+SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f32>]>;
+def SDTSPFTOX :
+SDTypeProfile<1, 1, [SDTCisVT<0, f64>, SDTCisFP<1>]>;
+def SDTSPXTOF :
+SDTypeProfile<1, 1, [SDTCisFP<0>, SDTCisVT<1, f64>]>;
+
+def SDTSPtlsadd :
+SDTypeProfile<1, 3, [SDTCisInt<0>, SDTCisSameAs<0, 1>, SDTCisPtrTy<2>]>;
+def SDTSPtlsld :
+SDTypeProfile<1, 2, [SDTCisPtrTy<0>, SDTCisPtrTy<1>]>;
+
+def SDTSPeh_sjlj_setjmp : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisPtrTy<1>]>;
+def SDTSPeh_sjlj_longjmp: SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>;
+
+def SPcmpicc : SDNode<"SPISD::CMPICC", SDTSPcmpicc, [SDNPOutGlue]>;
+def SPcmpfcc : SDNode<"SPISD::CMPFCC", SDTSPcmpfcc, [SDNPOutGlue]>;
+def SPbricc : SDNode<"SPISD::BRICC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
+def SPbrxcc : SDNode<"SPISD::BRXCC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
+def SPbrfcc : SDNode<"SPISD::BRFCC", SDTSPbrcc, [SDNPHasChain, SDNPInGlue]>;
+
+def SPhi : SDNode<"SPISD::Hi", SDTIntUnaryOp>;
+def SPlo : SDNode<"SPISD::Lo", SDTIntUnaryOp>;
+
+def SPftoi : SDNode<"SPISD::FTOI", SDTSPFTOI>;
+def SPitof : SDNode<"SPISD::ITOF", SDTSPITOF>;
+def SPftox : SDNode<"SPISD::FTOX", SDTSPFTOX>;
+def SPxtof : SDNode<"SPISD::XTOF", SDTSPXTOF>;
+
+def SPselecticc : SDNode<"SPISD::SELECT_ICC", SDTSPselectcc, [SDNPInGlue]>;
+def SPselectxcc : SDNode<"SPISD::SELECT_XCC", SDTSPselectcc, [SDNPInGlue]>;
+def SPselectfcc : SDNode<"SPISD::SELECT_FCC", SDTSPselectcc, [SDNPInGlue]>;
+
+def SPsjlj_setjmp: SDNode<"SPISD::EH_SJLJ_SETJMP",
+ SDTSPeh_sjlj_setjmp,
+ [SDNPHasChain, SDNPSideEffect]>;
+def SPsjlj_longjmp: SDNode<"SPISD::EH_SJLJ_LONGJMP",
+ SDTSPeh_sjlj_longjmp,
+ [SDNPHasChain, SDNPSideEffect]>;
+
+// These are target-independent nodes, but have target-specific formats.
+def SDT_SPCallSeqStart : SDCallSeqStart<[ SDTCisVT<0, i32>,
+ SDTCisVT<1, i32> ]>;
+def SDT_SPCallSeqEnd : SDCallSeqEnd<[ SDTCisVT<0, i32>,
+ SDTCisVT<1, i32> ]>;
+
+def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_SPCallSeqStart,
+ [SDNPHasChain, SDNPOutGlue]>;
+def callseq_end : SDNode<"ISD::CALLSEQ_END", SDT_SPCallSeqEnd,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;
+
+def SDT_SPCall : SDTypeProfile<0, -1, [SDTCisVT<0, i32>]>;
+def call : SDNode<"SPISD::CALL", SDT_SPCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPVariadic]>;
+
+def SDT_SPRet : SDTypeProfile<0, 1, [SDTCisVT<0, i32>]>;
+def retflag : SDNode<"SPISD::RET_FLAG", SDT_SPRet,
+ [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
+
+def flushw : SDNode<"SPISD::FLUSHW", SDTNone,
+ [SDNPHasChain, SDNPSideEffect, SDNPMayStore]>;
+
+def tlsadd : SDNode<"SPISD::TLS_ADD", SDTSPtlsadd>;
+def tlsld : SDNode<"SPISD::TLS_LD", SDTSPtlsld>;
+def tlscall : SDNode<"SPISD::TLS_CALL", SDT_SPCall,
+ [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
+ SDNPVariadic]>;
+
+def getPCX : Operand<iPTR> {
+ let PrintMethod = "printGetPCX";
+}
+
+//===----------------------------------------------------------------------===//
+// SPARC Flag Conditions
+//===----------------------------------------------------------------------===//
+
+// Note that these values must be kept in sync with the CCOp::CondCode enum
+// values.
+class ICC_VAL<int N> : PatLeaf<(i32 N)>;
+def ICC_NE : ICC_VAL< 9>; // Not Equal
+def ICC_E : ICC_VAL< 1>; // Equal
+def ICC_G : ICC_VAL<10>; // Greater
+def ICC_LE : ICC_VAL< 2>; // Less or Equal
+def ICC_GE : ICC_VAL<11>; // Greater or Equal
+def ICC_L : ICC_VAL< 3>; // Less
+def ICC_GU : ICC_VAL<12>; // Greater Unsigned
+def ICC_LEU : ICC_VAL< 4>; // Less or Equal Unsigned
+def ICC_CC  : ICC_VAL<13>;  // Carry Clear/Greater or Equal Unsigned
+def ICC_CS : ICC_VAL< 5>; // Carry Set/Less Unsigned
+def ICC_POS : ICC_VAL<14>; // Positive
+def ICC_NEG : ICC_VAL< 6>; // Negative
+def ICC_VC : ICC_VAL<15>; // Overflow Clear
+def ICC_VS : ICC_VAL< 7>; // Overflow Set
+
+class FCC_VAL<int N> : PatLeaf<(i32 N)>;
+def FCC_U : FCC_VAL<23>; // Unordered
+def FCC_G : FCC_VAL<22>; // Greater
+def FCC_UG : FCC_VAL<21>; // Unordered or Greater
+def FCC_L : FCC_VAL<20>; // Less
+def FCC_UL : FCC_VAL<19>; // Unordered or Less
+def FCC_LG : FCC_VAL<18>; // Less or Greater
+def FCC_NE : FCC_VAL<17>; // Not Equal
+def FCC_E : FCC_VAL<25>; // Equal
+def FCC_UE : FCC_VAL<26>; // Unordered or Equal
+def FCC_GE : FCC_VAL<27>; // Greater or Equal
+def FCC_UGE : FCC_VAL<28>; // Unordered or Greater or Equal
+def FCC_LE : FCC_VAL<29>; // Less or Equal
+def FCC_ULE : FCC_VAL<30>; // Unordered or Less or Equal
+def FCC_O : FCC_VAL<31>; // Ordered
+
+class CPCC_VAL<int N> : PatLeaf<(i32 N)>;
+def CPCC_3 : CPCC_VAL<39>; // 3
+def CPCC_2 : CPCC_VAL<38>; // 2
+def CPCC_23 : CPCC_VAL<37>; // 2 or 3
+def CPCC_1 : CPCC_VAL<36>; // 1
+def CPCC_13 : CPCC_VAL<35>; // 1 or 3
+def CPCC_12 : CPCC_VAL<34>; // 1 or 2
+def CPCC_123 : CPCC_VAL<33>; // 1 or 2 or 3
+def CPCC_0 : CPCC_VAL<41>; // 0
+def CPCC_03 : CPCC_VAL<42>; // 0 or 3
+def CPCC_02 : CPCC_VAL<43>; // 0 or 2
+def CPCC_023 : CPCC_VAL<44>; // 0 or 2 or 3
+def CPCC_01 : CPCC_VAL<45>; // 0 or 1
+def CPCC_013 : CPCC_VAL<46>; // 0 or 1 or 3
+def CPCC_012 : CPCC_VAL<47>; // 0 or 1 or 2
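+
+// The ICC_* values above mirror the hardware 4-bit cond field encodings
+// directly; FCC_* and CPCC_* are the same encodings offset by 16 and 32,
+// respectively, so all three families fit in one CondCodes enum.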
+
+//===----------------------------------------------------------------------===//
+// Instruction Class Templates
+//===----------------------------------------------------------------------===//
+
+/// F3_12 multiclass - Define a normal F3_1/F3_2 pattern in one shot.
+multiclass F3_12<string OpcStr, bits<6> Op3Val, SDNode OpNode,
+ RegisterClass RC, ValueType Ty, Operand immOp,
+ InstrItinClass itin = IIC_iu_instr> {
+ def rr : F3_1<2, Op3Val,
+ (outs RC:$rd), (ins RC:$rs1, RC:$rs2),
+ !strconcat(OpcStr, " $rs1, $rs2, $rd"),
+ [(set Ty:$rd, (OpNode Ty:$rs1, Ty:$rs2))],
+ itin>;
+ def ri : F3_2<2, Op3Val,
+ (outs RC:$rd), (ins RC:$rs1, immOp:$simm13),
+ !strconcat(OpcStr, " $rs1, $simm13, $rd"),
+ [(set Ty:$rd, (OpNode Ty:$rs1, (Ty simm13:$simm13)))],
+ itin>;
+}
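+
+// For example, "defm ADD : F3_12<"add", 0b000000, add, ...>" below expands
+// to ADDrr (reg-reg) and ADDri (reg-imm), each with a matching selection
+// pattern.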
+
+/// F3_12np multiclass - Define a normal F3_1/F3_2 pattern in one shot, with no
+/// pattern.
+multiclass F3_12np<string OpcStr, bits<6> Op3Val, InstrItinClass itin = IIC_iu_instr> {
+ def rr : F3_1<2, Op3Val,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2),
+ !strconcat(OpcStr, " $rs1, $rs2, $rd"), [],
+ itin>;
+ def ri : F3_2<2, Op3Val,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13),
+ !strconcat(OpcStr, " $rs1, $simm13, $rd"), [],
+ itin>;
+}
+
+// Load multiclass - Define both Reg+Reg/Reg+Imm patterns in one shot.
+multiclass Load<string OpcStr, bits<6> Op3Val, SDPatternOperator OpNode,
+ RegisterClass RC, ValueType Ty, InstrItinClass itin = IIC_iu_instr> {
+ def rr : F3_1<3, Op3Val,
+ (outs RC:$dst), (ins MEMrr:$addr),
+ !strconcat(OpcStr, " [$addr], $dst"),
+ [(set Ty:$dst, (OpNode ADDRrr:$addr))],
+ itin>;
+ def ri : F3_2<3, Op3Val,
+ (outs RC:$dst), (ins MEMri:$addr),
+ !strconcat(OpcStr, " [$addr], $dst"),
+ [(set Ty:$dst, (OpNode ADDRri:$addr))],
+ itin>;
+}
+
+// TODO: Instructions of the LoadASI class are currently asm only; hooking up
+// CodeGen's address spaces to use these is a future task.
+class LoadASI<string OpcStr, bits<6> Op3Val, SDPatternOperator OpNode,
+ RegisterClass RC, ValueType Ty, InstrItinClass itin = NoItinerary> :
+ F3_1_asi<3, Op3Val, (outs RC:$dst), (ins MEMrr:$addr, i8imm:$asi),
+ !strconcat(OpcStr, "a [$addr] $asi, $dst"),
+ []>;
+
+// LoadA multiclass - As above, but also define alternate address space variant
+multiclass LoadA<string OpcStr, bits<6> Op3Val, bits<6> LoadAOp3Val,
+ SDPatternOperator OpNode, RegisterClass RC, ValueType Ty,
+ InstrItinClass itin = NoItinerary> :
+ Load<OpcStr, Op3Val, OpNode, RC, Ty, itin> {
+ def Arr : LoadASI<OpcStr, LoadAOp3Val, OpNode, RC, Ty>;
+}
+
+// The LDSTUB instruction is supported for asm only.
+// It is unlikely that general-purpose code could make use of it.
+// CAS is preferred on SPARC V9.
+def LDSTUBrr : F3_1<3, 0b001101, (outs IntRegs:$dst), (ins MEMrr:$addr),
+ "ldstub [$addr], $dst", []>;
+def LDSTUBri : F3_2<3, 0b001101, (outs IntRegs:$dst), (ins MEMri:$addr),
+ "ldstub [$addr], $dst", []>;
+def LDSTUBArr : F3_1_asi<3, 0b011101, (outs IntRegs:$dst),
+ (ins MEMrr:$addr, i8imm:$asi),
+ "ldstuba [$addr] $asi, $dst", []>;
+
+// Store multiclass - Define both Reg+Reg/Reg+Imm patterns in one shot.
+multiclass Store<string OpcStr, bits<6> Op3Val, SDPatternOperator OpNode,
+ RegisterClass RC, ValueType Ty, InstrItinClass itin = IIC_st> {
+ def rr : F3_1<3, Op3Val,
+ (outs), (ins MEMrr:$addr, RC:$rd),
+ !strconcat(OpcStr, " $rd, [$addr]"),
+ [(OpNode Ty:$rd, ADDRrr:$addr)],
+ itin>;
+ def ri : F3_2<3, Op3Val,
+ (outs), (ins MEMri:$addr, RC:$rd),
+ !strconcat(OpcStr, " $rd, [$addr]"),
+ [(OpNode Ty:$rd, ADDRri:$addr)],
+ itin>;
+}
+
+// TODO: Instructions of the StoreASI class are currently asm only; hooking up
+// CodeGen's address spaces to use these is a future task.
+class StoreASI<string OpcStr, bits<6> Op3Val,
+ SDPatternOperator OpNode, RegisterClass RC, ValueType Ty,
+ InstrItinClass itin = IIC_st> :
+ F3_1_asi<3, Op3Val, (outs), (ins MEMrr:$addr, RC:$rd, i8imm:$asi),
+ !strconcat(OpcStr, "a $rd, [$addr] $asi"),
+ [],
+ itin>;
+
+multiclass StoreA<string OpcStr, bits<6> Op3Val, bits<6> StoreAOp3Val,
+ SDPatternOperator OpNode, RegisterClass RC, ValueType Ty,
+ InstrItinClass itin = IIC_st> :
+ Store<OpcStr, Op3Val, OpNode, RC, Ty> {
+ def Arr : StoreASI<OpcStr, StoreAOp3Val, OpNode, RC, Ty, itin>;
+}
+
+//===----------------------------------------------------------------------===//
+// Instructions
+//===----------------------------------------------------------------------===//
+
+// Pseudo instructions.
+class Pseudo<dag outs, dag ins, string asmstr, list<dag> pattern>
+ : InstSP<outs, ins, asmstr, pattern> {
+ let isCodeGenOnly = 1;
+ let isPseudo = 1;
+}
+
+// GETPCX for PIC
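+// The printed sequence materializes the current PC, which on SPARC clobbers
+// %o7 (typically via a call), hence Defs = [O7].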
+let Defs = [O7] in {
+ def GETPCX : Pseudo<(outs getPCX:$getpcseq), (ins), "$getpcseq", [] >;
+}
+
+let Defs = [O6], Uses = [O6] in {
+def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "!ADJCALLSTACKDOWN $amt1, $amt2",
+ [(callseq_start timm:$amt1, timm:$amt2)]>;
+def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
+ "!ADJCALLSTACKUP $amt1",
+ [(callseq_end timm:$amt1, timm:$amt2)]>;
+}
+
+let hasSideEffects = 1, mayStore = 1 in {
+ let rd = 0, rs1 = 0, rs2 = 0 in
+ def FLUSHW : F3_1<0b10, 0b101011, (outs), (ins),
+ "flushw",
+ [(flushw)]>, Requires<[HasV9]>;
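+  // Pre-V9 targets have no FLUSHW; "ta 3" is the software trap conventionally
+  // reserved for flushing the register windows.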
+  let rd = 8, rs1 = 0, simm13 = 3 in
+ def TA3 : F3_2<0b10, 0b111010, (outs), (ins),
+ "ta 3",
+ [(flushw)]>;
+}
+
+// SELECT_CC_* - Used to implement the SELECT_CC DAG operation. Expanded after
+// instruction selection into a branch sequence. This has to handle all
+// permutations of selection between i32/f32/f64 on ICC and FCC.
+let Uses = [ICC], usesCustomInserter = 1 in {
+ def SELECT_CC_Int_ICC
+ : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, i32imm:$Cond),
+ "; SELECT_CC_Int_ICC PSEUDO!",
+ [(set i32:$dst, (SPselecticc i32:$T, i32:$F, imm:$Cond))]>;
+ def SELECT_CC_FP_ICC
+ : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, i32imm:$Cond),
+ "; SELECT_CC_FP_ICC PSEUDO!",
+ [(set f32:$dst, (SPselecticc f32:$T, f32:$F, imm:$Cond))]>;
+
+ def SELECT_CC_DFP_ICC
+ : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, i32imm:$Cond),
+ "; SELECT_CC_DFP_ICC PSEUDO!",
+ [(set f64:$dst, (SPselecticc f64:$T, f64:$F, imm:$Cond))]>;
+
+ def SELECT_CC_QFP_ICC
+ : Pseudo<(outs QFPRegs:$dst), (ins QFPRegs:$T, QFPRegs:$F, i32imm:$Cond),
+ "; SELECT_CC_QFP_ICC PSEUDO!",
+ [(set f128:$dst, (SPselecticc f128:$T, f128:$F, imm:$Cond))]>;
+}
+
+let usesCustomInserter = 1, Uses = [FCC0] in {
+
+ def SELECT_CC_Int_FCC
+ : Pseudo<(outs IntRegs:$dst), (ins IntRegs:$T, IntRegs:$F, i32imm:$Cond),
+ "; SELECT_CC_Int_FCC PSEUDO!",
+ [(set i32:$dst, (SPselectfcc i32:$T, i32:$F, imm:$Cond))]>;
+
+ def SELECT_CC_FP_FCC
+ : Pseudo<(outs FPRegs:$dst), (ins FPRegs:$T, FPRegs:$F, i32imm:$Cond),
+ "; SELECT_CC_FP_FCC PSEUDO!",
+ [(set f32:$dst, (SPselectfcc f32:$T, f32:$F, imm:$Cond))]>;
+ def SELECT_CC_DFP_FCC
+ : Pseudo<(outs DFPRegs:$dst), (ins DFPRegs:$T, DFPRegs:$F, i32imm:$Cond),
+ "; SELECT_CC_DFP_FCC PSEUDO!",
+ [(set f64:$dst, (SPselectfcc f64:$T, f64:$F, imm:$Cond))]>;
+ def SELECT_CC_QFP_FCC
+ : Pseudo<(outs QFPRegs:$dst), (ins QFPRegs:$T, QFPRegs:$F, i32imm:$Cond),
+ "; SELECT_CC_QFP_FCC PSEUDO!",
+ [(set f128:$dst, (SPselectfcc f128:$T, f128:$F, imm:$Cond))]>;
+}
+
+let hasSideEffects = 1, isBarrier = 1, usesCustomInserter = 1 in {
+ let Defs = [WIM] in
+ def EH_SJLJ_SETJMP32ri : Pseudo<(outs IntRegs:$dst), (ins MEMri:$buf),
+ "#EH_SJLJ_SETJMP32",
+ [(set i32:$dst, (SPsjlj_setjmp ADDRri:$buf))]>,
+ Requires<[Is32Bit]>;
+ def EH_SJLJ_SETJMP32rr : Pseudo<(outs IntRegs:$dst), (ins MEMrr:$buf),
+ "#EH_SJLJ_SETJMP32",
+ [(set i32:$dst, (SPsjlj_setjmp ADDRrr:$buf))]>,
+ Requires<[Is32Bit]>;
+ let isTerminator = 1 in
+ def EH_SJLJ_LONGJMP32ri : Pseudo<(outs), (ins MEMri:$buf),
+ "#EH_SJLJ_LONGJMP32",
+ [(SPsjlj_longjmp ADDRri:$buf)]>,
+ Requires<[Is32Bit]>;
+ def EH_SJLJ_LONGJMP32rr : Pseudo<(outs), (ins MEMrr:$buf),
+ "#EH_SJLJ_LONGJMP32",
+ [(SPsjlj_longjmp ADDRrr:$buf)]>,
+ Requires<[Is32Bit]>;
+}
+
+// Section B.1 - Load Integer Instructions, p. 90
+let DecoderMethod = "DecodeLoadInt" in {
+ defm LDSB : LoadA<"ldsb", 0b001001, 0b011001, sextloadi8, IntRegs, i32>;
+ defm LDSH : LoadA<"ldsh", 0b001010, 0b011010, sextloadi16, IntRegs, i32>;
+ defm LDUB : LoadA<"ldub", 0b000001, 0b010001, zextloadi8, IntRegs, i32>;
+ defm LDUH : LoadA<"lduh", 0b000010, 0b010010, zextloadi16, IntRegs, i32>;
+ defm LD : LoadA<"ld", 0b000000, 0b010000, load, IntRegs, i32>;
+}
+
+let DecoderMethod = "DecodeLoadIntPair" in
+ defm LDD : LoadA<"ldd", 0b000011, 0b010011, load, IntPair, v2i32, IIC_ldd>;
+
+// Section B.2 - Load Floating-point Instructions, p. 92
+let DecoderMethod = "DecodeLoadFP" in {
+ defm LDF : Load<"ld", 0b100000, load, FPRegs, f32, IIC_iu_or_fpu_instr>;
+ def LDFArr : LoadASI<"ld", 0b110000, load, FPRegs, f32, IIC_iu_or_fpu_instr>,
+ Requires<[HasV9]>;
+}
+let DecoderMethod = "DecodeLoadDFP" in {
+ defm LDDF : Load<"ldd", 0b100011, load, DFPRegs, f64, IIC_ldd>;
+ def LDDFArr : LoadASI<"ldd", 0b110011, load, DFPRegs, f64>,
+ Requires<[HasV9]>;
+}
+let DecoderMethod = "DecodeLoadQFP" in
+ defm LDQF : LoadA<"ldq", 0b100010, 0b110010, load, QFPRegs, f128>,
+ Requires<[HasV9, HasHardQuad]>;
+
+let DecoderMethod = "DecodeLoadCP" in
+ defm LDC : Load<"ld", 0b110000, load, CoprocRegs, i32>;
+let DecoderMethod = "DecodeLoadCPPair" in
+ defm LDDC : Load<"ldd", 0b110011, load, CoprocPair, v2i32, IIC_ldd>;
+
+let DecoderMethod = "DecodeLoadCP", Defs = [CPSR] in {
+ let rd = 0 in {
+ def LDCSRrr : F3_1<3, 0b110001, (outs), (ins MEMrr:$addr),
+ "ld [$addr], %csr", []>;
+ def LDCSRri : F3_2<3, 0b110001, (outs), (ins MEMri:$addr),
+ "ld [$addr], %csr", []>;
+ }
+}
+
+let DecoderMethod = "DecodeLoadFP" in
+ let Defs = [FSR] in {
+ let rd = 0 in {
+ def LDFSRrr : F3_1<3, 0b100001, (outs), (ins MEMrr:$addr),
+ "ld [$addr], %fsr", [], IIC_iu_or_fpu_instr>;
+ def LDFSRri : F3_2<3, 0b100001, (outs), (ins MEMri:$addr),
+ "ld [$addr], %fsr", [], IIC_iu_or_fpu_instr>;
+ }
+ let rd = 1 in {
+ def LDXFSRrr : F3_1<3, 0b100001, (outs), (ins MEMrr:$addr),
+ "ldx [$addr], %fsr", []>, Requires<[HasV9]>;
+ def LDXFSRri : F3_2<3, 0b100001, (outs), (ins MEMri:$addr),
+ "ldx [$addr], %fsr", []>, Requires<[HasV9]>;
+ }
+ }
+
+// Section B.4 - Store Integer Instructions, p. 95
+let DecoderMethod = "DecodeStoreInt" in {
+ defm STB : StoreA<"stb", 0b000101, 0b010101, truncstorei8, IntRegs, i32>;
+ defm STH : StoreA<"sth", 0b000110, 0b010110, truncstorei16, IntRegs, i32>;
+ defm ST : StoreA<"st", 0b000100, 0b010100, store, IntRegs, i32>;
+}
+
+let DecoderMethod = "DecodeStoreIntPair" in
+ defm STD : StoreA<"std", 0b000111, 0b010111, store, IntPair, v2i32, IIC_std>;
+
+// Section B.5 - Store Floating-point Instructions, p. 97
+let DecoderMethod = "DecodeStoreFP" in {
+ defm STF : Store<"st", 0b100100, store, FPRegs, f32>;
+ def STFArr : StoreASI<"st", 0b110100, store, FPRegs, f32>,
+ Requires<[HasV9]>;
+}
+let DecoderMethod = "DecodeStoreDFP" in {
+ defm STDF : Store<"std", 0b100111, store, DFPRegs, f64, IIC_std>;
+ def STDFArr : StoreASI<"std", 0b110111, store, DFPRegs, f64>,
+ Requires<[HasV9]>;
+}
+let DecoderMethod = "DecodeStoreQFP" in
+ defm STQF : StoreA<"stq", 0b100110, 0b110110, store, QFPRegs, f128>,
+ Requires<[HasV9, HasHardQuad]>;
+
+let DecoderMethod = "DecodeStoreCP" in
+ defm STC : Store<"st", 0b110100, store, CoprocRegs, i32>;
+
+let DecoderMethod = "DecodeStoreCPPair" in
+ defm STDC : Store<"std", 0b110111, store, CoprocPair, v2i32, IIC_std>;
+
+let DecoderMethod = "DecodeStoreCP", rd = 0 in {
+ let Defs = [CPSR] in {
+ def STCSRrr : F3_1<3, 0b110101, (outs MEMrr:$addr), (ins),
+ "st %csr, [$addr]", [], IIC_st>;
+ def STCSRri : F3_2<3, 0b110101, (outs MEMri:$addr), (ins),
+ "st %csr, [$addr]", [], IIC_st>;
+ }
+ let Defs = [CPQ] in {
+ def STDCQrr : F3_1<3, 0b110110, (outs MEMrr:$addr), (ins),
+ "std %cq, [$addr]", [], IIC_std>;
+ def STDCQri : F3_2<3, 0b110110, (outs MEMri:$addr), (ins),
+ "std %cq, [$addr]", [], IIC_std>;
+ }
+}
+
+let DecoderMethod = "DecodeStoreFP" in {
+ let rd = 0 in {
+ let Defs = [FSR] in {
+ def STFSRrr : F3_1<3, 0b100101, (outs MEMrr:$addr), (ins),
+ "st %fsr, [$addr]", [], IIC_st>;
+ def STFSRri : F3_2<3, 0b100101, (outs MEMri:$addr), (ins),
+ "st %fsr, [$addr]", [], IIC_st>;
+ }
+ let Defs = [FQ] in {
+ def STDFQrr : F3_1<3, 0b100110, (outs MEMrr:$addr), (ins),
+ "std %fq, [$addr]", [], IIC_std>;
+ def STDFQri : F3_2<3, 0b100110, (outs MEMri:$addr), (ins),
+ "std %fq, [$addr]", [], IIC_std>;
+ }
+ }
+ let rd = 1, Defs = [FSR] in {
+ def STXFSRrr : F3_1<3, 0b100101, (outs MEMrr:$addr), (ins),
+ "stx %fsr, [$addr]", []>, Requires<[HasV9]>;
+ def STXFSRri : F3_2<3, 0b100101, (outs MEMri:$addr), (ins),
+ "stx %fsr, [$addr]", []>, Requires<[HasV9]>;
+ }
+}
+
+// Section B.8 - SWAP Register with Memory Instruction
+// (Atomic swap)
+let Constraints = "$val = $dst", DecoderMethod = "DecodeSWAP" in {
+ def SWAPrr : F3_1<3, 0b001111,
+ (outs IntRegs:$dst), (ins MEMrr:$addr, IntRegs:$val),
+ "swap [$addr], $dst",
+ [(set i32:$dst, (atomic_swap_32 ADDRrr:$addr, i32:$val))]>;
+ def SWAPri : F3_2<3, 0b001111,
+ (outs IntRegs:$dst), (ins MEMri:$addr, IntRegs:$val),
+ "swap [$addr], $dst",
+ [(set i32:$dst, (atomic_swap_32 ADDRri:$addr, i32:$val))]>;
+ def SWAPArr : F3_1_asi<3, 0b011111,
+ (outs IntRegs:$dst), (ins MEMrr:$addr, i8imm:$asi, IntRegs:$val),
+ "swapa [$addr] $asi, $dst",
+ [/*FIXME: pattern?*/]>;
+}
+
+
+// Section B.9 - SETHI Instruction, p. 104
+def SETHIi: F2_1<0b100,
+ (outs IntRegs:$rd), (ins i32imm:$imm22),
+ "sethi $imm22, $rd",
+ [(set i32:$rd, SETHIimm:$imm22)],
+ IIC_iu_instr>;
+
+// Section B.10 - NOP Instruction, p. 105
+// (It's a special case of SETHI)
+let rd = 0, imm22 = 0 in
+ def NOP : F2_1<0b100, (outs), (ins), "nop", []>;
+
+// Section B.11 - Logical Instructions, p. 106
+defm AND : F3_12<"and", 0b000001, and, IntRegs, i32, simm13Op>;
+
+def ANDNrr : F3_1<2, 0b000101,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2),
+ "andn $rs1, $rs2, $rd",
+ [(set i32:$rd, (and i32:$rs1, (not i32:$rs2)))]>;
+def ANDNri : F3_2<2, 0b000101,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13),
+ "andn $rs1, $simm13, $rd", []>;
+
+defm OR : F3_12<"or", 0b000010, or, IntRegs, i32, simm13Op>;
+
+def ORNrr : F3_1<2, 0b000110,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2),
+ "orn $rs1, $rs2, $rd",
+ [(set i32:$rd, (or i32:$rs1, (not i32:$rs2)))]>;
+def ORNri : F3_2<2, 0b000110,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13),
+ "orn $rs1, $simm13, $rd", []>;
+defm XOR : F3_12<"xor", 0b000011, xor, IntRegs, i32, simm13Op>;
+
+def XNORrr : F3_1<2, 0b000111,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2),
+ "xnor $rs1, $rs2, $rd",
+ [(set i32:$rd, (not (xor i32:$rs1, i32:$rs2)))]>;
+def XNORri : F3_2<2, 0b000111,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13),
+ "xnor $rs1, $simm13, $rd", []>;
+
+let Defs = [ICC] in {
+ defm ANDCC : F3_12np<"andcc", 0b010001>;
+ defm ANDNCC : F3_12np<"andncc", 0b010101>;
+ defm ORCC : F3_12np<"orcc", 0b010010>;
+ defm ORNCC : F3_12np<"orncc", 0b010110>;
+ defm XORCC : F3_12np<"xorcc", 0b010011>;
+ defm XNORCC : F3_12np<"xnorcc", 0b010111>;
+}
+
+// Section B.12 - Shift Instructions, p. 107
+defm SLL : F3_12<"sll", 0b100101, shl, IntRegs, i32, simm13Op>;
+defm SRL : F3_12<"srl", 0b100110, srl, IntRegs, i32, simm13Op>;
+defm SRA : F3_12<"sra", 0b100111, sra, IntRegs, i32, simm13Op>;
+
+// Section B.13 - Add Instructions, p. 108
+defm ADD : F3_12<"add", 0b000000, add, IntRegs, i32, simm13Op>;
+
+// "LEA" forms of add (patterns to make tblgen happy)
+let Predicates = [Is32Bit], isCodeGenOnly = 1 in
+ def LEA_ADDri : F3_2<2, 0b000000,
+ (outs IntRegs:$dst), (ins MEMri:$addr),
+ "add ${addr:arith}, $dst",
+ [(set iPTR:$dst, ADDRri:$addr)]>;
+
+let Defs = [ICC] in
+ defm ADDCC : F3_12<"addcc", 0b010000, addc, IntRegs, i32, simm13Op>;
+
+let Uses = [ICC] in
+ defm ADDC : F3_12np<"addx", 0b001000>;
+
+let Uses = [ICC], Defs = [ICC] in
+ defm ADDE : F3_12<"addxcc", 0b011000, adde, IntRegs, i32, simm13Op>;
+
+// Section B.15 - Subtract Instructions, p. 110
+defm SUB : F3_12 <"sub" , 0b000100, sub, IntRegs, i32, simm13Op>;
+let Uses = [ICC], Defs = [ICC] in
+ defm SUBE : F3_12 <"subxcc" , 0b011100, sube, IntRegs, i32, simm13Op>;
+
+let Defs = [ICC] in
+ defm SUBCC : F3_12 <"subcc", 0b010100, subc, IntRegs, i32, simm13Op>;
+
+let Uses = [ICC] in
+ defm SUBC : F3_12np <"subx", 0b001100>;
+
+// cmp (from Section A.3) is a specialized alias for subcc
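+// (a subcc whose result is discarded by writing it to %g0, hence rd = 0, so
+// only the condition codes are updated)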
+let Defs = [ICC], rd = 0 in {
+ def CMPrr : F3_1<2, 0b010100,
+ (outs), (ins IntRegs:$rs1, IntRegs:$rs2),
+ "cmp $rs1, $rs2",
+ [(SPcmpicc i32:$rs1, i32:$rs2)]>;
+ def CMPri : F3_2<2, 0b010100,
+ (outs), (ins IntRegs:$rs1, simm13Op:$simm13),
+ "cmp $rs1, $simm13",
+ [(SPcmpicc i32:$rs1, (i32 simm13:$simm13))]>;
+}
+
+// Section B.18 - Multiply Instructions, p. 113
+let Defs = [Y] in {
+ defm UMUL : F3_12<"umul", 0b001010, umullohi, IntRegs, i32, simm13Op, IIC_iu_umul>;
+ defm SMUL : F3_12<"smul", 0b001011, smullohi, IntRegs, i32, simm13Op, IIC_iu_smul>;
+}
+
+let Defs = [Y, ICC] in {
+ defm UMULCC : F3_12np<"umulcc", 0b011010, IIC_iu_umul>;
+ defm SMULCC : F3_12np<"smulcc", 0b011011, IIC_iu_smul>;
+}
+
+let Defs = [Y, ICC], Uses = [Y, ICC] in {
+ defm MULSCC : F3_12np<"mulscc", 0b100100>;
+}
+
+// Section B.19 - Divide Instructions, p. 115
+let Uses = [Y], Defs = [Y] in {
+ defm UDIV : F3_12np<"udiv", 0b001110, IIC_iu_div>;
+ defm SDIV : F3_12np<"sdiv", 0b001111, IIC_iu_div>;
+}
+
+let Uses = [Y], Defs = [Y, ICC] in {
+ defm UDIVCC : F3_12np<"udivcc", 0b011110, IIC_iu_div>;
+ defm SDIVCC : F3_12np<"sdivcc", 0b011111, IIC_iu_div>;
+}
+
+// Section B.20 - SAVE and RESTORE, p. 117
+defm SAVE : F3_12np<"save" , 0b111100>;
+defm RESTORE : F3_12np<"restore", 0b111101>;
+
+// Section B.21 - Branch on Integer Condition Codes Instructions, p. 119
+
+// unconditional branch class.
+class BranchAlways<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b010, 0, (outs), ins, asmstr, pattern> {
+ let isBranch = 1;
+ let isTerminator = 1;
+ let hasDelaySlot = 1;
+ let isBarrier = 1;
+}
+
+let cond = 8 in
+ def BA : BranchAlways<(ins brtarget:$imm22), "ba $imm22", [(br bb:$imm22)]>;
+
+
+let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in {
+
+// conditional branch class:
+class BranchSP<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b010, 0, (outs), ins, asmstr, pattern, IIC_iu_instr>;
+
+// conditional branch with annul class:
+class BranchSPA<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b010, 1, (outs), ins, asmstr, pattern, IIC_iu_instr>;
+
+// Conditional branch class on %icc|%xcc with predication:
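+// The CC/CCA/CCNT/CCANT variants enumerate the annul (",a") and prediction
+// (",pt" by default, ",pn" for not-taken) bit combinations of the V9 BPcc
+// encoding.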
+multiclass IPredBranch<string regstr, list<dag> CCPattern> {
+ def CC : F2_3<0b001, 0, 1, (outs), (ins bprtarget:$imm19, CCOp:$cond),
+ !strconcat("b$cond ", !strconcat(regstr, ", $imm19")),
+ CCPattern,
+ IIC_iu_instr>;
+ def CCA : F2_3<0b001, 1, 1, (outs), (ins bprtarget:$imm19, CCOp:$cond),
+ !strconcat("b$cond,a ", !strconcat(regstr, ", $imm19")),
+ [],
+ IIC_iu_instr>;
+ def CCNT : F2_3<0b001, 0, 0, (outs), (ins bprtarget:$imm19, CCOp:$cond),
+ !strconcat("b$cond,pn ", !strconcat(regstr, ", $imm19")),
+ [],
+ IIC_iu_instr>;
+ def CCANT : F2_3<0b001, 1, 0, (outs), (ins bprtarget:$imm19, CCOp:$cond),
+ !strconcat("b$cond,a,pn ", !strconcat(regstr, ", $imm19")),
+ [],
+ IIC_iu_instr>;
+}
+
+} // let isBranch = 1, isTerminator = 1, hasDelaySlot = 1
+
+
+// Indirect branch instructions.
+let isTerminator = 1, isBarrier = 1, hasDelaySlot = 1, isBranch = 1,
+ isIndirectBranch = 1, rd = 0, isCodeGenOnly = 1 in {
+ def BINDrr : F3_1<2, 0b111000,
+ (outs), (ins MEMrr:$ptr),
+ "jmp $ptr",
+ [(brind ADDRrr:$ptr)]>;
+ def BINDri : F3_2<2, 0b111000,
+ (outs), (ins MEMri:$ptr),
+ "jmp $ptr",
+ [(brind ADDRri:$ptr)]>;
+}
+
+let Uses = [ICC] in {
+ def BCOND : BranchSP<(ins brtarget:$imm22, CCOp:$cond),
+ "b$cond $imm22",
+ [(SPbricc bb:$imm22, imm:$cond)]>;
+ def BCONDA : BranchSPA<(ins brtarget:$imm22, CCOp:$cond),
+ "b$cond,a $imm22", []>;
+
+ let Predicates = [HasV9], cc = 0b00 in
+ defm BPI : IPredBranch<"%icc", []>;
+}
+
+// Section B.22 - Branch on Floating-point Condition Codes Instructions, p. 121
+
+let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in {
+
+// floating-point conditional branch class:
+class FPBranchSP<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b110, 0, (outs), ins, asmstr, pattern, IIC_fpu_normal_instr>;
+
+// floating-point conditional branch with annul class:
+class FPBranchSPA<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b110, 1, (outs), ins, asmstr, pattern, IIC_fpu_normal_instr>;
+
+// Conditional branch class on %fcc0-%fcc3 with predication:
+multiclass FPredBranch {
+ def CC : F2_3<0b101, 0, 1, (outs), (ins bprtarget:$imm19, CCOp:$cond,
+ FCCRegs:$cc),
+ "fb$cond $cc, $imm19", [], IIC_fpu_normal_instr>;
+ def CCA : F2_3<0b101, 1, 1, (outs), (ins bprtarget:$imm19, CCOp:$cond,
+ FCCRegs:$cc),
+ "fb$cond,a $cc, $imm19", [], IIC_fpu_normal_instr>;
+ def CCNT : F2_3<0b101, 0, 0, (outs), (ins bprtarget:$imm19, CCOp:$cond,
+ FCCRegs:$cc),
+ "fb$cond,pn $cc, $imm19", [], IIC_fpu_normal_instr>;
+ def CCANT : F2_3<0b101, 1, 0, (outs), (ins bprtarget:$imm19, CCOp:$cond,
+ FCCRegs:$cc),
+ "fb$cond,a,pn $cc, $imm19", [], IIC_fpu_normal_instr>;
+}
+} // let isBranch = 1, isTerminator = 1, hasDelaySlot = 1
+
+let Uses = [FCC0] in {
+ def FBCOND : FPBranchSP<(ins brtarget:$imm22, CCOp:$cond),
+ "fb$cond $imm22",
+ [(SPbrfcc bb:$imm22, imm:$cond)]>;
+ def FBCONDA : FPBranchSPA<(ins brtarget:$imm22, CCOp:$cond),
+ "fb$cond,a $imm22", []>;
+}
+
+let Predicates = [HasV9] in
+ defm BPF : FPredBranch;
+
+// Section B.23 - Branch on Co-processor Condition Codes Instructions, p. 123
+let isBranch = 1, isTerminator = 1, hasDelaySlot = 1 in {
+
+// co-processor conditional branch class:
+class CPBranchSP<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b111, 0, (outs), ins, asmstr, pattern>;
+
+// co-processor conditional branch with annul class:
+class CPBranchSPA<dag ins, string asmstr, list<dag> pattern>
+ : F2_2<0b111, 1, (outs), ins, asmstr, pattern>;
+
+} // let isBranch = 1, isTerminator = 1, hasDelaySlot = 1
+
+def CBCOND : CPBranchSP<(ins brtarget:$imm22, CCOp:$cond),
+ "cb$cond $imm22",
+ [(SPbrfcc bb:$imm22, imm:$cond)]>;
+def CBCONDA : CPBranchSPA<(ins brtarget:$imm22, CCOp:$cond),
+ "cb$cond,a $imm22", []>;
+
+// Section B.24 - Call and Link Instruction, p. 125
+// This is the only Format 1 instruction
+let Uses = [O6],
+ hasDelaySlot = 1, isCall = 1 in {
+ def CALL : InstSP<(outs), (ins calltarget:$disp, variable_ops),
+ "call $disp",
+ [],
+ IIC_jmp_or_call> {
+ bits<30> disp;
+ let op = 1;
+ let Inst{29-0} = disp;
+ }
+
+ // indirect calls: special cases of JMPL.
+ let isCodeGenOnly = 1, rd = 15 in {
+ def CALLrr : F3_1<2, 0b111000,
+ (outs), (ins MEMrr:$ptr, variable_ops),
+ "call $ptr",
+ [(call ADDRrr:$ptr)],
+ IIC_jmp_or_call>;
+ def CALLri : F3_2<2, 0b111000,
+ (outs), (ins MEMri:$ptr, variable_ops),
+ "call $ptr",
+ [(call ADDRri:$ptr)],
+ IIC_jmp_or_call>;
+ }
+}
+
+// Section B.25 - Jump and Link Instruction
+
+// JMPL Instruction.
+let isTerminator = 1, hasDelaySlot = 1, isBarrier = 1,
+ DecoderMethod = "DecodeJMPL" in {
+ def JMPLrr: F3_1<2, 0b111000,
+ (outs IntRegs:$dst), (ins MEMrr:$addr),
+ "jmpl $addr, $dst",
+ [],
+ IIC_jmp_or_call>;
+ def JMPLri: F3_2<2, 0b111000,
+ (outs IntRegs:$dst), (ins MEMri:$addr),
+ "jmpl $addr, $dst",
+ [],
+ IIC_jmp_or_call>;
+}
+
+// Section A.3 - Synthetic Instructions, p. 85
+// special cases of JMPL:
+let isReturn = 1, isTerminator = 1, hasDelaySlot = 1, isBarrier = 1,
+ isCodeGenOnly = 1 in {
+ let rd = 0, rs1 = 15 in
+ def RETL: F3_2<2, 0b111000,
+ (outs), (ins i32imm:$val),
+ "jmp %o7+$val",
+ [(retflag simm13:$val)],
+ IIC_jmp_or_call>;
+
+ let rd = 0, rs1 = 31 in
+ def RET: F3_2<2, 0b111000,
+ (outs), (ins i32imm:$val),
+ "jmp %i7+$val",
+ [],
+ IIC_jmp_or_call>;
+}
+
+// Section B.26 - Return from Trap Instruction
+let isReturn = 1, isTerminator = 1, hasDelaySlot = 1,
+ isBarrier = 1, rd = 0, DecoderMethod = "DecodeReturn" in {
+ def RETTrr : F3_1<2, 0b111001,
+ (outs), (ins MEMrr:$addr),
+ "rett $addr",
+ [],
+ IIC_jmp_or_call>;
+ def RETTri : F3_2<2, 0b111001,
+ (outs), (ins MEMri:$addr),
+ "rett $addr",
+ [],
+ IIC_jmp_or_call>;
+}
+
+
+// Section B.27 - Trap on Integer Condition Codes Instruction
+let DecoderNamespace = "SparcV8", DecoderMethod = "DecodeTRAP", hasSideEffects = 1, Uses = [ICC], cc = 0b00 in
+{
+ def TRAPrr : TRAPSPrr<0b111010,
+ (outs), (ins IntRegs:$rs1, IntRegs:$rs2, CCOp:$cond),
+ "t$cond $rs1 + $rs2",
+ []>;
+ def TRAPri : TRAPSPri<0b111010,
+ (outs), (ins IntRegs:$rs1, i32imm:$imm, CCOp:$cond),
+ "t$cond $rs1 + $imm",
+ []>;
+}
+
+multiclass TRAP<string regStr> {
+ def rr : TRAPSPrr<0b111010,
+ (outs), (ins IntRegs:$rs1, IntRegs:$rs2, CCOp:$cond),
+ !strconcat(!strconcat("t$cond ", regStr), ", $rs1 + $rs2"),
+ []>;
+ def ri : TRAPSPri<0b111010,
+ (outs), (ins IntRegs:$rs1, i32imm:$imm, CCOp:$cond),
+ !strconcat(!strconcat("t$cond ", regStr), ", $rs1 + $imm"),
+ []>;
+}
+
+let DecoderNamespace = "SparcV9", DecoderMethod = "DecodeTRAP", Predicates = [HasV9], hasSideEffects = 1, Uses = [ICC], cc = 0b00 in
+ defm TICC : TRAP<"%icc">;
+
+
+let isBarrier = 1, isTerminator = 1, rd = 0b01000, rs1 = 0, simm13 = 5 in
+ def TA5 : F3_2<0b10, 0b111010, (outs), (ins), "ta 5", [(trap)]>;
+
+// Section B.28 - Read State Register Instructions
+let rs2 = 0 in
+ def RDASR : F3_1<2, 0b101000,
+ (outs IntRegs:$rd), (ins ASRRegs:$rs1),
+ "rd $rs1, $rd", []>;
+
+// PSR, WIM, and TBR don't exist on the SparcV9, only the V8.
+let Predicates = [HasNoV9] in {
+ let rs2 = 0, rs1 = 0, Uses=[PSR] in
+ def RDPSR : F3_1<2, 0b101001,
+ (outs IntRegs:$rd), (ins),
+ "rd %psr, $rd", []>;
+
+ let rs2 = 0, rs1 = 0, Uses=[WIM] in
+ def RDWIM : F3_1<2, 0b101010,
+ (outs IntRegs:$rd), (ins),
+ "rd %wim, $rd", []>;
+
+ let rs2 = 0, rs1 = 0, Uses=[TBR] in
+ def RDTBR : F3_1<2, 0b101011,
+ (outs IntRegs:$rd), (ins),
+ "rd %tbr, $rd", []>;
+}
+
+// Section B.29 - Write State Register Instructions
+def WRASRrr : F3_1<2, 0b110000,
+ (outs ASRRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2),
+ "wr $rs1, $rs2, $rd", []>;
+def WRASRri : F3_2<2, 0b110000,
+ (outs ASRRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13),
+ "wr $rs1, $simm13, $rd", []>;
+
+// PSR, WIM, and TBR don't exist on the SparcV9, only the V8.
+let Predicates = [HasNoV9] in {
+ let Defs = [PSR], rd=0 in {
+ def WRPSRrr : F3_1<2, 0b110001,
+ (outs), (ins IntRegs:$rs1, IntRegs:$rs2),
+ "wr $rs1, $rs2, %psr", []>;
+ def WRPSRri : F3_2<2, 0b110001,
+ (outs), (ins IntRegs:$rs1, simm13Op:$simm13),
+ "wr $rs1, $simm13, %psr", []>;
+ }
+
+ let Defs = [WIM], rd=0 in {
+ def WRWIMrr : F3_1<2, 0b110010,
+ (outs), (ins IntRegs:$rs1, IntRegs:$rs2),
+ "wr $rs1, $rs2, %wim", []>;
+ def WRWIMri : F3_2<2, 0b110010,
+ (outs), (ins IntRegs:$rs1, simm13Op:$simm13),
+ "wr $rs1, $simm13, %wim", []>;
+ }
+
+ let Defs = [TBR], rd=0 in {
+ def WRTBRrr : F3_1<2, 0b110011,
+ (outs), (ins IntRegs:$rs1, IntRegs:$rs2),
+ "wr $rs1, $rs2, %tbr", []>;
+ def WRTBRri : F3_2<2, 0b110011,
+ (outs), (ins IntRegs:$rs1, simm13Op:$simm13),
+ "wr $rs1, $simm13, %tbr", []>;
+ }
+}
+
+// Section B.30 - STBAR Instruction
+let hasSideEffects = 1, rd = 0, rs1 = 0b01111, rs2 = 0 in
+ def STBAR : F3_1<2, 0b101000, (outs), (ins), "stbar", []>;
+
+
+// Section B.31 - Unimplemented Instruction
+let rd = 0 in
+ def UNIMP : F2_1<0b000, (outs), (ins i32imm:$imm22),
+ "unimp $imm22", []>;
+
+// Section B.32 - Flush Instruction Memory
+let rd = 0 in {
+ def FLUSHrr : F3_1<2, 0b111011, (outs), (ins MEMrr:$addr),
+ "flush $addr", []>;
+ def FLUSHri : F3_2<2, 0b111011, (outs), (ins MEMri:$addr),
+ "flush $addr", []>;
+
+  // The no-arg FLUSH is only here for the benefit of the InstAlias
+  // "flush", which cannot use FLUSHrr because a MEMrr operand with
+  // fixed G0 registers cannot be constructed.
+ let rs1 = 0, rs2 = 0 in
+ def FLUSH : F3_1<2, 0b111011, (outs), (ins), "flush %g0", []>;
+}
+
+// Section B.33 - Floating-point Operate (FPop) Instructions
+
+// Convert Integer to Floating-point Instructions, p. 141
+def FITOS : F3_3u<2, 0b110100, 0b011000100,
+ (outs FPRegs:$rd), (ins FPRegs:$rs2),
+ "fitos $rs2, $rd",
+ [(set FPRegs:$rd, (SPitof FPRegs:$rs2))],
+ IIC_fpu_fast_instr>;
+def FITOD : F3_3u<2, 0b110100, 0b011001000,
+ (outs DFPRegs:$rd), (ins FPRegs:$rs2),
+ "fitod $rs2, $rd",
+ [(set DFPRegs:$rd, (SPitof FPRegs:$rs2))],
+ IIC_fpu_fast_instr>;
+def FITOQ : F3_3u<2, 0b110100, 0b011001100,
+ (outs QFPRegs:$rd), (ins FPRegs:$rs2),
+ "fitoq $rs2, $rd",
+ [(set QFPRegs:$rd, (SPitof FPRegs:$rs2))]>,
+ Requires<[HasHardQuad]>;
+
+// Convert Floating-point to Integer Instructions, p. 142
+def FSTOI : F3_3u<2, 0b110100, 0b011010001,
+ (outs FPRegs:$rd), (ins FPRegs:$rs2),
+ "fstoi $rs2, $rd",
+ [(set FPRegs:$rd, (SPftoi FPRegs:$rs2))],
+ IIC_fpu_fast_instr>;
+def FDTOI : F3_3u<2, 0b110100, 0b011010010,
+ (outs FPRegs:$rd), (ins DFPRegs:$rs2),
+ "fdtoi $rs2, $rd",
+ [(set FPRegs:$rd, (SPftoi DFPRegs:$rs2))],
+ IIC_fpu_fast_instr>;
+def FQTOI : F3_3u<2, 0b110100, 0b011010011,
+ (outs FPRegs:$rd), (ins QFPRegs:$rs2),
+ "fqtoi $rs2, $rd",
+ [(set FPRegs:$rd, (SPftoi QFPRegs:$rs2))]>,
+ Requires<[HasHardQuad]>;
+
+// Convert between Floating-point Formats Instructions, p. 143
+def FSTOD : F3_3u<2, 0b110100, 0b011001001,
+ (outs DFPRegs:$rd), (ins FPRegs:$rs2),
+ "fstod $rs2, $rd",
+ [(set f64:$rd, (fpextend f32:$rs2))],
+ IIC_fpu_stod>;
+def FSTOQ : F3_3u<2, 0b110100, 0b011001101,
+ (outs QFPRegs:$rd), (ins FPRegs:$rs2),
+ "fstoq $rs2, $rd",
+ [(set f128:$rd, (fpextend f32:$rs2))]>,
+ Requires<[HasHardQuad]>;
+def FDTOS : F3_3u<2, 0b110100, 0b011000110,
+ (outs FPRegs:$rd), (ins DFPRegs:$rs2),
+ "fdtos $rs2, $rd",
+ [(set f32:$rd, (fpround f64:$rs2))],
+ IIC_fpu_fast_instr>;
+def FDTOQ : F3_3u<2, 0b110100, 0b011001110,
+ (outs QFPRegs:$rd), (ins DFPRegs:$rs2),
+ "fdtoq $rs2, $rd",
+ [(set f128:$rd, (fpextend f64:$rs2))]>,
+ Requires<[HasHardQuad]>;
+def FQTOS : F3_3u<2, 0b110100, 0b011000111,
+ (outs FPRegs:$rd), (ins QFPRegs:$rs2),
+ "fqtos $rs2, $rd",
+ [(set f32:$rd, (fpround f128:$rs2))]>,
+ Requires<[HasHardQuad]>;
+def FQTOD : F3_3u<2, 0b110100, 0b011001011,
+ (outs DFPRegs:$rd), (ins QFPRegs:$rs2),
+ "fqtod $rs2, $rd",
+ [(set f64:$rd, (fpround f128:$rs2))]>,
+ Requires<[HasHardQuad]>;
+
+// Floating-point Move Instructions, p. 144
+def FMOVS : F3_3u<2, 0b110100, 0b000000001,
+ (outs FPRegs:$rd), (ins FPRegs:$rs2),
+ "fmovs $rs2, $rd", []>;
+def FNEGS : F3_3u<2, 0b110100, 0b000000101,
+ (outs FPRegs:$rd), (ins FPRegs:$rs2),
+ "fnegs $rs2, $rd",
+ [(set f32:$rd, (fneg f32:$rs2))],
+ IIC_fpu_negs>;
+def FABSS : F3_3u<2, 0b110100, 0b000001001,
+ (outs FPRegs:$rd), (ins FPRegs:$rs2),
+ "fabss $rs2, $rd",
+ [(set f32:$rd, (fabs f32:$rs2))],
+ IIC_fpu_abs>;
+
+
+// Floating-point Square Root Instructions, p. 145
+// FSQRTS triggers an erratum on LEON processors, so when this instruction is
+// disabled, single-precision square roots are promoted to FSQRTD on doubles.
+let Predicates = [HasNoFdivSqrtFix] in
+def FSQRTS : F3_3u<2, 0b110100, 0b000101001,
+ (outs FPRegs:$rd), (ins FPRegs:$rs2),
+ "fsqrts $rs2, $rd",
+ [(set f32:$rd, (fsqrt f32:$rs2))],
+ IIC_fpu_sqrts>;
+def FSQRTD : F3_3u<2, 0b110100, 0b000101010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs2),
+ "fsqrtd $rs2, $rd",
+ [(set f64:$rd, (fsqrt f64:$rs2))],
+ IIC_fpu_sqrtd>;
+def FSQRTQ : F3_3u<2, 0b110100, 0b000101011,
+ (outs QFPRegs:$rd), (ins QFPRegs:$rs2),
+ "fsqrtq $rs2, $rd",
+ [(set f128:$rd, (fsqrt f128:$rs2))]>,
+ Requires<[HasHardQuad]>;
+
+
+
+// Floating-point Add and Subtract Instructions, p. 146
+def FADDS : F3_3<2, 0b110100, 0b001000001,
+ (outs FPRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
+ "fadds $rs1, $rs2, $rd",
+ [(set f32:$rd, (fadd f32:$rs1, f32:$rs2))],
+ IIC_fpu_fast_instr>;
+def FADDD : F3_3<2, 0b110100, 0b001000010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "faddd $rs1, $rs2, $rd",
+ [(set f64:$rd, (fadd f64:$rs1, f64:$rs2))],
+ IIC_fpu_fast_instr>;
+def FADDQ : F3_3<2, 0b110100, 0b001000011,
+ (outs QFPRegs:$rd), (ins QFPRegs:$rs1, QFPRegs:$rs2),
+ "faddq $rs1, $rs2, $rd",
+ [(set f128:$rd, (fadd f128:$rs1, f128:$rs2))]>,
+ Requires<[HasHardQuad]>;
+
+def FSUBS : F3_3<2, 0b110100, 0b001000101,
+ (outs FPRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
+ "fsubs $rs1, $rs2, $rd",
+ [(set f32:$rd, (fsub f32:$rs1, f32:$rs2))],
+ IIC_fpu_fast_instr>;
+def FSUBD : F3_3<2, 0b110100, 0b001000110,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fsubd $rs1, $rs2, $rd",
+ [(set f64:$rd, (fsub f64:$rs1, f64:$rs2))],
+ IIC_fpu_fast_instr>;
+def FSUBQ : F3_3<2, 0b110100, 0b001000111,
+ (outs QFPRegs:$rd), (ins QFPRegs:$rs1, QFPRegs:$rs2),
+ "fsubq $rs1, $rs2, $rd",
+ [(set f128:$rd, (fsub f128:$rs1, f128:$rs2))]>,
+ Requires<[HasHardQuad]>;
+
+
+// Floating-point Multiply and Divide Instructions, p. 147
+def FMULS : F3_3<2, 0b110100, 0b001001001,
+ (outs FPRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
+ "fmuls $rs1, $rs2, $rd",
+ [(set f32:$rd, (fmul f32:$rs1, f32:$rs2))],
+ IIC_fpu_muls>,
+ Requires<[HasFMULS]>;
+def FMULD : F3_3<2, 0b110100, 0b001001010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fmuld $rs1, $rs2, $rd",
+ [(set f64:$rd, (fmul f64:$rs1, f64:$rs2))],
+ IIC_fpu_muld>;
+def FMULQ : F3_3<2, 0b110100, 0b001001011,
+ (outs QFPRegs:$rd), (ins QFPRegs:$rs1, QFPRegs:$rs2),
+ "fmulq $rs1, $rs2, $rd",
+ [(set f128:$rd, (fmul f128:$rs1, f128:$rs2))]>,
+ Requires<[HasHardQuad]>;
+
+def FSMULD : F3_3<2, 0b110100, 0b001101001,
+ (outs DFPRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
+ "fsmuld $rs1, $rs2, $rd",
+ [(set f64:$rd, (fmul (fpextend f32:$rs1),
+ (fpextend f32:$rs2)))],
+ IIC_fpu_muld>,
+ Requires<[HasFSMULD]>;
+def FDMULQ : F3_3<2, 0b110100, 0b001101110,
+ (outs QFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fdmulq $rs1, $rs2, $rd",
+ [(set f128:$rd, (fmul (fpextend f64:$rs1),
+ (fpextend f64:$rs2)))]>,
+ Requires<[HasHardQuad]>;
+
+// FDIVS triggers an erratum on LEON processors, so when this instruction is
+// disabled, single-precision divides are promoted to FDIVD on doubles.
+def FDIVS : F3_3<2, 0b110100, 0b001001101,
+ (outs FPRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
+ "fdivs $rs1, $rs2, $rd",
+ [(set f32:$rd, (fdiv f32:$rs1, f32:$rs2))],
+ IIC_fpu_divs>;
+def FDIVD : F3_3<2, 0b110100, 0b001001110,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fdivd $rs1, $rs2, $rd",
+ [(set f64:$rd, (fdiv f64:$rs1, f64:$rs2))],
+ IIC_fpu_divd>;
+def FDIVQ : F3_3<2, 0b110100, 0b001001111,
+ (outs QFPRegs:$rd), (ins QFPRegs:$rs1, QFPRegs:$rs2),
+ "fdivq $rs1, $rs2, $rd",
+ [(set f128:$rd, (fdiv f128:$rs1, f128:$rs2))]>,
+ Requires<[HasHardQuad]>;
+
+// Floating-point Compare Instructions, p. 148
+// Note: the 2nd template argument is different for these instructions.
+// Note 2: the result of an FCMP is not available until the 2nd cycle
+// after the instruction is retired, but there is no interlock in Sparc V8.
+// This behavior is modeled with a forced NOP after the instruction in
+// DelaySlotFiller.
+
+let Defs = [FCC0], rd = 0, isCodeGenOnly = 1 in {
+ def FCMPS : F3_3c<2, 0b110101, 0b001010001,
+ (outs), (ins FPRegs:$rs1, FPRegs:$rs2),
+ "fcmps $rs1, $rs2",
+ [(SPcmpfcc f32:$rs1, f32:$rs2)],
+ IIC_fpu_fast_instr>;
+ def FCMPD : F3_3c<2, 0b110101, 0b001010010,
+ (outs), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fcmpd $rs1, $rs2",
+ [(SPcmpfcc f64:$rs1, f64:$rs2)],
+ IIC_fpu_fast_instr>;
+ def FCMPQ : F3_3c<2, 0b110101, 0b001010011,
+ (outs), (ins QFPRegs:$rs1, QFPRegs:$rs2),
+ "fcmpq $rs1, $rs2",
+ [(SPcmpfcc f128:$rs1, f128:$rs2)]>,
+ Requires<[HasHardQuad]>;
+}
+
+//===----------------------------------------------------------------------===//
+// Instructions for Thread Local Storage (TLS).
+//===----------------------------------------------------------------------===//
+let isCodeGenOnly = 1, isAsmParserOnly = 1 in {
+def TLS_ADDrr : F3_1<2, 0b000000,
+ (outs IntRegs:$rd),
+ (ins IntRegs:$rs1, IntRegs:$rs2, TLSSym:$sym),
+ "add $rs1, $rs2, $rd, $sym",
+ [(set i32:$rd,
+ (tlsadd i32:$rs1, i32:$rs2, tglobaltlsaddr:$sym))]>;
+
+let mayLoad = 1 in
+ def TLS_LDrr : F3_1<3, 0b000000,
+ (outs IntRegs:$dst), (ins MEMrr:$addr, TLSSym:$sym),
+ "ld [$addr], $dst, $sym",
+ [(set i32:$dst,
+ (tlsld ADDRrr:$addr, tglobaltlsaddr:$sym))]>;
+
+let Uses = [O6], isCall = 1, hasDelaySlot = 1 in
+ def TLS_CALL : InstSP<(outs),
+ (ins calltarget:$disp, TLSSym:$sym, variable_ops),
+ "call $disp, $sym",
+ [(tlscall texternalsym:$disp, tglobaltlsaddr:$sym)],
+ IIC_jmp_or_call> {
+ bits<30> disp;
+ let op = 1;
+ let Inst{29-0} = disp;
+}
+}
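+// As a sketch (assuming the standard SPARC ELF TLS ABI), a General Dynamic
+// access to a thread-local `sym` uses the instructions above roughly as:
+//   sethi %tgd_hi22(sym), %l1
+//   add   %l1, %tgd_lo10(sym), %l1
+//   add   %l7, %l1, %o0, %tgd_add(sym)
+//   call  __tls_get_addr, %tgd_call(sym)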
+
+//===----------------------------------------------------------------------===//
+// V9 Instructions
+//===----------------------------------------------------------------------===//
+
+// V9 Conditional Moves.
+let Predicates = [HasV9], Constraints = "$f = $rd" in {
+ // Move Integer Register on Condition (MOVcc) p. 194 of the V9 manual.
+ let Uses = [ICC], intcc = 1, cc = 0b00 in {
+ def MOVICCrr
+ : F4_1<0b101100, (outs IntRegs:$rd),
+ (ins IntRegs:$rs2, IntRegs:$f, CCOp:$cond),
+ "mov$cond %icc, $rs2, $rd",
+ [(set i32:$rd, (SPselecticc i32:$rs2, i32:$f, imm:$cond))]>;
+
+ def MOVICCri
+ : F4_2<0b101100, (outs IntRegs:$rd),
+ (ins i32imm:$simm11, IntRegs:$f, CCOp:$cond),
+ "mov$cond %icc, $simm11, $rd",
+ [(set i32:$rd,
+ (SPselecticc simm11:$simm11, i32:$f, imm:$cond))]>;
+ }
+
+ let Uses = [FCC0], intcc = 0, cc = 0b00 in {
+ def MOVFCCrr
+ : F4_1<0b101100, (outs IntRegs:$rd),
+ (ins IntRegs:$rs2, IntRegs:$f, CCOp:$cond),
+ "mov$cond %fcc0, $rs2, $rd",
+ [(set i32:$rd, (SPselectfcc i32:$rs2, i32:$f, imm:$cond))]>;
+ def MOVFCCri
+ : F4_2<0b101100, (outs IntRegs:$rd),
+ (ins i32imm:$simm11, IntRegs:$f, CCOp:$cond),
+ "mov$cond %fcc0, $simm11, $rd",
+ [(set i32:$rd,
+ (SPselectfcc simm11:$simm11, i32:$f, imm:$cond))]>;
+ }
+
+ let Uses = [ICC], intcc = 1, opf_cc = 0b00 in {
+ def FMOVS_ICC
+ : F4_3<0b110101, 0b000001, (outs FPRegs:$rd),
+ (ins FPRegs:$rs2, FPRegs:$f, CCOp:$cond),
+ "fmovs$cond %icc, $rs2, $rd",
+ [(set f32:$rd, (SPselecticc f32:$rs2, f32:$f, imm:$cond))]>;
+ def FMOVD_ICC
+ : F4_3<0b110101, 0b000010, (outs DFPRegs:$rd),
+ (ins DFPRegs:$rs2, DFPRegs:$f, CCOp:$cond),
+ "fmovd$cond %icc, $rs2, $rd",
+ [(set f64:$rd, (SPselecticc f64:$rs2, f64:$f, imm:$cond))]>;
+ def FMOVQ_ICC
+ : F4_3<0b110101, 0b000011, (outs QFPRegs:$rd),
+ (ins QFPRegs:$rs2, QFPRegs:$f, CCOp:$cond),
+ "fmovq$cond %icc, $rs2, $rd",
+ [(set f128:$rd, (SPselecticc f128:$rs2, f128:$f, imm:$cond))]>,
+ Requires<[HasHardQuad]>;
+ }
+
+ let Uses = [FCC0], intcc = 0, opf_cc = 0b00 in {
+ def FMOVS_FCC
+ : F4_3<0b110101, 0b000001, (outs FPRegs:$rd),
+ (ins FPRegs:$rs2, FPRegs:$f, CCOp:$cond),
+ "fmovs$cond %fcc0, $rs2, $rd",
+ [(set f32:$rd, (SPselectfcc f32:$rs2, f32:$f, imm:$cond))]>;
+ def FMOVD_FCC
+ : F4_3<0b110101, 0b000010, (outs DFPRegs:$rd),
+ (ins DFPRegs:$rs2, DFPRegs:$f, CCOp:$cond),
+ "fmovd$cond %fcc0, $rs2, $rd",
+ [(set f64:$rd, (SPselectfcc f64:$rs2, f64:$f, imm:$cond))]>;
+ def FMOVQ_FCC
+ : F4_3<0b110101, 0b000011, (outs QFPRegs:$rd),
+ (ins QFPRegs:$rs2, QFPRegs:$f, CCOp:$cond),
+ "fmovq$cond %fcc0, $rs2, $rd",
+ [(set f128:$rd, (SPselectfcc f128:$rs2, f128:$f, imm:$cond))]>,
+ Requires<[HasHardQuad]>;
+ }
+
+}
+
+// Floating-Point Move Instructions, p. 164 of the V9 manual.
+let Predicates = [HasV9] in {
+ def FMOVD : F3_3u<2, 0b110100, 0b000000010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs2),
+ "fmovd $rs2, $rd", []>;
+ def FMOVQ : F3_3u<2, 0b110100, 0b000000011,
+ (outs QFPRegs:$rd), (ins QFPRegs:$rs2),
+ "fmovq $rs2, $rd", []>,
+ Requires<[HasHardQuad]>;
+ def FNEGD : F3_3u<2, 0b110100, 0b000000110,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs2),
+ "fnegd $rs2, $rd",
+ [(set f64:$rd, (fneg f64:$rs2))]>;
+ def FNEGQ : F3_3u<2, 0b110100, 0b000000111,
+ (outs QFPRegs:$rd), (ins QFPRegs:$rs2),
+ "fnegq $rs2, $rd",
+ [(set f128:$rd, (fneg f128:$rs2))]>,
+ Requires<[HasHardQuad]>;
+ def FABSD : F3_3u<2, 0b110100, 0b000001010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs2),
+ "fabsd $rs2, $rd",
+ [(set f64:$rd, (fabs f64:$rs2))]>;
+ def FABSQ : F3_3u<2, 0b110100, 0b000001011,
+ (outs QFPRegs:$rd), (ins QFPRegs:$rs2),
+ "fabsq $rs2, $rd",
+ [(set f128:$rd, (fabs f128:$rs2))]>,
+ Requires<[HasHardQuad]>;
+}
+
+// Floating-point compare instruction with %fcc0-%fcc3.
+def V9FCMPS : F3_3c<2, 0b110101, 0b001010001,
+ (outs FCCRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
+ "fcmps $rd, $rs1, $rs2", []>;
+def V9FCMPD : F3_3c<2, 0b110101, 0b001010010,
+ (outs FCCRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fcmpd $rd, $rs1, $rs2", []>;
+def V9FCMPQ : F3_3c<2, 0b110101, 0b001010011,
+ (outs FCCRegs:$rd), (ins QFPRegs:$rs1, QFPRegs:$rs2),
+ "fcmpq $rd, $rs1, $rs2", []>,
+ Requires<[HasHardQuad]>;
+
+let hasSideEffects = 1 in {
+ def V9FCMPES : F3_3c<2, 0b110101, 0b001010101,
+ (outs FCCRegs:$rd), (ins FPRegs:$rs1, FPRegs:$rs2),
+ "fcmpes $rd, $rs1, $rs2", []>;
+ def V9FCMPED : F3_3c<2, 0b110101, 0b001010110,
+ (outs FCCRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fcmped $rd, $rs1, $rs2", []>;
+ def V9FCMPEQ : F3_3c<2, 0b110101, 0b001010111,
+ (outs FCCRegs:$rd), (ins QFPRegs:$rs1, QFPRegs:$rs2),
+ "fcmpeq $rd, $rs1, $rs2", []>,
+ Requires<[HasHardQuad]>;
+}
+
+// Floating-point conditional move instructions with %fcc0-%fcc3.
+let Predicates = [HasV9] in {
+ let Constraints = "$f = $rd", intcc = 0 in {
+ def V9MOVFCCrr
+ : F4_1<0b101100, (outs IntRegs:$rd),
+ (ins FCCRegs:$cc, IntRegs:$rs2, IntRegs:$f, CCOp:$cond),
+ "mov$cond $cc, $rs2, $rd", []>;
+ def V9MOVFCCri
+ : F4_2<0b101100, (outs IntRegs:$rd),
+ (ins FCCRegs:$cc, i32imm:$simm11, IntRegs:$f, CCOp:$cond),
+ "mov$cond $cc, $simm11, $rd", []>;
+ def V9FMOVS_FCC
+ : F4_3<0b110101, 0b000001, (outs FPRegs:$rd),
+ (ins FCCRegs:$opf_cc, FPRegs:$rs2, FPRegs:$f, CCOp:$cond),
+ "fmovs$cond $opf_cc, $rs2, $rd", []>;
+ def V9FMOVD_FCC
+ : F4_3<0b110101, 0b000010, (outs DFPRegs:$rd),
+ (ins FCCRegs:$opf_cc, DFPRegs:$rs2, DFPRegs:$f, CCOp:$cond),
+ "fmovd$cond $opf_cc, $rs2, $rd", []>;
+ def V9FMOVQ_FCC
+ : F4_3<0b110101, 0b000011, (outs QFPRegs:$rd),
+ (ins FCCRegs:$opf_cc, QFPRegs:$rs2, QFPRegs:$f, CCOp:$cond),
+ "fmovq$cond $opf_cc, $rs2, $rd", []>,
+ Requires<[HasHardQuad]>;
+ } // Constraints = "$f = $rd", ...
+} // let Predicates = [hasV9]
+
+
+// POPCrr - This does a ctpop of a 64-bit register, so the top 32 bits must
+// be cleared before use. The clearing is done with an SRLri X, 0.
+let rs1 = 0 in
+ def POPCrr : F3_1<2, 0b101110,
+ (outs IntRegs:$rd), (ins IntRegs:$rs2),
+ "popc $rs2, $rd", []>, Requires<[HasV9]>;
+def : Pat<(ctpop i32:$src),
+ (POPCrr (SRLri $src, 0))>;
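+// For example, (ctpop i32 %x) is selected as (register names illustrative):
+//   srl  %x, 0, %g1      ! zero-extend the 32-bit value to 64 bits
+//   popc %g1, %rd        ! count the set bits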
+
+let Predicates = [HasV9], hasSideEffects = 1, rd = 0, rs1 = 0b01111 in
+ def MEMBARi : F3_2<2, 0b101000, (outs), (ins simm13Op:$simm13),
+ "membar $simm13", []>;
+
+// The CAS instruction, unlike other instructions, only comes in a
+// form which requires an ASI be provided. The ASI value hardcoded
+// here is ASI_PRIMARY, the default unprivileged ASI for SparcV9.
+let Predicates = [HasV9], Constraints = "$swap = $rd", asi = 0b10000000 in
+ def CASrr: F3_1_asi<3, 0b111100,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2,
+ IntRegs:$swap),
+ "cas [$rs1], $rs2, $rd",
+ [(set i32:$rd,
+ (atomic_cmp_swap_32 iPTR:$rs1, i32:$rs2, i32:$swap))]>;
+
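+// As an illustration (not from this source), a compiler builtin such as
+// __sync_val_compare_and_swap(p, old, new) produces the atomic_cmp_swap_32
+// node matched above and is emitted as a single "cas [$rs1], $rs2, $rd",
+// with $rd carrying the new value in and the old memory value out.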
+
+// CASA is supported as an instruction on some LEON3 and all LEON4 processors.
+// This version can be lowered automatically from C code, selecting ASI 10.
+let Predicates = [HasLeonCASA], Constraints = "$swap = $rd", asi = 0b00001010 in
+ def CASAasi10: F3_1_asi<3, 0b111100,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2,
+ IntRegs:$swap),
+ "casa [$rs1] 10, $rs2, $rd",
+ [(set i32:$rd,
+ (atomic_cmp_swap_32 iPTR:$rs1, i32:$rs2, i32:$swap))]>;
+
+// CASA is supported on some LEON3 and all LEON4 processors. Same pattern as
+// CASrr, above, but with a different ASI. This version is supported for
+// inline assembly lowering only.
+let Predicates = [HasLeonCASA], Constraints = "$swap = $rd" in
+ def CASArr: F3_1_asi<3, 0b111100,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2,
+ IntRegs:$swap, i8imm:$asi),
+ "casa [$rs1] $asi, $rs2, $rd", []>;
+
+// TODO: Add DAG sequence to lower these instructions. Currently, only provided
+// as inline assembler-supported instructions.
+let Predicates = [HasUMAC_SMAC], Defs = [Y, ASR18], Uses = [Y, ASR18] in {
+ def SMACrr : F3_1<2, 0b111111,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2, ASRRegs:$asr18),
+ "smac $rs1, $rs2, $rd",
+ [], IIC_smac_umac>;
+
+ def SMACri : F3_2<2, 0b111111,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13, ASRRegs:$asr18),
+ "smac $rs1, $simm13, $rd",
+ [], IIC_smac_umac>;
+
+ def UMACrr : F3_1<2, 0b111110,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2, ASRRegs:$asr18),
+ "umac $rs1, $rs2, $rd",
+ [], IIC_smac_umac>;
+
+ def UMACri : F3_2<2, 0b111110,
+ (outs IntRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13, ASRRegs:$asr18),
+ "umac $rs1, $simm13, $rd",
+ [], IIC_smac_umac>;
+}
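+// A hypothetical use from C via inline assembly (names and constraints are
+// illustrative, and the implicit Y/ASR18 side effects are ignored for
+// brevity):
+//   int32_t r;
+//   asm volatile("smac %1, %2, %0" : "=r"(r) : "r"(a), "r"(b));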
+
+let Defs = [ICC] in {
+defm TADDCC : F3_12np<"taddcc", 0b100000>;
+defm TSUBCC : F3_12np<"tsubcc", 0b100001>;
+
+let hasSideEffects = 1 in {
+ defm TADDCCTV : F3_12np<"taddcctv", 0b100010>;
+ defm TSUBCCTV : F3_12np<"tsubcctv", 0b100011>;
+}
+}
+
+
+// Section A.43 - Read Privileged Register Instructions
+let Predicates = [HasV9] in {
+let rs2 = 0 in
+ def RDPR : F3_1<2, 0b101010,
+ (outs IntRegs:$rd), (ins PRRegs:$rs1),
+ "rdpr $rs1, $rd", []>;
+}
+
+// Section A.62 - Write Privileged Register Instructions
+let Predicates = [HasV9] in {
+ def WRPRrr : F3_1<2, 0b110010,
+ (outs PRRegs:$rd), (ins IntRegs:$rs1, IntRegs:$rs2),
+ "wrpr $rs1, $rs2, $rd", []>;
+ def WRPRri : F3_2<2, 0b110010,
+ (outs PRRegs:$rd), (ins IntRegs:$rs1, simm13Op:$simm13),
+ "wrpr $rs1, $simm13, $rd", []>;
+}
+
+//===----------------------------------------------------------------------===//
+// Non-Instruction Patterns
+//===----------------------------------------------------------------------===//
+
+// Small immediates.
+def : Pat<(i32 simm13:$val),
+ (ORri (i32 G0), imm:$val)>;
+// Arbitrary immediates.
+def : Pat<(i32 imm:$val),
+ (ORri (SETHIi (HI22 imm:$val)), (LO10 imm:$val))>;
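+// For example, materializing the 32-bit constant 0x12345678 with the pattern
+// above yields (illustrative):
+//   sethi %hi(0x12345678), %rd        ! top 22 bits
+//   or    %rd, %lo(0x12345678), %rd   ! low 10 bits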
+
+
+// Global addresses, constant pool entries
+let Predicates = [Is32Bit] in {
+
+def : Pat<(SPhi tglobaladdr:$in), (SETHIi tglobaladdr:$in)>;
+def : Pat<(SPlo tglobaladdr:$in), (ORri (i32 G0), tglobaladdr:$in)>;
+def : Pat<(SPhi tconstpool:$in), (SETHIi tconstpool:$in)>;
+def : Pat<(SPlo tconstpool:$in), (ORri (i32 G0), tconstpool:$in)>;
+
+// GlobalTLS addresses
+def : Pat<(SPhi tglobaltlsaddr:$in), (SETHIi tglobaltlsaddr:$in)>;
+def : Pat<(SPlo tglobaltlsaddr:$in), (ORri (i32 G0), tglobaltlsaddr:$in)>;
+def : Pat<(add (SPhi tglobaltlsaddr:$in1), (SPlo tglobaltlsaddr:$in2)),
+ (ADDri (SETHIi tglobaltlsaddr:$in1), (tglobaltlsaddr:$in2))>;
+def : Pat<(xor (SPhi tglobaltlsaddr:$in1), (SPlo tglobaltlsaddr:$in2)),
+ (XORri (SETHIi tglobaltlsaddr:$in1), (tglobaltlsaddr:$in2))>;
+
+// Blockaddress
+def : Pat<(SPhi tblockaddress:$in), (SETHIi tblockaddress:$in)>;
+def : Pat<(SPlo tblockaddress:$in), (ORri (i32 G0), tblockaddress:$in)>;
+
+// Add reg, lo. This is used when taking the addr of a global/constpool entry.
+def : Pat<(add iPTR:$r, (SPlo tglobaladdr:$in)), (ADDri $r, tglobaladdr:$in)>;
+def : Pat<(add iPTR:$r, (SPlo tconstpool:$in)), (ADDri $r, tconstpool:$in)>;
+def : Pat<(add iPTR:$r, (SPlo tblockaddress:$in)),
+ (ADDri $r, tblockaddress:$in)>;
+}
+
+// Calls:
+def : Pat<(call tglobaladdr:$dst),
+ (CALL tglobaladdr:$dst)>;
+def : Pat<(call texternalsym:$dst),
+ (CALL texternalsym:$dst)>;
+
+// Map integer extload's to zextloads.
+def : Pat<(i32 (extloadi1 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
+def : Pat<(i32 (extloadi1 ADDRri:$src)), (LDUBri ADDRri:$src)>;
+def : Pat<(i32 (extloadi8 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
+def : Pat<(i32 (extloadi8 ADDRri:$src)), (LDUBri ADDRri:$src)>;
+def : Pat<(i32 (extloadi16 ADDRrr:$src)), (LDUHrr ADDRrr:$src)>;
+def : Pat<(i32 (extloadi16 ADDRri:$src)), (LDUHri ADDRri:$src)>;
+
+// zextload bool -> zextload byte
+def : Pat<(i32 (zextloadi1 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
+def : Pat<(i32 (zextloadi1 ADDRri:$src)), (LDUBri ADDRri:$src)>;
+
+// store 0, addr -> store %g0, addr
+def : Pat<(store (i32 0), ADDRrr:$dst), (STrr ADDRrr:$dst, (i32 G0))>;
+def : Pat<(store (i32 0), ADDRri:$dst), (STri ADDRri:$dst, (i32 G0))>;
+
+// store bar for all atomic_fence in V8.
+let Predicates = [HasNoV9] in
+ def : Pat<(atomic_fence imm, imm), (STBAR)>;
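+// e.g. a seq_cst atomic_thread_fence in C maps to the atomic_fence node and
+// is emitted as a bare "stbar" on V8 (illustrative).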
+
+// atomic_load addr -> load addr
+def : Pat<(i32 (atomic_load_8 ADDRrr:$src)), (LDUBrr ADDRrr:$src)>;
+def : Pat<(i32 (atomic_load_8 ADDRri:$src)), (LDUBri ADDRri:$src)>;
+def : Pat<(i32 (atomic_load_16 ADDRrr:$src)), (LDUHrr ADDRrr:$src)>;
+def : Pat<(i32 (atomic_load_16 ADDRri:$src)), (LDUHri ADDRri:$src)>;
+def : Pat<(i32 (atomic_load_32 ADDRrr:$src)), (LDrr ADDRrr:$src)>;
+def : Pat<(i32 (atomic_load_32 ADDRri:$src)), (LDri ADDRri:$src)>;
+
+// atomic_store val, addr -> store val, addr
+def : Pat<(atomic_store_8 ADDRrr:$dst, i32:$val), (STBrr ADDRrr:$dst, $val)>;
+def : Pat<(atomic_store_8 ADDRri:$dst, i32:$val), (STBri ADDRri:$dst, $val)>;
+def : Pat<(atomic_store_16 ADDRrr:$dst, i32:$val), (STHrr ADDRrr:$dst, $val)>;
+def : Pat<(atomic_store_16 ADDRri:$dst, i32:$val), (STHri ADDRri:$dst, $val)>;
+def : Pat<(atomic_store_32 ADDRrr:$dst, i32:$val), (STrr ADDRrr:$dst, $val)>;
+def : Pat<(atomic_store_32 ADDRri:$dst, i32:$val), (STri ADDRri:$dst, $val)>;
+
+// extract_vector
+def : Pat<(extractelt (v2i32 IntPair:$Rn), 0),
+ (i32 (EXTRACT_SUBREG IntPair:$Rn, sub_even))>;
+def : Pat<(extractelt (v2i32 IntPair:$Rn), 1),
+ (i32 (EXTRACT_SUBREG IntPair:$Rn, sub_odd))>;
+
+// build_vector
+def : Pat<(build_vector (i32 IntRegs:$a1), (i32 IntRegs:$a2)),
+ (INSERT_SUBREG
+ (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)), (i32 IntRegs:$a1), sub_even),
+ (i32 IntRegs:$a2), sub_odd)>;
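+// e.g. (build_vector %a, %b) places %a in the even half and %b in the odd
+// half of an IntPair register (illustrative).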
+
+
+include "SparcInstr64Bit.td"
+include "SparcInstrVIS.td"
+include "SparcInstrAliases.td"
diff --git a/contrib/llvm/lib/Target/Sparc/SparcInstrVIS.td b/contrib/llvm/lib/Target/Sparc/SparcInstrVIS.td
new file mode 100644
index 000000000000..d9adf3e8b0f5
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcInstrVIS.td
@@ -0,0 +1,263 @@
+//===---- SparcInstrVIS.td - Visual Instruction Set extensions (VIS) -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains instruction formats, definitions and patterns needed for
+// VIS, VIS II, and VIS III instructions on SPARC.
+//===----------------------------------------------------------------------===//
+
+// VIS Instruction Format.
+class VISInstFormat<bits<9> opfval, dag outs, dag ins, string asmstr,
+ list<dag> pattern>
+ : F3_3<0b10, 0b110110, opfval, outs, ins, asmstr, pattern>;
+
+class VISInst<bits<9> opfval, string OpcStr, RegisterClass RC = DFPRegs>
+ : VISInstFormat<opfval,
+ (outs RC:$rd), (ins RC:$rs1, RC:$rs2),
+ !strconcat(OpcStr, " $rs1, $rs2, $rd"), []>;
+
+// VIS Instruction with integer destination register.
+class VISInstID<bits<9> opfval, string OpcStr>
+ : VISInstFormat<opfval,
+ (outs I64Regs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ !strconcat(OpcStr, " $rs1, $rs2, $rd"), []>;
+
+// For VIS Instructions with no operand.
+let rd = 0, rs1 = 0, rs2 = 0 in
+class VISInst0<bits<9> opfval, string asmstr>
+ : VISInstFormat<opfval, (outs), (ins), asmstr, []>;
+
+// For VIS Instructions with only rs1, rd operands.
+let rs2 = 0 in
+class VISInst1<bits<9> opfval, string OpcStr, RegisterClass RC = DFPRegs>
+ : VISInstFormat<opfval,
+ (outs RC:$rd), (ins RC:$rs1),
+ !strconcat(OpcStr, " $rs1, $rd"), []>;
+
+// For VIS Instructions with only rs2, rd operands.
+let rs1 = 0 in
+class VISInst2<bits<9> opfval, string OpcStr, RegisterClass RC = DFPRegs>
+ : VISInstFormat<opfval,
+ (outs RC:$rd), (ins RC:$rs2),
+ !strconcat(OpcStr, " $rs2, $rd"), []>;
+
+// For VIS Instructions with only rd operand.
+let Constraints = "$rd = $f", rs1 = 0, rs2 = 0 in
+class VISInstD<bits<9> opfval, string OpcStr, RegisterClass RC = DFPRegs>
+ : VISInstFormat<opfval,
+ (outs RC:$rd), (ins RC:$f),
+ !strconcat(OpcStr, " $rd"), []>;
+
+// VIS 1 Instructions
+let Predicates = [HasVIS] in {
+
+def FPADD16 : VISInst<0b001010000, "fpadd16">;
+def FPADD16S : VISInst<0b001010001, "fpadd16s">;
+def FPADD32 : VISInst<0b001010010, "fpadd32">;
+def FPADD32S : VISInst<0b001010011, "fpadd32s">;
+def FPSUB16 : VISInst<0b001010100, "fpsub16">;
+def FPSUB16S : VISInst<0b001010101, "fpsub16s">;
+def FPSUB32 : VISInst<0b001010110, "fpsub32">;
+def FPSUB32S : VISInst<0b001010111, "fpsub32s">;
+
+def FPACK16 : VISInst2<0b000111011, "fpack16">;
+def FPACK32 : VISInst <0b000111010, "fpack32">;
+def FPACKFIX : VISInst2<0b000111101, "fpackfix">;
+def FEXPAND : VISInst2<0b001001101, "fexpand">;
+def FPMERGE : VISInst <0b001001011, "fpmerge">;
+
+def FMUL8X16 : VISInst<0b000110001, "fmul8x16">;
+def FMUL8X16AU : VISInst<0b000110011, "fmul8x16au">;
+def FMUL8X16AL : VISInst<0b000110101, "fmul8x16al">;
+def FMUL8SUX16 : VISInst<0b000110110, "fmul8sux16">;
+def FMUL8ULX16 : VISInst<0b000110111, "fmul8ulx16">;
+def FMULD8SUX16 : VISInst<0b000111000, "fmuld8sux16">;
+def FMULD8ULX16 : VISInst<0b000111001, "fmuld8ulx16">;
+
+def ALIGNADDR : VISInst<0b000011000, "alignaddr", I64Regs>;
+def ALIGNADDRL : VISInst<0b000011010, "alignaddrl", I64Regs>;
+def FALIGNADATA : VISInst<0b001001000, "faligndata">;
+
+def FZERO : VISInstD<0b001100000, "fzero">;
+def FZEROS : VISInstD<0b001100001, "fzeros", FPRegs>;
+def FONE : VISInstD<0b001111110, "fone">;
+def FONES : VISInstD<0b001111111, "fones", FPRegs>;
+def FSRC1 : VISInst1<0b001110100, "fsrc1">;
+def FSRC1S : VISInst1<0b001110101, "fsrc1s", FPRegs>;
+def FSRC2 : VISInst2<0b001111000, "fsrc2">;
+def FSRC2S : VISInst2<0b001111001, "fsrc2s", FPRegs>;
+def FNOT1 : VISInst1<0b001101010, "fnot1">;
+def FNOT1S : VISInst1<0b001101011, "fnot1s", FPRegs>;
+def FNOT2 : VISInst2<0b001100110, "fnot2">;
+def FNOT2S : VISInst2<0b001100111, "fnot2s", FPRegs>;
+def FOR : VISInst<0b001111100, "for">;
+def FORS : VISInst<0b001111101, "fors", FPRegs>;
+def FNOR : VISInst<0b001100010, "fnor">;
+def FNORS : VISInst<0b001100011, "fnors", FPRegs>;
+def FAND : VISInst<0b001110000, "fand">;
+def FANDS : VISInst<0b001110001, "fands", FPRegs>;
+def FNAND : VISInst<0b001101110, "fnand">;
+def FNANDS : VISInst<0b001101111, "fnands", FPRegs>;
+def FXOR : VISInst<0b001101100, "fxor">;
+def FXORS : VISInst<0b001101101, "fxors", FPRegs>;
+def FXNOR : VISInst<0b001110010, "fxnor">;
+def FXNORS : VISInst<0b001110011, "fxnors", FPRegs>;
+
+def FORNOT1 : VISInst<0b001111010, "fornot1">;
+def FORNOT1S : VISInst<0b001111011, "fornot1s", FPRegs>;
+def FORNOT2 : VISInst<0b001110110, "fornot2">;
+def FORNOT2S : VISInst<0b001110111, "fornot2s", FPRegs>;
+def FANDNOT1 : VISInst<0b001101000, "fandnot1">;
+def FANDNOT1S : VISInst<0b001101001, "fandnot1s", FPRegs>;
+def FANDNOT2 : VISInst<0b001100100, "fandnot2">;
+def FANDNOT2S : VISInst<0b001100101, "fandnot2s", FPRegs>;
+
+def FCMPGT16 : VISInstID<0b000101000, "fcmpgt16">;
+def FCMPGT32 : VISInstID<0b000101100, "fcmpgt32">;
+def FCMPLE16 : VISInstID<0b000100000, "fcmple16">;
+def FCMPLE32 : VISInstID<0b000100100, "fcmple32">;
+def FCMPNE16 : VISInstID<0b000100010, "fcmpne16">;
+def FCMPNE32 : VISInstID<0b000100110, "fcmpne32">;
+def FCMPEQ16 : VISInstID<0b000101010, "fcmpeq16">;
+def FCMPEQ32 : VISInstID<0b000101110, "fcmpeq32">;
+
+
+def EDGE8 : VISInst<0b000000000, "edge8", I64Regs>;
+def EDGE8L : VISInst<0b000000010, "edge8l", I64Regs>;
+def EDGE16 : VISInst<0b000000100, "edge16", I64Regs>;
+def EDGE16L : VISInst<0b000000110, "edge16l", I64Regs>;
+def EDGE32 : VISInst<0b000001000, "edge32", I64Regs>;
+def EDGE32L : VISInst<0b000001010, "edge32l", I64Regs>;
+
+def PDIST : VISInst<0b000111110, "pdist">;
+
+def ARRAY8 : VISInst<0b000010000, "array8", I64Regs>;
+def ARRAY16 : VISInst<0b000010010, "array16", I64Regs>;
+def ARRAY32 : VISInst<0b000010100, "array32", I64Regs>;
+
+def SHUTDOWN : VISInst0<0b010000000, "shutdown">;
+
+} // Predicates = [HasVIS]
+
+
+// VIS 2 Instructions.
+let Predicates = [HasVIS2] in {
+
+def BMASK : VISInst<0b000011001, "bmask", I64Regs>;
+def BSHUFFLE : VISInst<0b000011100, "bshuffle">;
+
+def SIAM : VISInst0<0b010000001, "siam">;
+
+def EDGE8N : VISInst<0b000000001, "edge8n", I64Regs>;
+def EDGE8LN : VISInst<0b000000011, "edge8ln", I64Regs>;
+def EDGE16N : VISInst<0b000000101, "edge16n", I64Regs>;
+def EDGE16LN : VISInst<0b000000111, "edge16ln", I64Regs>;
+def EDGE32N : VISInst<0b000001001, "edge32n", I64Regs>;
+def EDGE32LN : VISInst<0b000001011, "edge32ln", I64Regs>;
+} // Predicates = [HasVIS2]
+
+
+// VIS 3 Instructions.
+let Predicates = [HasVIS3] in {
+
+let Uses = [ICC] in
+def ADDXC : VISInst<0b000010001, "addxc", I64Regs>;
+
+let Defs = [ICC], Uses = [ICC] in
+def ADDXCCC : VISInst<0b000010011, "addxccc", I64Regs>;
+
+let rd = 0, rs1 = 0 in {
+def CMASK8 : VISInstFormat<0b000011011, (outs), (ins I64Regs:$rs2),
+ "cmask8 $rs2", []>;
+def CMASK16 : VISInstFormat<0b000011101, (outs), (ins I64Regs:$rs2),
+ "cmask16 $rs2", []>;
+def CMASK32 : VISInstFormat<0b000011111, (outs), (ins I64Regs:$rs2),
+ "cmask32 $rs2", []>;
+
+}
+
+def FCHKSM16 : VISInst<0b001000100, "fchksm16">;
+
+def FHADDS : F3_3<0b10, 0b110100, 0b001100001,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fhadds $rs1, $rs2, $rd", []>;
+def FHADDD : F3_3<0b10, 0b110100, 0b001100010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fhaddd $rs1, $rs2, $rd", []>;
+def FHSUBS : F3_3<0b10, 0b110100, 0b001100101,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fhsubs $rs1, $rs2, $rd", []>;
+def FHSUBD : F3_3<0b10, 0b110100, 0b001100110,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fhsubd $rs1, $rs2, $rd", []>;
+def FLCMPS : VISInstFormat<0b101010001, (outs FCCRegs:$rd),
+ (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "flcmps $rd, $rs1, $rs2", []>;
+def FLCMPD : VISInstFormat<0b101010010, (outs FCCRegs:$rd),
+ (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "flcmpd $rd, $rs1, $rs2", []>;
+
+def FMEAN16 : VISInst<0b001000000, "fmean16">;
+
+def FNADDS : F3_3<0b10, 0b110100, 0b001010001,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnadds $rs1, $rs2, $rd", []>;
+def FNADDD : F3_3<0b10, 0b110100, 0b001010010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnaddd $rs1, $rs2, $rd", []>;
+def FNHADDS : F3_3<0b10, 0b110100, 0b001110001,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnhadds $rs1, $rs2, $rd", []>;
+def FNHADDD : F3_3<0b10, 0b110100, 0b001110010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnhaddd $rs1, $rs2, $rd", []>;
+
+def FNMULS : F3_3<0b10, 0b110100, 0b001011001,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnmuls $rs1, $rs2, $rd", []>;
+def FNMULD : F3_3<0b10, 0b110100, 0b001011010,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnmuld $rs1, $rs2, $rd", []>;
+def FNSMULD : F3_3<0b10, 0b110100, 0b001111001,
+ (outs DFPRegs:$rd), (ins DFPRegs:$rs1, DFPRegs:$rs2),
+ "fnsmuld $rs1, $rs2, $rd", []>;
+
+def FPADD64 : VISInst<0b001000010, "fpadd64">;
+
+def FSLL16 : VISInst<0b000100001, "fsll16">;
+def FSRL16 : VISInst<0b000100011, "fsrl16">;
+def FSLL32 : VISInst<0b000100101, "fsll32">;
+def FSRL32 : VISInst<0b000100111, "fsrl32">;
+def FSLAS16 : VISInst<0b000101001, "fslas16">;
+def FSRA16 : VISInst<0b000101011, "fsra16">;
+def FSLAS32 : VISInst<0b000101101, "fslas32">;
+def FSRA32 : VISInst<0b000101111, "fsra32">;
+
+let rs1 = 0 in
+def LZCNT : VISInstFormat<0b000010111, (outs I64Regs:$rd),
+ (ins I64Regs:$rs2), "lzcnt $rs2, $rd", []>;
+
+let rs1 = 0 in {
+def MOVSTOSW : VISInstFormat<0b100010011, (outs I64Regs:$rd),
+ (ins DFPRegs:$rs2), "movstosw $rs2, $rd", []>;
+def MOVSTOUW : VISInstFormat<0b100010001, (outs I64Regs:$rd),
+ (ins DFPRegs:$rs2), "movstouw $rs2, $rd", []>;
+def MOVDTOX : VISInstFormat<0b100010000, (outs I64Regs:$rd),
+ (ins DFPRegs:$rs2), "movdtox $rs2, $rd", []>;
+def MOVWTOS : VISInstFormat<0b100011001, (outs DFPRegs:$rd),
+ (ins I64Regs:$rs2), "movwtos $rs2, $rd", []>;
+def MOVXTOD : VISInstFormat<0b100011000, (outs DFPRegs:$rd),
+ (ins I64Regs:$rs2), "movxtod $rs2, $rd", []>;
+}
+
+def PDISTN : VISInst<0b000111111, "pdistn">;
+
+def UMULXHI : VISInst<0b000010110, "umulxhi", I64Regs>;
+def XMULX : VISInst<0b100010101, "xmulx", I64Regs>;
+def XMULXHI : VISInst<0b100010111, "xmulxhi", I64Regs>;
+} // Predicates = [HasVIS3]
diff --git a/contrib/llvm/lib/Target/Sparc/SparcMCInstLower.cpp b/contrib/llvm/lib/Target/Sparc/SparcMCInstLower.cpp
new file mode 100644
index 000000000000..a784124ff688
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcMCInstLower.cpp
@@ -0,0 +1,108 @@
+//===-- SparcMCInstLower.cpp - Convert Sparc MachineInstr to MCInst -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code to lower Sparc MachineInstrs to their corresponding
+// MCInst records.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCTargetDesc/SparcMCExpr.h"
+#include "Sparc.h"
+#include "llvm/CodeGen/AsmPrinter.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstr.h"
+#include "llvm/CodeGen/MachineOperand.h"
+#include "llvm/IR/Mangler.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCExpr.h"
+#include "llvm/MC/MCInst.h"
+
+using namespace llvm;
+
+
+static MCOperand LowerSymbolOperand(const MachineInstr *MI,
+ const MachineOperand &MO,
+ AsmPrinter &AP) {
+
+ SparcMCExpr::VariantKind Kind =
+ (SparcMCExpr::VariantKind)MO.getTargetFlags();
+ const MCSymbol *Symbol = nullptr;
+
+ switch(MO.getType()) {
+ default: llvm_unreachable("Unknown type in LowerSymbolOperand");
+ case MachineOperand::MO_MachineBasicBlock:
+ Symbol = MO.getMBB()->getSymbol();
+ break;
+
+ case MachineOperand::MO_GlobalAddress:
+ Symbol = AP.getSymbol(MO.getGlobal());
+ break;
+
+ case MachineOperand::MO_BlockAddress:
+ Symbol = AP.GetBlockAddressSymbol(MO.getBlockAddress());
+ break;
+
+ case MachineOperand::MO_ExternalSymbol:
+ Symbol = AP.GetExternalSymbolSymbol(MO.getSymbolName());
+ break;
+
+ case MachineOperand::MO_ConstantPoolIndex:
+ Symbol = AP.GetCPISymbol(MO.getIndex());
+ break;
+ }
+
+ const MCSymbolRefExpr *MCSym = MCSymbolRefExpr::create(Symbol,
+ AP.OutContext);
+ const SparcMCExpr *expr = SparcMCExpr::create(Kind, MCSym,
+ AP.OutContext);
+ return MCOperand::createExpr(expr);
+}
+
+static MCOperand LowerOperand(const MachineInstr *MI,
+ const MachineOperand &MO,
+ AsmPrinter &AP) {
+ switch(MO.getType()) {
+ default: llvm_unreachable("unknown operand type"); break;
+ case MachineOperand::MO_Register:
+ if (MO.isImplicit())
+ break;
+ return MCOperand::createReg(MO.getReg());
+
+ case MachineOperand::MO_Immediate:
+ return MCOperand::createImm(MO.getImm());
+
+ case MachineOperand::MO_MachineBasicBlock:
+ case MachineOperand::MO_GlobalAddress:
+ case MachineOperand::MO_BlockAddress:
+ case MachineOperand::MO_ExternalSymbol:
+ case MachineOperand::MO_ConstantPoolIndex:
+ return LowerSymbolOperand(MI, MO, AP);
+
+ case MachineOperand::MO_RegisterMask: break;
+
+ }
+ return MCOperand();
+}
+
+void llvm::LowerSparcMachineInstrToMCInst(const MachineInstr *MI,
+ MCInst &OutMI,
+ AsmPrinter &AP)
+{
+
+ OutMI.setOpcode(MI->getOpcode());
+
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ const MachineOperand &MO = MI->getOperand(i);
+ MCOperand MCOp = LowerOperand(MI, MO, AP);
+
+ if (MCOp.isValid())
+ OutMI.addOperand(MCOp);
+ }
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.cpp
new file mode 100644
index 000000000000..e7442826e78b
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.cpp
@@ -0,0 +1,14 @@
+//===-- SparcMachineFunctionInfo.cpp - Sparc Machine Function Info --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcMachineFunctionInfo.h"
+
+using namespace llvm;
+
+void SparcMachineFunctionInfo::anchor() { }
diff --git a/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h b/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h
new file mode 100644
index 000000000000..104744279d9d
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcMachineFunctionInfo.h
@@ -0,0 +1,56 @@
+//===- SparcMachineFunctionInfo.h - Sparc Machine Function Info -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares Sparc specific per-machine-function information.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_LIB_TARGET_SPARC_SPARCMACHINEFUNCTIONINFO_H
+#define LLVM_LIB_TARGET_SPARC_SPARCMACHINEFUNCTIONINFO_H
+
+#include "llvm/CodeGen/MachineFunction.h"
+
+namespace llvm {
+
+ class SparcMachineFunctionInfo : public MachineFunctionInfo {
+ virtual void anchor();
+ private:
+ unsigned GlobalBaseReg;
+
+ /// VarArgsFrameOffset - Frame offset to start of varargs area.
+ int VarArgsFrameOffset;
+
+ /// SRetReturnReg - Holds the virtual register into which the sret
+ /// argument is passed.
+ unsigned SRetReturnReg;
+
+ /// IsLeafProc - True if the function is a leaf procedure.
+ bool IsLeafProc;
+ public:
+ SparcMachineFunctionInfo()
+ : GlobalBaseReg(0), VarArgsFrameOffset(0), SRetReturnReg(0),
+ IsLeafProc(false) {}
+ explicit SparcMachineFunctionInfo(MachineFunction &MF)
+ : GlobalBaseReg(0), VarArgsFrameOffset(0), SRetReturnReg(0),
+ IsLeafProc(false) {}
+
+ unsigned getGlobalBaseReg() const { return GlobalBaseReg; }
+ void setGlobalBaseReg(unsigned Reg) { GlobalBaseReg = Reg; }
+
+ int getVarArgsFrameOffset() const { return VarArgsFrameOffset; }
+ void setVarArgsFrameOffset(int Offset) { VarArgsFrameOffset = Offset; }
+
+ unsigned getSRetReturnReg() const { return SRetReturnReg; }
+ void setSRetReturnReg(unsigned Reg) { SRetReturnReg = Reg; }
+
+ void setLeafProc(bool rhs) { IsLeafProc = rhs; }
+ bool isLeafProc() const { return IsLeafProc; }
+ };
+}
+
+#endif
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
new file mode 100644
index 000000000000..b9647eaa3d51
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.cpp
@@ -0,0 +1,237 @@
+//===-- SparcRegisterInfo.cpp - SPARC Register Information ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the SPARC implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcRegisterInfo.h"
+#include "Sparc.h"
+#include "SparcMachineFunctionInfo.h"
+#include "SparcSubtarget.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
+#include "llvm/CodeGen/TargetInstrInfo.h"
+#include "llvm/IR/Type.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
+#define GET_REGINFO_TARGET_DESC
+#include "SparcGenRegisterInfo.inc"
+
+static cl::opt<bool>
+ReserveAppRegisters("sparc-reserve-app-registers", cl::Hidden, cl::init(false),
+ cl::desc("Reserve application registers (%g2-%g4)"));
+
+SparcRegisterInfo::SparcRegisterInfo() : SparcGenRegisterInfo(SP::O7) {}
+
+const MCPhysReg*
+SparcRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
+ return CSR_SaveList;
+}
+
+const uint32_t *
+SparcRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
+ CallingConv::ID CC) const {
+ return CSR_RegMask;
+}
+
+const uint32_t*
+SparcRegisterInfo::getRTCallPreservedMask(CallingConv::ID CC) const {
+ return RTCSR_RegMask;
+}
+
+BitVector SparcRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
+ BitVector Reserved(getNumRegs());
+ const SparcSubtarget &Subtarget = MF.getSubtarget<SparcSubtarget>();
+ // FIXME: G1 reserved for now for large imm generation by frame code.
+ Reserved.set(SP::G1);
+
+  // G2-G4 can be used in applications.
+ if (ReserveAppRegisters) {
+ Reserved.set(SP::G2);
+ Reserved.set(SP::G3);
+ Reserved.set(SP::G4);
+ }
+ // G5 is not reserved in 64 bit mode.
+ if (!Subtarget.is64Bit())
+ Reserved.set(SP::G5);
+
+ Reserved.set(SP::O6);
+ Reserved.set(SP::I6);
+ Reserved.set(SP::I7);
+ Reserved.set(SP::G0);
+ Reserved.set(SP::G6);
+ Reserved.set(SP::G7);
+
+ // Also reserve the register pair aliases covering the above
+ // registers, with the same conditions.
+ Reserved.set(SP::G0_G1);
+ if (ReserveAppRegisters)
+ Reserved.set(SP::G2_G3);
+ if (ReserveAppRegisters || !Subtarget.is64Bit())
+ Reserved.set(SP::G4_G5);
+
+ Reserved.set(SP::O6_O7);
+ Reserved.set(SP::I6_I7);
+ Reserved.set(SP::G6_G7);
+
+ // Unaliased double registers are not available in non-V9 targets.
+ if (!Subtarget.isV9()) {
+ for (unsigned n = 0; n != 16; ++n) {
+ for (MCRegAliasIterator AI(SP::D16 + n, this, true); AI.isValid(); ++AI)
+ Reserved.set(*AI);
+ }
+ }
+
+ return Reserved;
+}
+
+const TargetRegisterClass*
+SparcRegisterInfo::getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind) const {
+ const SparcSubtarget &Subtarget = MF.getSubtarget<SparcSubtarget>();
+ return Subtarget.is64Bit() ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
+}
+
+static void replaceFI(MachineFunction &MF, MachineBasicBlock::iterator II,
+ MachineInstr &MI, const DebugLoc &dl,
+ unsigned FIOperandNum, int Offset, unsigned FramePtr) {
+ // Replace frame index with a frame pointer reference.
+ if (Offset >= -4096 && Offset <= 4095) {
+ // If the offset is small enough to fit in the immediate field, directly
+ // encode it.
+ MI.getOperand(FIOperandNum).ChangeToRegister(FramePtr, false);
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
+ return;
+ }
+
+ const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
+
+ // FIXME: it would be better to scavenge a register here instead of
+ // reserving G1 all of the time.
+ if (Offset >= 0) {
+  // Emit nonnegative immediates with sethi + or.
+ // sethi %hi(Offset), %g1
+ // add %g1, %fp, %g1
+ // Insert G1+%lo(offset) into the user.
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::SETHIi), SP::G1)
+ .addImm(HI22(Offset));
+
+
+  // Emit G1 = G1 + FramePtr
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::ADDrr), SP::G1).addReg(SP::G1)
+ .addReg(FramePtr);
+ // Insert: G1+%lo(offset) into the user.
+ MI.getOperand(FIOperandNum).ChangeToRegister(SP::G1, false);
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(LO10(Offset));
+ return;
+ }
+
+ // Emit Negative numbers with sethi + xor
+ // sethi %hix(Offset), %g1
+ // xor %g1, %lox(offset), %g1
+ // add %g1, %fp, %g1
+ // Insert: G1 + 0 into the user.
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::SETHIi), SP::G1)
+ .addImm(HIX22(Offset));
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::XORri), SP::G1)
+ .addReg(SP::G1).addImm(LOX10(Offset));
+
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::ADDrr), SP::G1).addReg(SP::G1)
+ .addReg(FramePtr);
+ // Insert: G1+%lo(offset) into the user.
+ MI.getOperand(FIOperandNum).ChangeToRegister(SP::G1, false);
+ MI.getOperand(FIOperandNum + 1).ChangeToImmediate(0);
+}
+
+
+void
+SparcRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS) const {
+ assert(SPAdj == 0 && "Unexpected");
+
+ MachineInstr &MI = *II;
+ DebugLoc dl = MI.getDebugLoc();
+ int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
+ MachineFunction &MF = *MI.getParent()->getParent();
+ const SparcSubtarget &Subtarget = MF.getSubtarget<SparcSubtarget>();
+ const SparcFrameLowering *TFI = getFrameLowering(MF);
+
+ unsigned FrameReg;
+  int Offset = TFI->getFrameIndexReference(MF, FrameIndex, FrameReg);
+
+ Offset += MI.getOperand(FIOperandNum + 1).getImm();
+
+ if (!Subtarget.isV9() || !Subtarget.hasHardQuad()) {
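+    // There is no hardware 128-bit fp load/store here, so split it: emit an
+    // extra LDDF/STDF for the even 64-bit half at the original offset, then
+    // rewrite this instruction to access the odd half at offset + 8.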
+ if (MI.getOpcode() == SP::STQFri) {
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
+ unsigned SrcReg = MI.getOperand(2).getReg();
+ unsigned SrcEvenReg = getSubReg(SrcReg, SP::sub_even64);
+ unsigned SrcOddReg = getSubReg(SrcReg, SP::sub_odd64);
+ MachineInstr *StMI =
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::STDFri))
+ .addReg(FrameReg).addImm(0).addReg(SrcEvenReg);
+ replaceFI(MF, II, *StMI, dl, 0, Offset, FrameReg);
+ MI.setDesc(TII.get(SP::STDFri));
+ MI.getOperand(2).setReg(SrcOddReg);
+ Offset += 8;
+ } else if (MI.getOpcode() == SP::LDQFri) {
+ const TargetInstrInfo &TII = *Subtarget.getInstrInfo();
+ unsigned DestReg = MI.getOperand(0).getReg();
+ unsigned DestEvenReg = getSubReg(DestReg, SP::sub_even64);
+ unsigned DestOddReg = getSubReg(DestReg, SP::sub_odd64);
+ MachineInstr *StMI =
+ BuildMI(*MI.getParent(), II, dl, TII.get(SP::LDDFri), DestEvenReg)
+ .addReg(FrameReg).addImm(0);
+ replaceFI(MF, II, *StMI, dl, 1, Offset, FrameReg);
+
+ MI.setDesc(TII.get(SP::LDDFri));
+ MI.getOperand(0).setReg(DestOddReg);
+ Offset += 8;
+ }
+ }
+
+ replaceFI(MF, II, MI, dl, FIOperandNum, Offset, FrameReg);
+
+}
+
+unsigned SparcRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
+ return SP::I6;
+}
+
+// Sparc has no architectural need for stack realignment support,
+// except that LLVM unfortunately currently implements overaligned
+// stack objects by depending upon stack realignment support.
+// If that ever changes, this can probably be deleted.
+bool SparcRegisterInfo::canRealignStack(const MachineFunction &MF) const {
+ if (!TargetRegisterInfo::canRealignStack(MF))
+ return false;
+
+  // Sparc always has a fixed frame pointer register, so we don't need to
+  // worry about reserving it. [Even if we don't have a frame pointer for
+  // our frame, it still cannot be used for other things, or register
+  // window traps will be SADNESS.]
+
+ // If there's a reserved call frame, we can use SP to access locals.
+ if (getFrameLowering(MF)->hasReservedCallFrame(MF))
+ return true;
+
+ // Otherwise, we'd need a base pointer, but those aren't implemented
+ // for SPARC at the moment.
+
+ return false;
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
new file mode 100644
index 000000000000..8dd2569d10de
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.h
@@ -0,0 +1,50 @@
+//===-- SparcRegisterInfo.h - Sparc Register Information Impl ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the Sparc implementation of the TargetRegisterInfo class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_SPARCREGISTERINFO_H
+#define LLVM_LIB_TARGET_SPARC_SPARCREGISTERINFO_H
+
+#include "llvm/CodeGen/TargetRegisterInfo.h"
+
+#define GET_REGINFO_HEADER
+#include "SparcGenRegisterInfo.inc"
+
+namespace llvm {
+struct SparcRegisterInfo : public SparcGenRegisterInfo {
+ SparcRegisterInfo();
+
+ /// Code Generation virtual methods...
+ const MCPhysReg *getCalleeSavedRegs(const MachineFunction *MF) const override;
+ const uint32_t *getCallPreservedMask(const MachineFunction &MF,
+ CallingConv::ID CC) const override;
+
+ const uint32_t* getRTCallPreservedMask(CallingConv::ID CC) const;
+
+ BitVector getReservedRegs(const MachineFunction &MF) const override;
+
+ const TargetRegisterClass *getPointerRegClass(const MachineFunction &MF,
+ unsigned Kind) const override;
+
+ void eliminateFrameIndex(MachineBasicBlock::iterator II,
+ int SPAdj, unsigned FIOperandNum,
+ RegScavenger *RS = nullptr) const override;
+
+ unsigned getFrameRegister(const MachineFunction &MF) const override;
+
+ bool canRealignStack(const MachineFunction &MF) const override;
+
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td
new file mode 100644
index 000000000000..6625eaafd992
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcRegisterInfo.td
@@ -0,0 +1,379 @@
+//===-- SparcRegisterInfo.td - Sparc Register defs ---------*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Declarations that describe the Sparc register file
+//===----------------------------------------------------------------------===//
+
+class SparcReg<bits<16> Enc, string n> : Register<n> {
+ let HWEncoding = Enc;
+ let Namespace = "SP";
+}
+
+class SparcCtrlReg<bits<16> Enc, string n>: Register<n> {
+ let HWEncoding = Enc;
+ let Namespace = "SP";
+}
+
+let Namespace = "SP" in {
+def sub_even : SubRegIndex<32>;
+def sub_odd : SubRegIndex<32, 32>;
+def sub_even64 : SubRegIndex<64>;
+def sub_odd64 : SubRegIndex<64, 64>;
+}
+
+// Registers are identified with 5-bit ID numbers.
+// Ri - 32-bit integer registers
+class Ri<bits<16> Enc, string n> : SparcReg<Enc, n>;
+
+// Rdi - pairs of 32-bit integer registers
+class Rdi<bits<16> Enc, string n, list<Register> subregs> : SparcReg<Enc, n> {
+ let SubRegs = subregs;
+ let SubRegIndices = [sub_even, sub_odd];
+ let CoveredBySubRegs = 1;
+}
+// Rf - 32-bit floating-point registers
+class Rf<bits<16> Enc, string n> : SparcReg<Enc, n>;
+
+// Rd - Slots in the FP register file for 64-bit floating-point values.
+class Rd<bits<16> Enc, string n, list<Register> subregs> : SparcReg<Enc, n> {
+ let SubRegs = subregs;
+ let SubRegIndices = [sub_even, sub_odd];
+ let CoveredBySubRegs = 1;
+}
+
+// Rq - Slots in the FP register file for 128-bit floating-point values.
+class Rq<bits<16> Enc, string n, list<Register> subregs> : SparcReg<Enc, n> {
+ let SubRegs = subregs;
+ let SubRegIndices = [sub_even64, sub_odd64];
+ let CoveredBySubRegs = 1;
+}
+
+// Control Registers
+def ICC : SparcCtrlReg<0, "ICC">; // This represents icc and xcc in 64-bit code.
+foreach I = 0-3 in
+ def FCC#I : SparcCtrlReg<I, "FCC"#I>;
+
+def FSR : SparcCtrlReg<0, "FSR">; // Floating-point state register.
+
+def FQ : SparcCtrlReg<0, "FQ">; // Floating-point deferred-trap queue.
+
+def CPSR : SparcCtrlReg<0, "CPSR">; // Co-processor state register.
+
+def CPQ : SparcCtrlReg<0, "CPQ">; // Co-processor queue.
+
+// Y register
+def Y : SparcCtrlReg<0, "Y">, DwarfRegNum<[64]>;
+// Ancillary state registers (implementation defined)
+def ASR1 : SparcCtrlReg<1, "ASR1">;
+def ASR2 : SparcCtrlReg<2, "ASR2">;
+def ASR3 : SparcCtrlReg<3, "ASR3">;
+def ASR4 : SparcCtrlReg<4, "ASR4">;
+def ASR5 : SparcCtrlReg<5, "ASR5">;
+def ASR6 : SparcCtrlReg<6, "ASR6">;
+def ASR7 : SparcCtrlReg<7, "ASR7">;
+def ASR8 : SparcCtrlReg<8, "ASR8">;
+def ASR9 : SparcCtrlReg<9, "ASR9">;
+def ASR10 : SparcCtrlReg<10, "ASR10">;
+def ASR11 : SparcCtrlReg<11, "ASR11">;
+def ASR12 : SparcCtrlReg<12, "ASR12">;
+def ASR13 : SparcCtrlReg<13, "ASR13">;
+def ASR14 : SparcCtrlReg<14, "ASR14">;
+def ASR15 : SparcCtrlReg<15, "ASR15">;
+def ASR16 : SparcCtrlReg<16, "ASR16">;
+def ASR17 : SparcCtrlReg<17, "ASR17">;
+def ASR18 : SparcCtrlReg<18, "ASR18">;
+def ASR19 : SparcCtrlReg<19, "ASR19">;
+def ASR20 : SparcCtrlReg<20, "ASR20">;
+def ASR21 : SparcCtrlReg<21, "ASR21">;
+def ASR22 : SparcCtrlReg<22, "ASR22">;
+def ASR23 : SparcCtrlReg<23, "ASR23">;
+def ASR24 : SparcCtrlReg<24, "ASR24">;
+def ASR25 : SparcCtrlReg<25, "ASR25">;
+def ASR26 : SparcCtrlReg<26, "ASR26">;
+def ASR27 : SparcCtrlReg<27, "ASR27">;
+def ASR28 : SparcCtrlReg<28, "ASR28">;
+def ASR29 : SparcCtrlReg<29, "ASR29">;
+def ASR30 : SparcCtrlReg<30, "ASR30">;
+def ASR31 : SparcCtrlReg<31, "ASR31">;
+
+// Note that PSR, WIM, and TBR don't exist on the SparcV9, only the V8.
+def PSR : SparcCtrlReg<0, "PSR">;
+def WIM : SparcCtrlReg<0, "WIM">;
+def TBR : SparcCtrlReg<0, "TBR">;
+
+def TPC : SparcCtrlReg<0, "TPC">;
+def TNPC : SparcCtrlReg<1, "TNPC">;
+def TSTATE : SparcCtrlReg<2, "TSTATE">;
+def TT : SparcCtrlReg<3, "TT">;
+def TICK : SparcCtrlReg<4, "TICK">;
+def TBA : SparcCtrlReg<5, "TBA">;
+def PSTATE : SparcCtrlReg<6, "PSTATE">;
+def TL : SparcCtrlReg<7, "TL">;
+def PIL : SparcCtrlReg<8, "PIL">;
+def CWP : SparcCtrlReg<9, "CWP">;
+def CANSAVE : SparcCtrlReg<10, "CANSAVE">;
+def CANRESTORE : SparcCtrlReg<11, "CANRESTORE">;
+def CLEANWIN : SparcCtrlReg<12, "CLEANWIN">;
+def OTHERWIN : SparcCtrlReg<13, "OTHERWIN">;
+def WSTATE : SparcCtrlReg<14, "WSTATE">;
+
+// Integer registers
+def G0 : Ri< 0, "G0">, DwarfRegNum<[0]>;
+def G1 : Ri< 1, "G1">, DwarfRegNum<[1]>;
+def G2 : Ri< 2, "G2">, DwarfRegNum<[2]>;
+def G3 : Ri< 3, "G3">, DwarfRegNum<[3]>;
+def G4 : Ri< 4, "G4">, DwarfRegNum<[4]>;
+def G5 : Ri< 5, "G5">, DwarfRegNum<[5]>;
+def G6 : Ri< 6, "G6">, DwarfRegNum<[6]>;
+def G7 : Ri< 7, "G7">, DwarfRegNum<[7]>;
+def O0 : Ri< 8, "O0">, DwarfRegNum<[8]>;
+def O1 : Ri< 9, "O1">, DwarfRegNum<[9]>;
+def O2 : Ri<10, "O2">, DwarfRegNum<[10]>;
+def O3 : Ri<11, "O3">, DwarfRegNum<[11]>;
+def O4 : Ri<12, "O4">, DwarfRegNum<[12]>;
+def O5 : Ri<13, "O5">, DwarfRegNum<[13]>;
+def O6 : Ri<14, "SP">, DwarfRegNum<[14]>;
+def O7 : Ri<15, "O7">, DwarfRegNum<[15]>;
+def L0 : Ri<16, "L0">, DwarfRegNum<[16]>;
+def L1 : Ri<17, "L1">, DwarfRegNum<[17]>;
+def L2 : Ri<18, "L2">, DwarfRegNum<[18]>;
+def L3 : Ri<19, "L3">, DwarfRegNum<[19]>;
+def L4 : Ri<20, "L4">, DwarfRegNum<[20]>;
+def L5 : Ri<21, "L5">, DwarfRegNum<[21]>;
+def L6 : Ri<22, "L6">, DwarfRegNum<[22]>;
+def L7 : Ri<23, "L7">, DwarfRegNum<[23]>;
+def I0 : Ri<24, "I0">, DwarfRegNum<[24]>;
+def I1 : Ri<25, "I1">, DwarfRegNum<[25]>;
+def I2 : Ri<26, "I2">, DwarfRegNum<[26]>;
+def I3 : Ri<27, "I3">, DwarfRegNum<[27]>;
+def I4 : Ri<28, "I4">, DwarfRegNum<[28]>;
+def I5 : Ri<29, "I5">, DwarfRegNum<[29]>;
+def I6 : Ri<30, "FP">, DwarfRegNum<[30]>;
+def I7 : Ri<31, "I7">, DwarfRegNum<[31]>;
+
+// Floating-point registers
+def F0 : Rf< 0, "F0">, DwarfRegNum<[32]>;
+def F1 : Rf< 1, "F1">, DwarfRegNum<[33]>;
+def F2 : Rf< 2, "F2">, DwarfRegNum<[34]>;
+def F3 : Rf< 3, "F3">, DwarfRegNum<[35]>;
+def F4 : Rf< 4, "F4">, DwarfRegNum<[36]>;
+def F5 : Rf< 5, "F5">, DwarfRegNum<[37]>;
+def F6 : Rf< 6, "F6">, DwarfRegNum<[38]>;
+def F7 : Rf< 7, "F7">, DwarfRegNum<[39]>;
+def F8 : Rf< 8, "F8">, DwarfRegNum<[40]>;
+def F9 : Rf< 9, "F9">, DwarfRegNum<[41]>;
+def F10 : Rf<10, "F10">, DwarfRegNum<[42]>;
+def F11 : Rf<11, "F11">, DwarfRegNum<[43]>;
+def F12 : Rf<12, "F12">, DwarfRegNum<[44]>;
+def F13 : Rf<13, "F13">, DwarfRegNum<[45]>;
+def F14 : Rf<14, "F14">, DwarfRegNum<[46]>;
+def F15 : Rf<15, "F15">, DwarfRegNum<[47]>;
+def F16 : Rf<16, "F16">, DwarfRegNum<[48]>;
+def F17 : Rf<17, "F17">, DwarfRegNum<[49]>;
+def F18 : Rf<18, "F18">, DwarfRegNum<[50]>;
+def F19 : Rf<19, "F19">, DwarfRegNum<[51]>;
+def F20 : Rf<20, "F20">, DwarfRegNum<[52]>;
+def F21 : Rf<21, "F21">, DwarfRegNum<[53]>;
+def F22 : Rf<22, "F22">, DwarfRegNum<[54]>;
+def F23 : Rf<23, "F23">, DwarfRegNum<[55]>;
+def F24 : Rf<24, "F24">, DwarfRegNum<[56]>;
+def F25 : Rf<25, "F25">, DwarfRegNum<[57]>;
+def F26 : Rf<26, "F26">, DwarfRegNum<[58]>;
+def F27 : Rf<27, "F27">, DwarfRegNum<[59]>;
+def F28 : Rf<28, "F28">, DwarfRegNum<[60]>;
+def F29 : Rf<29, "F29">, DwarfRegNum<[61]>;
+def F30 : Rf<30, "F30">, DwarfRegNum<[62]>;
+def F31 : Rf<31, "F31">, DwarfRegNum<[63]>;
+
+// Aliases of the F* registers used to hold 64-bit fp values (doubles)
+def D0 : Rd< 0, "F0", [F0, F1]>, DwarfRegNum<[72]>;
+def D1 : Rd< 2, "F2", [F2, F3]>, DwarfRegNum<[73]>;
+def D2 : Rd< 4, "F4", [F4, F5]>, DwarfRegNum<[74]>;
+def D3 : Rd< 6, "F6", [F6, F7]>, DwarfRegNum<[75]>;
+def D4 : Rd< 8, "F8", [F8, F9]>, DwarfRegNum<[76]>;
+def D5 : Rd<10, "F10", [F10, F11]>, DwarfRegNum<[77]>;
+def D6 : Rd<12, "F12", [F12, F13]>, DwarfRegNum<[78]>;
+def D7 : Rd<14, "F14", [F14, F15]>, DwarfRegNum<[79]>;
+def D8 : Rd<16, "F16", [F16, F17]>, DwarfRegNum<[80]>;
+def D9 : Rd<18, "F18", [F18, F19]>, DwarfRegNum<[81]>;
+def D10 : Rd<20, "F20", [F20, F21]>, DwarfRegNum<[82]>;
+def D11 : Rd<22, "F22", [F22, F23]>, DwarfRegNum<[83]>;
+def D12 : Rd<24, "F24", [F24, F25]>, DwarfRegNum<[84]>;
+def D13 : Rd<26, "F26", [F26, F27]>, DwarfRegNum<[85]>;
+def D14 : Rd<28, "F28", [F28, F29]>, DwarfRegNum<[86]>;
+def D15 : Rd<30, "F30", [F30, F31]>, DwarfRegNum<[87]>;
+
+// Co-processor registers
+def C0 : Ri< 0, "C0">;
+def C1 : Ri< 1, "C1">;
+def C2 : Ri< 2, "C2">;
+def C3 : Ri< 3, "C3">;
+def C4 : Ri< 4, "C4">;
+def C5 : Ri< 5, "C5">;
+def C6 : Ri< 6, "C6">;
+def C7 : Ri< 7, "C7">;
+def C8 : Ri< 8, "C8">;
+def C9 : Ri< 9, "C9">;
+def C10 : Ri<10, "C10">;
+def C11 : Ri<11, "C11">;
+def C12 : Ri<12, "C12">;
+def C13 : Ri<13, "C13">;
+def C14 : Ri<14, "C14">;
+def C15 : Ri<15, "C15">;
+def C16 : Ri<16, "C16">;
+def C17 : Ri<17, "C17">;
+def C18 : Ri<18, "C18">;
+def C19 : Ri<19, "C19">;
+def C20 : Ri<20, "C20">;
+def C21 : Ri<21, "C21">;
+def C22 : Ri<22, "C22">;
+def C23 : Ri<23, "C23">;
+def C24 : Ri<24, "C24">;
+def C25 : Ri<25, "C25">;
+def C26 : Ri<26, "C26">;
+def C27 : Ri<27, "C27">;
+def C28 : Ri<28, "C28">;
+def C29 : Ri<29, "C29">;
+def C30 : Ri<30, "C30">;
+def C31 : Ri<31, "C31">;
+
+// Unaliased double precision floating point registers.
+// FIXME: Define DwarfRegNum for these registers.
+def D16 : SparcReg< 1, "F32">;
+def D17 : SparcReg< 3, "F34">;
+def D18 : SparcReg< 5, "F36">;
+def D19 : SparcReg< 7, "F38">;
+def D20 : SparcReg< 9, "F40">;
+def D21 : SparcReg<11, "F42">;
+def D22 : SparcReg<13, "F44">;
+def D23 : SparcReg<15, "F46">;
+def D24 : SparcReg<17, "F48">;
+def D25 : SparcReg<19, "F50">;
+def D26 : SparcReg<21, "F52">;
+def D27 : SparcReg<23, "F54">;
+def D28 : SparcReg<25, "F56">;
+def D29 : SparcReg<27, "F58">;
+def D30 : SparcReg<29, "F60">;
+def D31 : SparcReg<31, "F62">;
+
+// Aliases of the F* registers used to hold 128-bit fp values (long doubles).
+def Q0 : Rq< 0, "F0", [D0, D1]>;
+def Q1 : Rq< 4, "F4", [D2, D3]>;
+def Q2 : Rq< 8, "F8", [D4, D5]>;
+def Q3 : Rq<12, "F12", [D6, D7]>;
+def Q4 : Rq<16, "F16", [D8, D9]>;
+def Q5 : Rq<20, "F20", [D10, D11]>;
+def Q6 : Rq<24, "F24", [D12, D13]>;
+def Q7 : Rq<28, "F28", [D14, D15]>;
+def Q8 : Rq< 1, "F32", [D16, D17]>;
+def Q9 : Rq< 5, "F36", [D18, D19]>;
+def Q10 : Rq< 9, "F40", [D20, D21]>;
+def Q11 : Rq<13, "F44", [D22, D23]>;
+def Q12 : Rq<17, "F48", [D24, D25]>;
+def Q13 : Rq<21, "F52", [D26, D27]>;
+def Q14 : Rq<25, "F56", [D28, D29]>;
+def Q15 : Rq<29, "F60", [D30, D31]>;
+
+// Aliases of the integer registers used for LDD/STD double-word operations
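+// For example, "ldd [%o0], %g2" writes the G2_G3 pair: %g2 receives the word
+// at the effective address and %g3 the word at address+4.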
+def G0_G1 : Rdi<0, "G0", [G0, G1]>;
+def G2_G3 : Rdi<2, "G2", [G2, G3]>;
+def G4_G5 : Rdi<4, "G4", [G4, G5]>;
+def G6_G7 : Rdi<6, "G6", [G6, G7]>;
+def O0_O1 : Rdi<8, "O0", [O0, O1]>;
+def O2_O3 : Rdi<10, "O2", [O2, O3]>;
+def O4_O5 : Rdi<12, "O4", [O4, O5]>;
+def O6_O7 : Rdi<14, "O6", [O6, O7]>;
+def L0_L1 : Rdi<16, "L0", [L0, L1]>;
+def L2_L3 : Rdi<18, "L2", [L2, L3]>;
+def L4_L5 : Rdi<20, "L4", [L4, L5]>;
+def L6_L7 : Rdi<22, "L6", [L6, L7]>;
+def I0_I1 : Rdi<24, "I0", [I0, I1]>;
+def I2_I3 : Rdi<26, "I2", [I2, I3]>;
+def I4_I5 : Rdi<28, "I4", [I4, I5]>;
+def I6_I7 : Rdi<30, "I6", [I6, I7]>;
+
+// Aliases of the co-processor registers used for LDD/STD double-word operations
+def C0_C1 : Rdi<0, "C0", [C0, C1]>;
+def C2_C3 : Rdi<2, "C2", [C2, C3]>;
+def C4_C5 : Rdi<4, "C4", [C4, C5]>;
+def C6_C7 : Rdi<6, "C6", [C6, C7]>;
+def C8_C9 : Rdi<8, "C8", [C8, C9]>;
+def C10_C11 : Rdi<10, "C10", [C10, C11]>;
+def C12_C13 : Rdi<12, "C12", [C12, C13]>;
+def C14_C15 : Rdi<14, "C14", [C14, C15]>;
+def C16_C17 : Rdi<16, "C16", [C16, C17]>;
+def C18_C19 : Rdi<18, "C18", [C18, C19]>;
+def C20_C21 : Rdi<20, "C20", [C20, C21]>;
+def C22_C23 : Rdi<22, "C22", [C22, C23]>;
+def C24_C25 : Rdi<24, "C24", [C24, C25]>;
+def C26_C27 : Rdi<26, "C26", [C26, C27]>;
+def C28_C29 : Rdi<28, "C28", [C28, C29]>;
+def C30_C31 : Rdi<30, "C30", [C30, C31]>;
+
+// Register classes.
+//
+// FIXME: the register order should be defined in terms of the preferred
+// allocation order...
+//
+// This register class should not be used to hold i64 values, use the I64Regs
+// register class for that. The i64 type is included here to allow i64 patterns
+// using the integer instructions.
+def IntRegs : RegisterClass<"SP", [i32, i64], 32,
+ (add (sequence "I%u", 0, 7),
+ (sequence "G%u", 0, 7),
+ (sequence "L%u", 0, 7),
+ (sequence "O%u", 0, 7))>;
+
+// Should be in the same order as IntRegs.
+def IntPair : RegisterClass<"SP", [v2i32], 64,
+ (add I0_I1, I2_I3, I4_I5, I6_I7,
+ G0_G1, G2_G3, G4_G5, G6_G7,
+ L0_L1, L2_L3, L4_L5, L6_L7,
+ O0_O1, O2_O3, O4_O5, O6_O7)>;
+
+// Register class for 64-bit mode, with a 64-bit spill slot size.
+// These are the same as the 32-bit registers, so TableGen will consider this
+// to be a sub-class of IntRegs. That works out because requiring a 64-bit
+// spill slot is a stricter constraint than only requiring a 32-bit spill slot.
+def I64Regs : RegisterClass<"SP", [i64], 64, (add IntRegs)>;
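+
+// A minimal sketch of how these classes are consumed from C++, assuming the
+// usual TableGen-generated names in SparcGenRegisterInfo.inc:
+//   const TargetRegisterClass *RC =
+//       Subtarget->is64Bit() ? &SP::I64RegsRegClass : &SP::IntRegsRegClass;
+//   unsigned VReg = MRI.createVirtualRegister(RC);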
+
+// Floating point register classes.
+def FPRegs : RegisterClass<"SP", [f32], 32, (sequence "F%u", 0, 31)>;
+def DFPRegs : RegisterClass<"SP", [f64], 64, (sequence "D%u", 0, 31)>;
+def QFPRegs : RegisterClass<"SP", [f128], 128, (sequence "Q%u", 0, 15)>;
+
+// The Low?FPRegs classes are used only for inline-asm constraints.
+def LowDFPRegs : RegisterClass<"SP", [f64], 64, (sequence "D%u", 0, 15)>;
+def LowQFPRegs : RegisterClass<"SP", [f128], 128, (sequence "Q%u", 0, 7)>;
+
+// Floating point control register classes.
+def FCCRegs : RegisterClass<"SP", [i1], 1, (sequence "FCC%u", 0, 3)>;
+
+let isAllocatable = 0 in {
+ // Ancillary state registers
+ def ASRRegs : RegisterClass<"SP", [i32], 32,
+ (add Y, (sequence "ASR%u", 1, 31))>;
+
+ // This register class should not be used to hold i64 values.
+ def CoprocRegs : RegisterClass<"SP", [i32], 32,
+ (add (sequence "C%u", 0, 31))>;
+
+ // Should be in the same order as CoprocRegs.
+ def CoprocPair : RegisterClass<"SP", [v2i32], 64,
+ (add C0_C1, C2_C3, C4_C5, C6_C7,
+ C8_C9, C10_C11, C12_C13, C14_C15,
+ C16_C17, C18_C19, C20_C21, C22_C23,
+ C24_C25, C26_C27, C28_C29, C30_C31)>;
+}
+
+// Privileged Registers
+def PRRegs : RegisterClass<"SP", [i64], 64,
+ (add TPC, TNPC, TSTATE, TT, TICK, TBA, PSTATE, TL, PIL, CWP,
+ CANSAVE, CANRESTORE, CLEANWIN, OTHERWIN, WSTATE)>;
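+
+// These registers are reachable only through the V9 rdpr/wrpr instructions,
+// e.g. "rdpr %tl, %o0" reads the current trap level.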
diff --git a/contrib/llvm/lib/Target/Sparc/SparcSchedule.td b/contrib/llvm/lib/Target/Sparc/SparcSchedule.td
new file mode 100755
index 000000000000..f243546b029b
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcSchedule.td
@@ -0,0 +1,124 @@
+//===-- SparcSchedule.td - Describe the Sparc Itineraries --*- tablegen -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes the instruction itineraries used for scheduling on the
+// LEON line of Sparc processors.
+//
+//===----------------------------------------------------------------------===//
+
+def IIC_iu_or_fpu_instr : InstrItinClass;
+def IIC_iu_instr : InstrItinClass;
+def IIC_fpu_normal_instr : InstrItinClass;
+def IIC_fpu_fast_instr : InstrItinClass;
+def IIC_jmp_or_call : InstrItinClass;
+def IIC_ldd : InstrItinClass;
+def IIC_st : InstrItinClass;
+def IIC_std : InstrItinClass;
+def IIC_iu_smul : InstrItinClass;
+def IIC_iu_umul : InstrItinClass;
+def IIC_iu_div : InstrItinClass;
+def IIC_ticc : InstrItinClass;
+def IIC_ldstub : InstrItinClass;
+def IIC_fpu_muls : InstrItinClass;
+def IIC_fpu_muld : InstrItinClass;
+def IIC_fpu_divs : InstrItinClass;
+def IIC_fpu_divd : InstrItinClass;
+def IIC_fpu_sqrts : InstrItinClass;
+def IIC_fpu_sqrtd : InstrItinClass;
+def IIC_fpu_abs : InstrItinClass;
+def IIC_fpu_movs : InstrItinClass;
+def IIC_fpu_negs : InstrItinClass;
+def IIC_smac_umac : InstrItinClass;
+def IIC_fpu_stod : InstrItinClass;
+
+def LEONIU : FuncUnit; // integer unit
+def LEONFPU : FuncUnit; // floating-point unit
+
+// Ref: http://www.atmel.com/Images/doc4226.pdf
+
+def LEON2Itineraries : ProcessorItineraries<
+[LEONIU, LEONFPU], [], [
+ InstrItinData<IIC_iu_or_fpu_instr, [InstrStage<1, [LEONIU, LEONFPU]>], [1, 1]>,
+ InstrItinData<IIC_iu_instr, [InstrStage<1, [LEONIU]>], [1, 1]>,
+ InstrItinData<IIC_fpu_normal_instr, [InstrStage<1, [LEONFPU]>], [7, 1]>,
+ InstrItinData<IIC_fpu_fast_instr, [InstrStage<1, [LEONFPU]>], [7, 1]>,
+ InstrItinData<IIC_jmp_or_call, [InstrStage<1, [LEONIU, LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_ldd, [InstrStage<1, [LEONIU, LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_st, [InstrStage<1, [LEONIU, LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_std, [InstrStage<1, [LEONIU, LEONFPU]>], [3, 1]>,
+ InstrItinData<IIC_iu_smul, [InstrStage<1, [LEONIU]>], [5, 1]>,
+ InstrItinData<IIC_iu_umul, [InstrStage<1, [LEONIU]>], [5, 1]>,
+ InstrItinData<IIC_iu_div, [InstrStage<1, [LEONIU]>], [35, 1]>,
+ InstrItinData<IIC_ticc, [InstrStage<1, [LEONIU, LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_ldstub, [InstrStage<1, [LEONIU, LEONFPU]>], [3, 1]>,
+ InstrItinData<IIC_fpu_muls, [InstrStage<1, [LEONFPU]>], [16, 1]>,
+ InstrItinData<IIC_fpu_muld, [InstrStage<1, [LEONFPU]>], [21, 1]>,
+ InstrItinData<IIC_fpu_divs, [InstrStage<1, [LEONFPU]>], [20, 1]>,
+ InstrItinData<IIC_fpu_divd, [InstrStage<1, [LEONFPU]>], [36, 1]>,
+ InstrItinData<IIC_fpu_sqrts, [InstrStage<1, [LEONFPU]>], [37, 1]>,
+ InstrItinData<IIC_fpu_sqrtd, [InstrStage<1, [LEONFPU]>], [65, 1]>,
+ InstrItinData<IIC_fpu_abs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_movs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_negs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_stod, [InstrStage<1, [LEONFPU]>], [2, 1]>
+]>;
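+
+// Reading an entry, for reference: the IIC_fpu_muls row above occupies the
+// FPU for one stage, and its operand-cycle list says the result is available
+// 16 cycles after issue while the source operands are read in cycle 1.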
+
+def LEON3Itineraries : ProcessorItineraries<
+[LEONIU, LEONFPU], [], [
+ InstrItinData<IIC_iu_or_fpu_instr, [InstrStage<1, [LEONIU, LEONFPU]>], [1, 1]>,
+ InstrItinData<IIC_iu_instr, [InstrStage<1, [LEONIU]>], [1, 1]>,
+ InstrItinData<IIC_fpu_normal_instr, [InstrStage<1, [LEONFPU]>], [7, 1]>,
+ InstrItinData<IIC_fpu_fast_instr, [InstrStage<1, [LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_jmp_or_call, [InstrStage<1, [LEONIU, LEONFPU]>], [3, 1]>,
+ InstrItinData<IIC_ldd, [InstrStage<1, [LEONIU, LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_st, [InstrStage<1, [LEONIU, LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_std, [InstrStage<1, [LEONIU, LEONFPU]>], [5, 1]>,
+ InstrItinData<IIC_iu_smul, [InstrStage<1, [LEONIU]>], [1, 1]>,
+ InstrItinData<IIC_iu_umul, [InstrStage<1, [LEONIU]>], [4, 1]>,
+ InstrItinData<IIC_iu_div, [InstrStage<1, [LEONIU]>], [35, 1]>,
+ InstrItinData<IIC_smac_umac, [InstrStage<1, [LEONIU]>], [2, 1]>,
+ InstrItinData<IIC_ticc, [InstrStage<1, [LEONIU, LEONFPU]>], [5, 1]>,
+ InstrItinData<IIC_ldstub, [InstrStage<1, [LEONIU, LEONFPU]>], [3, 1]>,
+ InstrItinData<IIC_fpu_muls, [InstrStage<1, [LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_fpu_muld, [InstrStage<1, [LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_fpu_divs, [InstrStage<1, [LEONFPU]>], [16, 1]>,
+ InstrItinData<IIC_fpu_divd, [InstrStage<1, [LEONFPU]>], [17, 1]>,
+ InstrItinData<IIC_fpu_sqrts, [InstrStage<1, [LEONFPU]>], [24, 1]>,
+ InstrItinData<IIC_fpu_sqrtd, [InstrStage<1, [LEONFPU]>], [25, 1]>,
+ InstrItinData<IIC_fpu_abs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_movs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_negs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_stod, [InstrStage<1, [LEONFPU]>], [4, 1]>
+]>;
+
+def LEON4Itineraries : ProcessorItineraries<
+[LEONIU, LEONFPU], [], [
+ InstrItinData<IIC_iu_or_fpu_instr, [InstrStage<1, [LEONIU, LEONFPU]>], [1, 1]>,
+ InstrItinData<IIC_iu_instr, [InstrStage<1, [LEONIU]>], [1, 1]>,
+ InstrItinData<IIC_fpu_normal_instr, [InstrStage<1, [LEONFPU]>], [7, 1]>,
+ InstrItinData<IIC_fpu_fast_instr, [InstrStage<1, [LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_jmp_or_call, [InstrStage<1, [LEONIU, LEONFPU]>], [3, 1]>,
+ InstrItinData<IIC_ldd, [InstrStage<1, [LEONIU, LEONFPU]>], [1, 1]>,
+ InstrItinData<IIC_st, [InstrStage<1, [LEONIU, LEONFPU]>], [1, 1]>,
+ InstrItinData<IIC_std, [InstrStage<1, [LEONIU, LEONFPU]>], [1, 1]>,
+ InstrItinData<IIC_iu_smul, [InstrStage<1, [LEONIU]>], [1, 1]>,
+ InstrItinData<IIC_iu_umul, [InstrStage<1, [LEONIU]>], [4, 1]>,
+ InstrItinData<IIC_iu_div, [InstrStage<1, [LEONIU]>], [35, 1]>,
+ InstrItinData<IIC_smac_umac, [InstrStage<1, [LEONIU]>], [2, 1]>,
+ InstrItinData<IIC_ticc, [InstrStage<1, [LEONIU, LEONFPU]>], [5, 1]>,
+ InstrItinData<IIC_ldstub, [InstrStage<1, [LEONIU, LEONFPU]>], [3, 1]>,
+ InstrItinData<IIC_fpu_muls, [InstrStage<1, [LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_fpu_muld, [InstrStage<1, [LEONFPU]>], [4, 1]>,
+ InstrItinData<IIC_fpu_divs, [InstrStage<1, [LEONFPU]>], [16, 1]>,
+ InstrItinData<IIC_fpu_divd, [InstrStage<1, [LEONFPU]>], [17, 1]>,
+ InstrItinData<IIC_fpu_sqrts, [InstrStage<1, [LEONFPU]>], [24, 1]>,
+ InstrItinData<IIC_fpu_sqrtd, [InstrStage<1, [LEONFPU]>], [25, 1]>,
+ InstrItinData<IIC_fpu_abs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_movs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_negs, [InstrStage<1, [LEONFPU]>], [2, 1]>,
+ InstrItinData<IIC_fpu_stod, [InstrStage<1, [LEONFPU]>], [4, 1]>
+]>;
diff --git a/contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp b/contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp
new file mode 100644
index 000000000000..01545b8d20a0
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcSubtarget.cpp
@@ -0,0 +1,102 @@
+//===-- SparcSubtarget.cpp - SPARC Subtarget Information ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SPARC specific subclass of TargetSubtargetInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcSubtarget.h"
+#include "Sparc.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TargetRegistry.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "sparc-subtarget"
+
+#define GET_SUBTARGETINFO_TARGET_DESC
+#define GET_SUBTARGETINFO_CTOR
+#include "SparcGenSubtargetInfo.inc"
+
+void SparcSubtarget::anchor() { }
+
+SparcSubtarget &SparcSubtarget::initializeSubtargetDependencies(StringRef CPU,
+ StringRef FS) {
+ UseSoftMulDiv = false;
+ IsV9 = false;
+ IsLeon = false;
+ V8DeprecatedInsts = false;
+ IsVIS = false;
+ IsVIS2 = false;
+ IsVIS3 = false;
+ HasHardQuad = false;
+ UsePopc = false;
+ UseSoftFloat = false;
+ HasNoFSMULD = false;
+ HasNoFMULS = false;
+
+ // Leon features
+ HasLeonCasa = false;
+ HasUmacSmac = false;
+ PerformSDIVReplace = false;
+ InsertNOPLoad = false;
+ FixAllFDIVSQRT = false;
+ DetectRoundChange = false;
+
+ // Determine default and user specified characteristics
+ std::string CPUName = CPU;
+ if (CPUName.empty())
+ CPUName = (Is64Bit) ? "v9" : "v8";
+
+ // Parse features string.
+ ParseSubtargetFeatures(CPUName, FS);
+
+ // Popc is a v9-only instruction.
+ if (!IsV9)
+ UsePopc = false;
+
+ return *this;
+}
+
+SparcSubtarget::SparcSubtarget(const Triple &TT, const std::string &CPU,
+ const std::string &FS, const TargetMachine &TM,
+ bool is64Bit)
+ : SparcGenSubtargetInfo(TT, CPU, FS), TargetTriple(TT), Is64Bit(is64Bit),
+ InstrInfo(initializeSubtargetDependencies(CPU, FS)), TLInfo(TM, *this),
+ FrameLowering(*this) {}
+
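+// Worked examples of the arithmetic below: on V8, 40 bytes of locals become
+// 40 + 92 = 132, rounded up to 136; on V9, they become 40 + 128 = 168,
+// rounded up to 176.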
+int SparcSubtarget::getAdjustedFrameSize(int frameSize) const {
+
+ if (is64Bit()) {
+ // All 64-bit stack frames must be 16-byte aligned, and must reserve space
+ // for spilling the 16 window registers at %sp+BIAS..%sp+BIAS+128.
+ frameSize += 128;
+ // Frames with calls must also reserve space for 6 outgoing arguments
+ // whether they are used or not. LowerCall_64 takes care of that.
+ frameSize = alignTo(frameSize, 16);
+ } else {
+ // Emit the correct save instruction based on the number of bytes in
+ // the frame. Minimum stack frame size according to V8 ABI is:
+ // 16 words for register window spill
+ // 1 word for address of returned aggregate-value
+ // + 6 words for passing parameters on the stack
+ // ----------
+ // 23 words * 4 bytes per word = 92 bytes
+ frameSize += 92;
+
+    // Round up to the next doubleword boundary, as required by the ABI.
+ frameSize = alignTo(frameSize, 8);
+ }
+ return frameSize;
+}
+
+bool SparcSubtarget::enableMachineScheduler() const {
+ return true;
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcSubtarget.h b/contrib/llvm/lib/Target/Sparc/SparcSubtarget.h
new file mode 100644
index 000000000000..bcdc96e68103
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcSubtarget.h
@@ -0,0 +1,124 @@
+//===-- SparcSubtarget.h - Define Subtarget for the SPARC -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the SPARC specific subclass of TargetSubtargetInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_SPARCSUBTARGET_H
+#define LLVM_LIB_TARGET_SPARC_SPARCSUBTARGET_H
+
+#include "SparcFrameLowering.h"
+#include "SparcISelLowering.h"
+#include "SparcInstrInfo.h"
+#include "llvm/CodeGen/SelectionDAGTargetInfo.h"
+#include "llvm/CodeGen/TargetFrameLowering.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include <string>
+
+#define GET_SUBTARGETINFO_HEADER
+#include "SparcGenSubtargetInfo.inc"
+
+namespace llvm {
+class StringRef;
+
+class SparcSubtarget : public SparcGenSubtargetInfo {
+ Triple TargetTriple;
+ virtual void anchor();
+ bool UseSoftMulDiv;
+ bool IsV9;
+ bool IsLeon;
+ bool V8DeprecatedInsts;
+ bool IsVIS, IsVIS2, IsVIS3;
+ bool Is64Bit;
+ bool HasHardQuad;
+ bool UsePopc;
+ bool UseSoftFloat;
+ bool HasNoFSMULD;
+ bool HasNoFMULS;
+
+ // LEON features
+ bool HasUmacSmac;
+ bool HasLeonCasa;
+ bool InsertNOPLoad;
+ bool FixAllFDIVSQRT;
+ bool DetectRoundChange;
+ bool PerformSDIVReplace;
+
+ SparcInstrInfo InstrInfo;
+ SparcTargetLowering TLInfo;
+ SelectionDAGTargetInfo TSInfo;
+ SparcFrameLowering FrameLowering;
+
+public:
+ SparcSubtarget(const Triple &TT, const std::string &CPU,
+ const std::string &FS, const TargetMachine &TM, bool is64bit);
+
+ const SparcInstrInfo *getInstrInfo() const override { return &InstrInfo; }
+ const TargetFrameLowering *getFrameLowering() const override {
+ return &FrameLowering;
+ }
+ const SparcRegisterInfo *getRegisterInfo() const override {
+ return &InstrInfo.getRegisterInfo();
+ }
+ const SparcTargetLowering *getTargetLowering() const override {
+ return &TLInfo;
+ }
+ const SelectionDAGTargetInfo *getSelectionDAGInfo() const override {
+ return &TSInfo;
+ }
+
+ bool enableMachineScheduler() const override;
+
+ bool useSoftMulDiv() const { return UseSoftMulDiv; }
+ bool isV9() const { return IsV9; }
+ bool isLeon() const { return IsLeon; }
+ bool isVIS() const { return IsVIS; }
+ bool isVIS2() const { return IsVIS2; }
+ bool isVIS3() const { return IsVIS3; }
+ bool useDeprecatedV8Instructions() const { return V8DeprecatedInsts; }
+ bool hasHardQuad() const { return HasHardQuad; }
+ bool usePopc() const { return UsePopc; }
+ bool useSoftFloat() const { return UseSoftFloat; }
+ bool hasNoFSMULD() const { return HasNoFSMULD; }
+ bool hasNoFMULS() const { return HasNoFMULS; }
+
+ // Leon options
+ bool hasUmacSmac() const { return HasUmacSmac; }
+ bool performSDIVReplace() const { return PerformSDIVReplace; }
+ bool hasLeonCasa() const { return HasLeonCasa; }
+ bool insertNOPLoad() const { return InsertNOPLoad; }
+ bool fixAllFDIVSQRT() const { return FixAllFDIVSQRT; }
+ bool detectRoundChange() const { return DetectRoundChange; }
+
+  /// ParseSubtargetFeatures - Parses the features string, setting the
+  /// specified subtarget options. The definition of this function is
+  /// auto-generated by tblgen.
+ void ParseSubtargetFeatures(StringRef CPU, StringRef FS);
+ SparcSubtarget &initializeSubtargetDependencies(StringRef CPU, StringRef FS);
+
+ bool is64Bit() const { return Is64Bit; }
+
+ /// The 64-bit ABI uses biased stack and frame pointers, so the stack frame
+ /// of the current function is the area from [%sp+BIAS] to [%fp+BIAS].
+ int64_t getStackPointerBias() const {
+ return is64Bit() ? 2047 : 0;
+ }
+
+  /// Given an actual stack size as determined by FrameInfo, this function
+  /// returns the adjusted frame size, which includes space for register
+  /// window spills and arguments.
+ int getAdjustedFrameSize(int stackSize) const;
+
+ bool isTargetLinux() const { return TargetTriple.isOSLinux(); }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
new file mode 100644
index 000000000000..a0d40653fd9b
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.cpp
@@ -0,0 +1,213 @@
+//===-- SparcTargetMachine.cpp - Define TargetMachine for Sparc -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Sparc specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcTargetMachine.h"
+#include "LeonPasses.h"
+#include "Sparc.h"
+#include "SparcTargetObjectFile.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/IR/LegacyPassManager.h"
+#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
+extern "C" void LLVMInitializeSparcTarget() {
+ // Register the target.
+ RegisterTargetMachine<SparcV8TargetMachine> X(getTheSparcTarget());
+ RegisterTargetMachine<SparcV9TargetMachine> Y(getTheSparcV9Target());
+ RegisterTargetMachine<SparcelTargetMachine> Z(getTheSparcelTarget());
+}
+
+static std::string computeDataLayout(const Triple &T, bool is64Bit) {
+  // Sparc is typically big endian, but sparcel is little endian.
+ std::string Ret = T.getArch() == Triple::sparcel ? "e" : "E";
+ Ret += "-m:e";
+
+  // Some ABIs have 32-bit pointers.
+ if (!is64Bit)
+ Ret += "-p:32:32";
+
+  // Alignments for 64-bit integers.
+ Ret += "-i64:64";
+
+  // On SparcV9, 128-bit floats are aligned to 128 bits; on others, only to 64.
+  // On SparcV9, registers can hold 64 or 32 bits; on others, only 32.
+ if (is64Bit)
+ Ret += "-n32:64";
+ else
+ Ret += "-f128:64-n32";
+
+ if (is64Bit)
+ Ret += "-S128";
+ else
+ Ret += "-S64";
+
+ return Ret;
+}
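+
+// For reference, the strings this computes are:
+//   sparcv9: "E-m:e-i64:64-n32:64-S128"
+//   sparc:   "E-m:e-p:32:32-i64:64-f128:64-n32-S64"
+//   sparcel: "e-m:e-p:32:32-i64:64-f128:64-n32-S64"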
+
+static Reloc::Model getEffectiveRelocModel(Optional<Reloc::Model> RM) {
+ if (!RM.hasValue())
+ return Reloc::Static;
+ return *RM;
+}
+
+// Code models. Some only make sense for 64-bit code.
+//
+// SunCC Reloc CodeModel Constraints
+// abs32 Static Small text+data+bss linked below 2^32 bytes
+// abs44 Static Medium text+data+bss linked below 2^44 bytes
+// abs64 Static Large text smaller than 2^31 bytes
+// pic13 PIC_ Small GOT < 2^13 bytes
+// pic32 PIC_ Medium GOT < 2^32 bytes
+//
+// All code models require that the text segment is smaller than 2GB.
+static CodeModel::Model getEffectiveCodeModel(Optional<CodeModel::Model> CM,
+ Reloc::Model RM, bool Is64Bit,
+ bool JIT) {
+ if (CM)
+ return *CM;
+ if (Is64Bit) {
+ if (JIT)
+ return CodeModel::Large;
+ return RM == Reloc::PIC_ ? CodeModel::Small : CodeModel::Medium;
+ }
+ return CodeModel::Small;
+}
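+
+// For example, a static 64-bit compile gets CodeModel::Medium (abs44), PIC
+// 64-bit code gets CodeModel::Small (pic13), and 32-bit code always defaults
+// to CodeModel::Small (abs32).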
+
+/// Create a Sparc architecture model (ILP32 by default, LP64 when is64bit).
+SparcTargetMachine::SparcTargetMachine(
+ const Target &T, const Triple &TT, StringRef CPU, StringRef FS,
+ const TargetOptions &Options, Optional<Reloc::Model> RM,
+ Optional<CodeModel::Model> CM, CodeGenOpt::Level OL, bool JIT, bool is64bit)
+ : LLVMTargetMachine(
+ T, computeDataLayout(TT, is64bit), TT, CPU, FS, Options,
+ getEffectiveRelocModel(RM),
+ getEffectiveCodeModel(CM, getEffectiveRelocModel(RM), is64bit, JIT),
+ OL),
+ TLOF(make_unique<SparcELFTargetObjectFile>()),
+ Subtarget(TT, CPU, FS, *this, is64bit), is64Bit(is64bit) {
+ initAsmInfo();
+}
+
+SparcTargetMachine::~SparcTargetMachine() {}
+
+const SparcSubtarget *
+SparcTargetMachine::getSubtargetImpl(const Function &F) const {
+ Attribute CPUAttr = F.getFnAttribute("target-cpu");
+ Attribute FSAttr = F.getFnAttribute("target-features");
+
+ std::string CPU = !CPUAttr.hasAttribute(Attribute::None)
+ ? CPUAttr.getValueAsString().str()
+ : TargetCPU;
+ std::string FS = !FSAttr.hasAttribute(Attribute::None)
+ ? FSAttr.getValueAsString().str()
+ : TargetFS;
+
+  // FIXME: This is related to the code below that resets the target options:
+  // we need to know whether the soft-float flag is set on the function so we
+  // can enable it as a subtarget feature.
+ bool softFloat =
+ F.hasFnAttribute("use-soft-float") &&
+ F.getFnAttribute("use-soft-float").getValueAsString() == "true";
+
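+  // Append the subtarget feature; e.g. a feature string of "+v9" becomes
+  // "+v9,+soft-float".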
+ if (softFloat)
+ FS += FS.empty() ? "+soft-float" : ",+soft-float";
+
+ auto &I = SubtargetMap[CPU + FS];
+ if (!I) {
+ // This needs to be done before we create a new subtarget since any
+ // creation will depend on the TM and the code generation flags on the
+ // function that reside in TargetOptions.
+ resetTargetOptions(F);
+ I = llvm::make_unique<SparcSubtarget>(TargetTriple, CPU, FS, *this,
+ this->is64Bit);
+ }
+ return I.get();
+}
+
+namespace {
+/// Sparc Code Generator Pass Configuration Options.
+class SparcPassConfig : public TargetPassConfig {
+public:
+ SparcPassConfig(SparcTargetMachine &TM, PassManagerBase &PM)
+ : TargetPassConfig(TM, PM) {}
+
+ SparcTargetMachine &getSparcTargetMachine() const {
+ return getTM<SparcTargetMachine>();
+ }
+
+ void addIRPasses() override;
+ bool addInstSelector() override;
+ void addPreEmitPass() override;
+};
+} // namespace
+
+TargetPassConfig *SparcTargetMachine::createPassConfig(PassManagerBase &PM) {
+ return new SparcPassConfig(*this, PM);
+}
+
+void SparcPassConfig::addIRPasses() {
+ addPass(createAtomicExpandPass());
+
+ TargetPassConfig::addIRPasses();
+}
+
+bool SparcPassConfig::addInstSelector() {
+ addPass(createSparcISelDag(getSparcTargetMachine()));
+ return false;
+}
+
+void SparcPassConfig::addPreEmitPass() {
+  addPass(createSparcDelaySlotFillerPass());
+
+  if (getSparcTargetMachine().getSubtargetImpl()->insertNOPLoad())
+    addPass(new InsertNOPLoad());
+  if (getSparcTargetMachine().getSubtargetImpl()->detectRoundChange())
+    addPass(new DetectRoundChange());
+  if (getSparcTargetMachine().getSubtargetImpl()->fixAllFDIVSQRT())
+    addPass(new FixAllFDIVSQRT());
+}
+
+void SparcV8TargetMachine::anchor() { }
+
+SparcV8TargetMachine::SparcV8TargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Optional<Reloc::Model> RM,
+ Optional<CodeModel::Model> CM,
+ CodeGenOpt::Level OL, bool JIT)
+ : SparcTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, false) {}
+
+void SparcV9TargetMachine::anchor() { }
+
+SparcV9TargetMachine::SparcV9TargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Optional<Reloc::Model> RM,
+ Optional<CodeModel::Model> CM,
+ CodeGenOpt::Level OL, bool JIT)
+ : SparcTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, true) {}
+
+void SparcelTargetMachine::anchor() {}
+
+SparcelTargetMachine::SparcelTargetMachine(const Target &T, const Triple &TT,
+ StringRef CPU, StringRef FS,
+ const TargetOptions &Options,
+ Optional<Reloc::Model> RM,
+ Optional<CodeModel::Model> CM,
+ CodeGenOpt::Level OL, bool JIT)
+ : SparcTargetMachine(T, TT, CPU, FS, Options, RM, CM, OL, JIT, false) {}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
new file mode 100644
index 000000000000..b0d76abeba7d
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetMachine.h
@@ -0,0 +1,83 @@
+//===-- SparcTargetMachine.h - Define TargetMachine for Sparc ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Sparc specific subclass of TargetMachine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_SPARCTARGETMACHINE_H
+#define LLVM_LIB_TARGET_SPARC_SPARCTARGETMACHINE_H
+
+#include "SparcInstrInfo.h"
+#include "SparcSubtarget.h"
+#include "llvm/Target/TargetMachine.h"
+
+namespace llvm {
+
+class SparcTargetMachine : public LLVMTargetMachine {
+ std::unique_ptr<TargetLoweringObjectFile> TLOF;
+ SparcSubtarget Subtarget;
+ bool is64Bit;
+ mutable StringMap<std::unique_ptr<SparcSubtarget>> SubtargetMap;
+public:
+ SparcTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Optional<Reloc::Model> RM, Optional<CodeModel::Model> CM,
+ CodeGenOpt::Level OL, bool JIT, bool is64bit);
+ ~SparcTargetMachine() override;
+
+ const SparcSubtarget *getSubtargetImpl() const { return &Subtarget; }
+ const SparcSubtarget *getSubtargetImpl(const Function &) const override;
+
+ // Pass Pipeline Configuration
+ TargetPassConfig *createPassConfig(PassManagerBase &PM) override;
+ TargetLoweringObjectFile *getObjFileLowering() const override {
+ return TLOF.get();
+ }
+
+ bool isMachineVerifierClean() const override {
+ return false;
+ }
+};
+
+/// Sparc 32-bit target machine
+///
+class SparcV8TargetMachine : public SparcTargetMachine {
+ virtual void anchor();
+public:
+ SparcV8TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Optional<Reloc::Model> RM, Optional<CodeModel::Model> CM,
+ CodeGenOpt::Level OL, bool JIT);
+};
+
+/// Sparc 64-bit target machine
+///
+class SparcV9TargetMachine : public SparcTargetMachine {
+ virtual void anchor();
+public:
+ SparcV9TargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Optional<Reloc::Model> RM, Optional<CodeModel::Model> CM,
+ CodeGenOpt::Level OL, bool JIT);
+};
+
+class SparcelTargetMachine : public SparcTargetMachine {
+ virtual void anchor();
+
+public:
+ SparcelTargetMachine(const Target &T, const Triple &TT, StringRef CPU,
+ StringRef FS, const TargetOptions &Options,
+ Optional<Reloc::Model> RM, Optional<CodeModel::Model> CM,
+ CodeGenOpt::Level OL, bool JIT);
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.cpp b/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.cpp
new file mode 100644
index 000000000000..d0db854f7849
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.cpp
@@ -0,0 +1,48 @@
+//===------- SparcTargetObjectFile.cpp - Sparc Object Info Impl -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SparcTargetObjectFile.h"
+#include "MCTargetDesc/SparcMCExpr.h"
+#include "llvm/BinaryFormat/Dwarf.h"
+#include "llvm/CodeGen/MachineModuleInfoImpls.h"
+#include "llvm/CodeGen/TargetLowering.h"
+
+using namespace llvm;
+
+void SparcELFTargetObjectFile::Initialize(MCContext &Ctx,
+ const TargetMachine &TM) {
+ TargetLoweringObjectFileELF::Initialize(Ctx, TM);
+ InitializeELF(TM.Options.UseInitArray);
+}
+
+const MCExpr *SparcELFTargetObjectFile::getTTypeGlobalReference(
+ const GlobalValue *GV, unsigned Encoding, const TargetMachine &TM,
+ MachineModuleInfo *MMI, MCStreamer &Streamer) const {
+
+ if (Encoding & dwarf::DW_EH_PE_pcrel) {
+ MachineModuleInfoELF &ELFMMI = MMI->getObjFileInfo<MachineModuleInfoELF>();
+
+ MCSymbol *SSym = getSymbolWithGlobalValueBase(GV, ".DW.stub", TM);
+
+ // Add information about the stub reference to ELFMMI so that the stub
+ // gets emitted by the asmprinter.
+ MachineModuleInfoImpl::StubValueTy &StubSym = ELFMMI.getGVStubEntry(SSym);
+ if (!StubSym.getPointer()) {
+ MCSymbol *Sym = TM.getSymbol(GV);
+ StubSym = MachineModuleInfoImpl::StubValueTy(Sym, !GV->hasLocalLinkage());
+ }
+
+ MCContext &Ctx = getContext();
+ return SparcMCExpr::create(SparcMCExpr::VK_Sparc_R_DISP32,
+ MCSymbolRefExpr::create(SSym, Ctx), Ctx);
+ }
+
+ return TargetLoweringObjectFileELF::getTTypeGlobalReference(GV, Encoding, TM,
+ MMI, Streamer);
+}
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.h b/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.h
new file mode 100644
index 000000000000..3b1b345c3b19
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetObjectFile.h
@@ -0,0 +1,37 @@
+//===-- SparcTargetObjectFile.h - Sparc Object Info -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_SPARCTARGETOBJECTFILE_H
+#define LLVM_LIB_TARGET_SPARC_SPARCTARGETOBJECTFILE_H
+
+#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
+
+namespace llvm {
+
+class MCContext;
+class TargetMachine;
+
+class SparcELFTargetObjectFile : public TargetLoweringObjectFileELF {
+public:
+ SparcELFTargetObjectFile() :
+ TargetLoweringObjectFileELF()
+ {}
+
+ void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
+
+ const MCExpr *getTTypeGlobalReference(const GlobalValue *GV,
+ unsigned Encoding,
+ const TargetMachine &TM,
+ MachineModuleInfo *MMI,
+ MCStreamer &Streamer) const override;
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Sparc/SparcTargetStreamer.h b/contrib/llvm/lib/Target/Sparc/SparcTargetStreamer.h
new file mode 100644
index 000000000000..3b503503abce
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/SparcTargetStreamer.h
@@ -0,0 +1,49 @@
+//===-- SparcTargetStreamer.h - Sparc Target Streamer ----------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIB_TARGET_SPARC_SPARCTARGETSTREAMER_H
+#define LLVM_LIB_TARGET_SPARC_SPARCTARGETSTREAMER_H
+
+#include "llvm/MC/MCELFStreamer.h"
+#include "llvm/MC/MCStreamer.h"
+
+namespace llvm {
+class SparcTargetStreamer : public MCTargetStreamer {
+ virtual void anchor();
+
+public:
+ SparcTargetStreamer(MCStreamer &S);
+ /// Emit ".register <reg>, #ignore".
+ virtual void emitSparcRegisterIgnore(unsigned reg) = 0;
+ /// Emit ".register <reg>, #scratch".
+ virtual void emitSparcRegisterScratch(unsigned reg) = 0;
+};
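+
+// For example, marking %g2 as a scratch register produces the directive:
+//   .register %g2, #scratch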
+
+// This part is for ASCII assembly output.
+class SparcTargetAsmStreamer : public SparcTargetStreamer {
+ formatted_raw_ostream &OS;
+
+public:
+ SparcTargetAsmStreamer(MCStreamer &S, formatted_raw_ostream &OS);
+ void emitSparcRegisterIgnore(unsigned reg) override;
+ void emitSparcRegisterScratch(unsigned reg) override;
+};
+
+// This part is for ELF object output
+class SparcTargetELFStreamer : public SparcTargetStreamer {
+public:
+ SparcTargetELFStreamer(MCStreamer &S);
+ MCELFStreamer &getStreamer();
+ void emitSparcRegisterIgnore(unsigned reg) override {}
+ void emitSparcRegisterScratch(unsigned reg) override {}
+};
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/lib/Target/Sparc/TargetInfo/SparcTargetInfo.cpp b/contrib/llvm/lib/Target/Sparc/TargetInfo/SparcTargetInfo.cpp
new file mode 100644
index 000000000000..d030bd9f232d
--- /dev/null
+++ b/contrib/llvm/lib/Target/Sparc/TargetInfo/SparcTargetInfo.cpp
@@ -0,0 +1,35 @@
+//===-- SparcTargetInfo.cpp - Sparc Target Implementation -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Sparc.h"
+#include "llvm/IR/Module.h"
+#include "llvm/Support/TargetRegistry.h"
+using namespace llvm;
+
+Target &llvm::getTheSparcTarget() {
+ static Target TheSparcTarget;
+ return TheSparcTarget;
+}
+Target &llvm::getTheSparcV9Target() {
+ static Target TheSparcV9Target;
+ return TheSparcV9Target;
+}
+Target &llvm::getTheSparcelTarget() {
+ static Target TheSparcelTarget;
+ return TheSparcelTarget;
+}
+
+extern "C" void LLVMInitializeSparcTargetInfo() {
+ RegisterTarget<Triple::sparc, /*HasJIT=*/true> X(getTheSparcTarget(), "sparc",
+ "Sparc", "Sparc");
+ RegisterTarget<Triple::sparcv9, /*HasJIT=*/true> Y(
+ getTheSparcV9Target(), "sparcv9", "Sparc V9", "Sparc");
+ RegisterTarget<Triple::sparcel, /*HasJIT=*/true> Z(
+ getTheSparcelTarget(), "sparcel", "Sparc LE", "Sparc");
+}